Tue, 17 Jan 2012 13:08:52 -0500
7071311: Decoder enhancement
Summary: Made decoder thread-safe
Reviewed-by: coleenp, kamg
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // no precompiled headers
26 #include "classfile/classLoader.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "jvm_solaris.h"
34 #include "memory/allocation.inline.hpp"
35 #include "memory/filemap.hpp"
36 #include "mutex_solaris.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "os_share_solaris.hpp"
39 #include "prims/jniFastGetField.hpp"
40 #include "prims/jvm.h"
41 #include "prims/jvm_misc.hpp"
42 #include "runtime/arguments.hpp"
43 #include "runtime/extendedPC.hpp"
44 #include "runtime/globals.hpp"
45 #include "runtime/interfaceSupport.hpp"
46 #include "runtime/java.hpp"
47 #include "runtime/javaCalls.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "runtime/objectMonitor.hpp"
50 #include "runtime/osThread.hpp"
51 #include "runtime/perfMemory.hpp"
52 #include "runtime/sharedRuntime.hpp"
53 #include "runtime/statSampler.hpp"
54 #include "runtime/stubRoutines.hpp"
55 #include "runtime/threadCritical.hpp"
56 #include "runtime/timer.hpp"
57 #include "services/attachListener.hpp"
58 #include "services/runtimeService.hpp"
59 #include "thread_solaris.inline.hpp"
60 #include "utilities/decoder.hpp"
61 #include "utilities/defaultStream.hpp"
62 #include "utilities/events.hpp"
63 #include "utilities/growableArray.hpp"
64 #include "utilities/vmError.hpp"
65 #ifdef TARGET_ARCH_x86
66 # include "assembler_x86.inline.hpp"
67 # include "nativeInst_x86.hpp"
68 #endif
69 #ifdef TARGET_ARCH_sparc
70 # include "assembler_sparc.inline.hpp"
71 # include "nativeInst_sparc.hpp"
72 #endif
73 #ifdef COMPILER1
74 #include "c1/c1_Runtime1.hpp"
75 #endif
76 #ifdef COMPILER2
77 #include "opto/runtime.hpp"
78 #endif
80 // put OS-includes here
81 # include <dlfcn.h>
82 # include <errno.h>
83 # include <exception>
84 # include <link.h>
85 # include <poll.h>
86 # include <pthread.h>
87 # include <pwd.h>
88 # include <schedctl.h>
89 # include <setjmp.h>
90 # include <signal.h>
91 # include <stdio.h>
92 # include <alloca.h>
93 # include <sys/filio.h>
94 # include <sys/ipc.h>
95 # include <sys/lwp.h>
96 # include <sys/machelf.h> // for elf Sym structure used by dladdr1
97 # include <sys/mman.h>
98 # include <sys/processor.h>
99 # include <sys/procset.h>
100 # include <sys/pset.h>
101 # include <sys/resource.h>
102 # include <sys/shm.h>
103 # include <sys/socket.h>
104 # include <sys/stat.h>
105 # include <sys/systeminfo.h>
106 # include <sys/time.h>
107 # include <sys/times.h>
108 # include <sys/types.h>
109 # include <sys/wait.h>
110 # include <sys/utsname.h>
111 # include <thread.h>
112 # include <unistd.h>
113 # include <sys/priocntl.h>
114 # include <sys/rtpriocntl.h>
115 # include <sys/tspriocntl.h>
116 # include <sys/iapriocntl.h>
117 # include <sys/loadavg.h>
118 # include <string.h>
119 # include <stdio.h>
121 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later
122 # include <sys/procfs.h> // see comment in <sys/procfs.h>
124 #define MAX_PATH (2 * K)
126 // for timer info max values which include all bits
127 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
129 #ifdef _GNU_SOURCE
130 // See bug #6514594
131 extern "C" int madvise(caddr_t, size_t, int);
132 extern "C" int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
133 int attr, int mask);
134 #endif //_GNU_SOURCE
136 /*
137 MPSS Changes Start.
138 The JVM binary needs to be built and run on pre-Solaris 9
139 systems, but the constants needed by MPSS are only in Solaris 9
140 header files. They are textually replicated here to allow
141 building on earlier systems. Once building on Solaris 8 is
142 no longer a requirement, these #defines can be replaced by ordinary
143 system .h inclusion.
145 In earlier versions of the JDK and Solaris, we used ISM for large pages.
146 But ISM requires shared memory to achieve this and thus has many caveats.
MPSS is fully transparent and is a cleaner way to get large pages.
Although we still require keeping ISM for backward compatibility as well as
149 giving the opportunity to use large pages on older systems it is
150 recommended that MPSS be used for Solaris 9 and above.
152 */
154 #ifndef MC_HAT_ADVISE
156 struct memcntl_mha {
157 uint_t mha_cmd; /* command(s) */
158 uint_t mha_flags;
159 size_t mha_pagesize;
160 };
161 #define MC_HAT_ADVISE 7 /* advise hat map size */
162 #define MHA_MAPSIZE_VA 0x1 /* set preferred page size */
163 #define MAP_ALIGN 0x200 /* addr specifies alignment */
165 #endif
166 // MPSS Changes End.
169 // Here are some liblgrp types from sys/lgrp_user.h to be able to
170 // compile on older systems without this header file.
172 #ifndef MADV_ACCESS_LWP
173 # define MADV_ACCESS_LWP 7 /* next LWP to access heavily */
174 #endif
175 #ifndef MADV_ACCESS_MANY
176 # define MADV_ACCESS_MANY 8 /* many processes to access heavily */
177 #endif
179 #ifndef LGRP_RSRC_CPU
180 # define LGRP_RSRC_CPU 0 /* CPU resources */
181 #endif
182 #ifndef LGRP_RSRC_MEM
183 # define LGRP_RSRC_MEM 1 /* memory resources */
184 #endif
186 // Some more macros from sys/mman.h that are not present in Solaris 8.
188 #ifndef MAX_MEMINFO_CNT
189 /*
190 * info_req request type definitions for meminfo
191 * request types starting with MEMINFO_V are used for Virtual addresses
192 * and should not be mixed with MEMINFO_PLGRP which is targeted for Physical
193 * addresses
194 */
195 # define MEMINFO_SHIFT 16
196 # define MEMINFO_MASK (0xFF << MEMINFO_SHIFT)
197 # define MEMINFO_VPHYSICAL (0x01 << MEMINFO_SHIFT) /* get physical addr */
198 # define MEMINFO_VLGRP (0x02 << MEMINFO_SHIFT) /* get lgroup */
199 # define MEMINFO_VPAGESIZE (0x03 << MEMINFO_SHIFT) /* size of phys page */
200 # define MEMINFO_VREPLCNT (0x04 << MEMINFO_SHIFT) /* no. of replica */
201 # define MEMINFO_VREPL (0x05 << MEMINFO_SHIFT) /* physical replica */
202 # define MEMINFO_VREPL_LGRP (0x06 << MEMINFO_SHIFT) /* lgrp of replica */
203 # define MEMINFO_PLGRP (0x07 << MEMINFO_SHIFT) /* lgroup for paddr */
205 /* maximum number of addresses meminfo() can process at a time */
206 # define MAX_MEMINFO_CNT 256
208 /* maximum number of request types */
209 # define MAX_MEMINFO_REQ 31
210 #endif
212 // see thr_setprio(3T) for the basis of these numbers
213 #define MinimumPriority 0
214 #define NormalPriority 64
215 #define MaximumPriority 127
217 // Values for ThreadPriorityPolicy == 1
218 int prio_policy1[MaxPriority+1] = { -99999, 0, 16, 32, 48, 64,
219 80, 96, 112, 124, 127 };
221 // System parameters used internally
222 static clock_t clock_tics_per_sec = 100;
224 // Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
225 static bool enabled_extended_FILE_stdio = false;
227 // For diagnostics to print a message once. see run_periodic_checks
228 static bool check_addr0_done = false;
229 static sigset_t check_signal_done;
230 static bool check_signals = true;
232 address os::Solaris::handler_start; // start pc of thr_sighndlrinfo
233 address os::Solaris::handler_end; // end pc of thr_sighndlrinfo
235 address os::Solaris::_main_stack_base = NULL; // 4352906 workaround
// "default" initializers for missing libc APIs
extern "C" {
  // Fallbacks installed when the private lwp synchronization entry points
  // cannot be resolved from libc: init just zero-fills the primitive and
  // reports success; destroy is a no-op.
  static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int lwp_mutex_destroy(mutex_t *mx)                 { return 0; }

  static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
  static int lwp_cond_destroy(cond_t *cv)                   { return 0; }
}
// "default" initializers for pthread-based synchronization
extern "C" {
  // Same idea as the lwp_* fallbacks above, for the pthread-backed path:
  // zero-fill the primitive and report success.
  static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
  static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
}
253 // Thread Local Storage
254 // This is common to all Solaris platforms so it is defined here,
255 // in this common file.
256 // The declarations are in the os_cpu threadLS*.hpp files.
257 //
258 // Static member initialization for TLS
259 Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};
#ifndef PRODUCT
// Percentage helper, scoped to this debug-only section via #undef below.
#define _PCT(n,d)       ((100.0*(double)(n))/(double)(d))

// Hit/miss counters for the TLS thread-cache fast path (debug builds only).
int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

// Report thread-cache effectiveness; see get_thread_via_cache_slowly().
void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss+_tcacheHit;
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT
// Slow path behind ThreadLocalStorage::thread(): resolve the current
// Thread* via the OS, sanity-check that the caller's stack pointer lies
// inside the resolved thread's recorded stack (unless error reporting is
// in progress), then prime cache slot 'index' for future fast lookups.
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread *thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
               sp >= thread->_stack_base - thread->_stack_size) ||
              is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->set_self_raw_id(raw_id);  // mark for quick retrieval
    _get_thread_cache[ index ] = thread;
  }
  return thread;
}
// A fully-zeroed pseudo-Thread used as the "empty" cache entry; sized to
// cover a whole Thread object so fields read through a stale cached
// pointer see zeros rather than garbage.
static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)

// Publish 'thread' as the current thread (NULL on thread exit) in both
// the OS TLS slot and the fast-lookup cache.
void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update thread cache with new thread if setting on thread create,
  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}
309 void ThreadLocalStorage::pd_init() {
310 for (int i = 0; i < _pd_cache_size; i++) {
311 _get_thread_cache[i] = NO_CACHED_THREAD;
312 }
313 }
// Invalidate all the caches (happens to be the same as pd_init:
// every slot is reset to the zeroed sentinel).
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }

#undef NO_CACHED_THREAD
320 // END Thread Local Storage
322 static inline size_t adjust_stack_size(address base, size_t size) {
323 if ((ssize_t)size < 0) {
324 // 4759953: Compensate for ridiculous stack size.
325 size = max_intx;
326 }
327 if (size > (size_t)base) {
328 // 4812466: Make sure size doesn't allow the stack to wrap the address space.
329 size = (size_t)base;
330 }
331 return size;
332 }
// Query the current thread's stack segment via thr_stksegment().
// The asserts show ss_sp is the high end of the stack: the local 'st'
// lives below ss_sp and above ss_sp - ss_size.
// NOTE(review): ss_size is adjusted before retval is asserted; this
// relies on thr_stksegment() succeeding (debug builds would catch a
// failure one line later).
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert(retval == 0, "incorrect return value from thr_stksegment");
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}
// Return the base (highest address) of the current thread's stack.
// For the primordial thread the value is computed once and cached in
// os::Solaris::_main_stack_base; subsequent calls return the cache.
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}
365 size_t os::current_stack_size() {
366 size_t size;
368 int r = thr_main() ;
369 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
370 if(!r) {
371 size = get_stack_info().ss_size;
372 } else {
373 struct rlimit limits;
374 getrlimit(RLIMIT_STACK, &limits);
375 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
376 }
377 // base may not be page aligned
378 address base = current_stack_base();
379 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
380 return (size_t)(base - bottom);
381 }
383 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
384 return localtime_r(clock, res);
385 }
// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
// Used by classloader os::read
// os::restartable_read calls skip this layer and stay in _thread_in_native

void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  OSThread* osthread = thread->osthread();
  // Remember the current state so cleanup_interruptible() can restore it.
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the stack walkable before blocking, then transition to
  // _thread_blocked for the duration of the interruptible call.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}
// Version of setup_interruptible() for threads that are already in
// _thread_blocked. Used by os_sleep().
void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
  // No state transition needed; only make the stack walkable.
  thread->frame_anchor()->make_walkable(thread);
}
414 JavaThread* os::Solaris::setup_interruptible() {
415 JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
416 setup_interruptible(thread);
417 return thread;
418 }
420 void os::Solaris::try_enable_extended_io() {
421 typedef int (*enable_extended_FILE_stdio_t)(int, int);
423 if (!UseExtendedFileIO) {
424 return;
425 }
427 enable_extended_FILE_stdio_t enabler =
428 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
429 "enable_extended_FILE_stdio");
430 if (enabler) {
431 enabler(-1, -1);
432 }
433 }
#ifdef ASSERT

// Debug-only: fetch the current JavaThread and verify it is in
// _thread_in_native, the state required of interruptible-native callers.
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only: verify the thread is still in _thread_in_native on cleanup.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif
// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  // Transition back from _thread_blocked to the state saved by
  // setup_interruptible().
  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}
// I/O interruption related counters called in _INTERRUPTIBLE

// Count an interrupt observed before the I/O operation started.
void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

// Count an interrupt that arrived while the I/O operation was in progress.
void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}
470 static int _processors_online = 0;
472 jint os::Solaris::_os_thread_limit = 0;
473 volatile jint os::Solaris::_os_thread_count = 0;
// Bytes of physical memory not currently in use.
julong os::available_memory() {
  return Solaris::available_memory();
}

julong os::Solaris::available_memory() {
  // _SC_AVPHYS_PAGES: number of physical pages currently not in use.
  return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
}
// Total physical memory in bytes; cached by initialize_system_info().
julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}
// Clamp a requested allocation size to what the process can address.
// On 64-bit VMs any size is acceptable; on 32-bit VMs the cap starts just
// below 4G and backs off further when the result is not allocatable.
julong os::allocatable_physical_memory(julong size) {
#ifdef _LP64
  return size;
#else
  julong result = MIN2(size, (julong)3835*M);
  if (!is_allocatable(result)) {
    // Memory allocations will be aligned but the alignment
    // is not known at this point.  Alignments will
    // be at most to LargePageSizeInBytes.  Protect
    // allocations from alignments up to illegal
    // values. If at this point 2G is illegal.
    julong reasonable_size = (julong)2*G - 2 * LargePageSizeInBytes;
    result = MIN2(size, reasonable_size);
  }
  return result;
#endif
}
507 static hrtime_t first_hrtime = 0;
508 static const hrtime_t hrtime_hz = 1000*1000*1000;
509 const int LOCK_BUSY = 1;
510 const int LOCK_FREE = 0;
511 const int LOCK_INVALID = -1;
512 static volatile hrtime_t max_hrtime = 0;
513 static volatile int max_hrtime_lock = LOCK_FREE; // Update counter with LSB as lock-in-progress
// Cache basic system facts at VM startup: configured and online processor
// counts, and total physical memory.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}
// Number of processors available to this process: the size of the bound
// processor set if one exists, otherwise the number of online CPUs.
// Also refreshes the _processors_online cache when a pset is found.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}
// Fill *id_array (C-heap allocated; caller frees) with the ids of the
// processors in 'pset' and set *id_length to their count. Returns true on
// success. The set can change between the two pset_info() calls; on a
// second-call failure the array remains allocated for the caller to free.
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}
556 // Callers of find_processors_online() must tolerate imprecise results --
557 // the system configuration can change asynchronously because of DR
558 // or explicit psradm operations.
559 //
560 // We also need to take care that the loop (below) terminates as the
561 // number of processors online can change between the _SC_NPROCESSORS_ONLN
562 // request and the loop that builds the list of processor ids. Unfortunately
563 // there's no reliable way to determine the maximum valid processor id,
564 // so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
565 // man pages, which claim the processor id set is "sparse, but
566 // not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
567 // exit the loop.
568 //
569 // In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
570 // not available on S8.0.
// Collect the ids of online processors into *id_array (C-heap allocated;
// caller frees), setting *id_length to the count found. Results may be
// imprecise (possibly even empty) if the configuration changes while we
// scan -- see the commentary above. Always returns true.
static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux.  Instead, we just return what
    // we've got.  Note that in the worst case find_processors_online() could
    // return an empty set.  (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}
// Fill 'distribution' with 'distribution_length' processor ids drawn from
// 'id_array', stepping by ProcessDistributionStride ("processors per
// board") so consecutive assignments land on different boards. Returns
// false if there are fewer processors than slots requested. The static
// 'board' cursor persists across calls so successive distributions do not
// all start at board 0.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method.  What we'd like is
  //                It would be nice to have an API that let us ask
  //                how many processes are bound to a processor,
  //                but we don't have that, either.
  //                In the short term, "board" is static so that
  //                subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  // (Termination: the id_length >= distribution_length check above
  // guarantees enough marked-available slots exist.)
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id);
  }
  return true;
}
672 void os::set_native_thread_name(const char *name) {
673 // Not yet implemented.
674 return;
675 }
// Distribute 'length' logical processes across available processors,
// writing the chosen processor ids into 'distribution'. Uses the bound
// processor set if one exists, else all online processors. Returns true
// on success (results are best-effort; see the races noted below).
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  // The helpers may allocate the array even on failure; always free it.
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array);
  }
  return result;
}
// Bind the current LWP to the given processor id.
// Returns true if the binding succeeded.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  int bind_result =
    processor_bind(P_LWPID,                       // bind LWP.
                   P_MYID,                        // bind current LWP.
                   (processorid_t) processor_id,  // id.
                   NULL);                         // don't return old binding.
  return (bind_result == 0);
}
716 bool os::getenv(const char* name, char* buffer, int len) {
717 char* val = ::getenv( name );
718 if ( val == NULL
719 || strlen(val) + 1 > len ) {
720 if (len > 0) buffer[0] = 0; // return a null string
721 return false;
722 }
723 strcpy( buffer, val );
724 return true;
725 }
// Return true if the process is running with elevated (set-uid/set-gid)
// privileges, i.e. its real and effective user or group ids differ.
// Note: a plain root process (uid == euid == 0) returns false here.
// The result is computed once and cached (benignly racy on first call).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}
741 void os::init_system_properties_values() {
742 char arch[12];
743 sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
745 // The next steps are taken in the product version:
746 //
747 // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
748 // This library should be located at:
749 // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
750 //
751 // If "/jre/lib/" appears at the right place in the path, then we
752 // assume libjvm[_g].so is installed in a JDK and we use this path.
753 //
754 // Otherwise exit with message: "Could not create the Java virtual machine."
755 //
756 // The following extra steps are taken in the debugging version:
757 //
758 // If "/jre/lib/" does NOT appear at the right place in the path
759 // instead of exit check for $JAVA_HOME environment variable.
760 //
761 // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
762 // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
763 // it looks like libjvm[_g].so is installed there
764 // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
765 //
766 // Otherwise exit.
767 //
768 // Important note: if the location of libjvm.so changes this
769 // code needs to be changed accordingly.
771 // The next few definitions allow the code to be verbatim:
772 #define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
773 #define free(p) FREE_C_HEAP_ARRAY(char, p)
774 #define getenv(n) ::getenv(n)
776 #define EXTENSIONS_DIR "/lib/ext"
777 #define ENDORSED_DIR "/lib/endorsed"
778 #define COMMON_DIR "/usr/jdk/packages"
780 {
781 /* sysclasspath, java_home, dll_dir */
782 {
783 char *home_path;
784 char *dll_path;
785 char *pslash;
786 char buf[MAXPATHLEN];
787 os::jvm_path(buf, sizeof(buf));
789 // Found the full path to libjvm.so.
790 // Now cut the path to <java_home>/jre if we can.
791 *(strrchr(buf, '/')) = '\0'; /* get rid of /libjvm.so */
792 pslash = strrchr(buf, '/');
793 if (pslash != NULL)
794 *pslash = '\0'; /* get rid of /{client|server|hotspot} */
795 dll_path = malloc(strlen(buf) + 1);
796 if (dll_path == NULL)
797 return;
798 strcpy(dll_path, buf);
799 Arguments::set_dll_dir(dll_path);
801 if (pslash != NULL) {
802 pslash = strrchr(buf, '/');
803 if (pslash != NULL) {
804 *pslash = '\0'; /* get rid of /<arch> */
805 pslash = strrchr(buf, '/');
806 if (pslash != NULL)
807 *pslash = '\0'; /* get rid of /lib */
808 }
809 }
811 home_path = malloc(strlen(buf) + 1);
812 if (home_path == NULL)
813 return;
814 strcpy(home_path, buf);
815 Arguments::set_java_home(home_path);
817 if (!set_boot_path('/', ':'))
818 return;
819 }
821 /*
822 * Where to look for native libraries
823 */
824 {
825 // Use dlinfo() to determine the correct java.library.path.
826 //
827 // If we're launched by the Java launcher, and the user
828 // does not set java.library.path explicitly on the commandline,
829 // the Java launcher sets LD_LIBRARY_PATH for us and unsets
830 // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
831 // dlinfo returns LD_LIBRARY_PATH + crle settings (including
832 // /usr/lib), which is exactly what we want.
833 //
834 // If the user does set java.library.path, it completely
835 // overwrites this setting, and always has.
836 //
837 // If we're not launched by the Java launcher, we may
838 // get here with any/all of the LD_LIBRARY_PATH[_32|64]
839 // settings. Again, dlinfo does exactly what we want.
841 Dl_serinfo _info, *info = &_info;
842 Dl_serpath *path;
843 char* library_path;
844 char *common_path;
845 int i;
847 // determine search path count and required buffer size
848 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
849 vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
850 }
852 // allocate new buffer and initialize
853 info = (Dl_serinfo*)malloc(_info.dls_size);
854 if (info == NULL) {
855 vm_exit_out_of_memory(_info.dls_size,
856 "init_system_properties_values info");
857 }
858 info->dls_size = _info.dls_size;
859 info->dls_cnt = _info.dls_cnt;
861 // obtain search path information
862 if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
863 free(info);
864 vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
865 }
867 path = &info->dls_serpath[0];
869 // Note: Due to a legacy implementation, most of the library path
870 // is set in the launcher. This was to accomodate linking restrictions
871 // on legacy Solaris implementations (which are no longer supported).
872 // Eventually, all the library path setting will be done here.
873 //
874 // However, to prevent the proliferation of improperly built native
875 // libraries, the new path component /usr/jdk/packages is added here.
877 // Determine the actual CPU architecture.
878 char cpu_arch[12];
879 sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
880 #ifdef _LP64
881 // If we are a 64-bit vm, perform the following translations:
882 // sparc -> sparcv9
883 // i386 -> amd64
884 if (strcmp(cpu_arch, "sparc") == 0)
885 strcat(cpu_arch, "v9");
886 else if (strcmp(cpu_arch, "i386") == 0)
887 strcpy(cpu_arch, "amd64");
888 #endif
890 // Construct the invariant part of ld_library_path. Note that the
891 // space for the colon and the trailing null are provided by the
892 // nulls included by the sizeof operator.
893 size_t bufsize = sizeof(COMMON_DIR) + sizeof("/lib/") + strlen(cpu_arch);
894 common_path = malloc(bufsize);
895 if (common_path == NULL) {
896 free(info);
897 vm_exit_out_of_memory(bufsize,
898 "init_system_properties_values common_path");
899 }
900 sprintf(common_path, COMMON_DIR "/lib/%s", cpu_arch);
902 // struct size is more than sufficient for the path components obtained
903 // through the dlinfo() call, so only add additional space for the path
904 // components explicitly added here.
905 bufsize = info->dls_size + strlen(common_path);
906 library_path = malloc(bufsize);
907 if (library_path == NULL) {
908 free(info);
909 free(common_path);
910 vm_exit_out_of_memory(bufsize,
911 "init_system_properties_values library_path");
912 }
913 library_path[0] = '\0';
915 // Construct the desired Java library path from the linker's library
916 // search path.
917 //
918 // For compatibility, it is optimal that we insert the additional path
919 // components specific to the Java VM after those components specified
920 // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
921 // infrastructure.
922 if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it
923 strcpy(library_path, common_path);
924 } else {
925 int inserted = 0;
926 for (i = 0; i < info->dls_cnt; i++, path++) {
927 uint_t flags = path->dls_flags & LA_SER_MASK;
928 if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
929 strcat(library_path, common_path);
930 strcat(library_path, os::path_separator());
931 inserted = 1;
932 }
933 strcat(library_path, path->dls_name);
934 strcat(library_path, os::path_separator());
935 }
936 // eliminate trailing path separator
937 library_path[strlen(library_path)-1] = '\0';
938 }
940 // happens before argument parsing - can't use a trace flag
941 // tty->print_raw("init_system_properties_values: native lib path: ");
942 // tty->print_raw_cr(library_path);
944 // callee copies into its own buffer
945 Arguments::set_library_path(library_path);
947 free(common_path);
948 free(library_path);
949 free(info);
950 }
952 /*
953 * Extensions directories.
954 *
955 * Note that the space for the colon and the trailing null are provided
956 * by the nulls included by the sizeof operator (so actually one byte more
957 * than necessary is allocated).
958 */
959 {
960 char *buf = (char *) malloc(strlen(Arguments::get_java_home()) +
961 sizeof(EXTENSIONS_DIR) + sizeof(COMMON_DIR) +
962 sizeof(EXTENSIONS_DIR));
963 sprintf(buf, "%s" EXTENSIONS_DIR ":" COMMON_DIR EXTENSIONS_DIR,
964 Arguments::get_java_home());
965 Arguments::set_ext_dirs(buf);
966 }
968 /* Endorsed standards default directory. */
969 {
970 char * buf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
971 sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
972 Arguments::set_endorsed_dirs(buf);
973 }
974 }
976 #undef malloc
977 #undef free
978 #undef getenv
979 #undef EXTENSIONS_DIR
980 #undef ENDORSED_DIR
981 #undef COMMON_DIR
983 }
// Trap into an attached debugger. BREAKPOINT expands to the platform's
// breakpoint instruction (see the platform globalDefinitions headers).
void os::breakpoint() {
  BREAKPOINT;
}
989 bool os::obsolete_option(const JavaVMOption *option)
990 {
991 if (!strncmp(option->optionString, "-Xt", 3)) {
992 return true;
993 } else if (!strncmp(option->optionString, "-Xtm", 4)) {
994 return true;
995 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
996 return true;
997 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
998 return true;
999 }
1000 return false;
1001 }
1003 bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
1004 address stackStart = (address)thread->stack_base();
1005 address stackEnd = (address)(stackStart - (address)thread->stack_size());
1006 if (sp < stackStart && sp >= stackEnd ) return true;
1007 return false;
1008 }
// Deliberately empty: provides a stable, C-linkage symbol on which an
// external debugger can set a breakpoint.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
// Returns an estimate of the current stack pointer. Result must be guaranteed to
// point into the calling threads stack, and be no lower than the current stack
// pointer.
address os::current_stack_pointer() {
  // The address of a stack-allocated local approximates the SP of this
  // frame; the +8 presumably nudges the estimate above this frame's slot
  // for 'dummy' (see the %%%% note below — not yet confirmed).
  volatile int dummy;
  address sp = (address)&dummy + 8; // %%%% need to confirm if this is right
  return sp;
}
1023 static thread_t main_thread;
// Thread start routine for all new Java threads.
// 'thread_addr' is the Thread* passed to thr_create() by os::create_thread().
// Records OS-level ids, applies any pre-set priority, installs the HotSpot
// signal mask, runs the thread, and performs exit bookkeeping.
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // Note: 'counter' is intentionally updated without synchronization;
  // any interleaving still yields a usable randomization value.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  int prio;
  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id( _lwp_self() );  // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init () ;

  // Bind this thread to its NUMA locality group, if NUMA is enabled.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set priority now that we have an lwp.
  // Get the priority from libthread and set the priority
  // for the new Solaris lwp.
  if ( osthr->thread_id() != -1 ) {
    if ( UseThreadPriorities ) {
      thr_getprio(osthr->thread_id(), &prio);
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio );
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    // thread_id is -1 until os::create_thread() stores the real id; if we
    // get here first, the priority will be applied by the creator instead.
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  // Execute the thread's payload; returns when the thread is done.
  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}
1088 static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
1089 // Allocate the OSThread object
1090 OSThread* osthread = new OSThread(NULL, NULL);
1091 if (osthread == NULL) return NULL;
1093 // Store info on the Solaris thread into the OSThread
1094 osthread->set_thread_id(thread_id);
1095 osthread->set_lwp_id(_lwp_self());
1096 thread->_schedctl = (void *) schedctl_init () ;
1098 if (UseNUMA) {
1099 int lgrp_id = os::numa_get_group_id();
1100 if (lgrp_id != -1) {
1101 thread->set_lgrp_id(lgrp_id);
1102 }
1103 }
1105 if ( ThreadPriorityVerbose ) {
1106 tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
1107 osthread->thread_id(), osthread->lwp_id() );
1108 }
1110 // Initial thread state is INITIALIZED, not SUSPENDED
1111 osthread->set_state(INITIALIZED);
1113 return osthread;
1114 }
1116 void os::Solaris::hotspot_sigmask(Thread* thread) {
1118 //Save caller's signal mask
1119 sigset_t sigmask;
1120 thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
1121 OSThread *osthread = thread->osthread();
1122 osthread->set_caller_sigmask(sigmask);
1124 thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
1125 if (!ReduceSignalUsage) {
1126 if (thread->is_VM_thread()) {
1127 // Only the VM thread handles BREAK_SIGNAL ...
1128 thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
1129 } else {
1130 // ... all other threads block BREAK_SIGNAL
1131 assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
1132 thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
1133 }
1134 }
1135 }
1137 bool os::create_attached_thread(JavaThread* thread) {
1138 #ifdef ASSERT
1139 thread->verify_not_published();
1140 #endif
1141 OSThread* osthread = create_os_thread(thread, thr_self());
1142 if (osthread == NULL) {
1143 return false;
1144 }
1146 // Initial thread state is RUNNABLE
1147 osthread->set_state(RUNNABLE);
1148 thread->set_osthread(osthread);
1150 // initialize signal mask for this thread
1151 // and save the caller's signal mask
1152 os::Solaris::hotspot_sigmask(thread);
1154 return true;
1155 }
// Attach the primordial (launcher) thread to the VM as the main JavaThread.
// Returns false if the OSThread wrapper could not be allocated.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  // Lazily wrap the primordial thread; 'main_thread' holds its thread_t id
  // (recorded at VM startup, outside this view).
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}
1180 // _T2_libthread is true if we believe we are running with the newer
1181 // SunSoft lwp/libthread.so (2.8 patch, 2.9 default)
1182 bool os::Solaris::_T2_libthread = false;
// Create a new native (Solaris) thread to run the given HotSpot Thread.
// 'thr_type' selects default stack size and bound/unbound policy;
// 'stack_size' of 0 means "use the type's default". The thread is created
// THR_SUSPENDED and later released by os::pd_start_thread().
// Returns false on allocation or thr_create() failure.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    // Map the thread type to a label for the trace message.
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64).
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need to be at least 5Mb of unreserved address space. We do check by
    // trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl(). This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );


  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority otherwise use NormalPriority

  if ( UseThreadPriorities ) {
     thr_setprio(tid, (DefaultThreadPriority == -1) ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
1371 /* defined for >= Solaris 10. This allows builds on earlier versions
1372 * of Solaris to take advantage of the newly reserved Solaris JVM signals
1373 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
1374 * and -XX:+UseAltSigs does nothing since these should have no conflict
1375 */
1376 #if !defined(SIGJVM1)
1377 #define SIGJVM1 39
1378 #define SIGJVM2 40
1379 #endif
1381 debug_only(static bool signal_sets_initialized = false);
1382 static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
1383 int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
1384 int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1386 bool os::Solaris::is_sig_ignored(int sig) {
1387 struct sigaction oact;
1388 sigaction(sig, (struct sigaction*)NULL, &oact);
1389 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1390 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1391 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1392 return true;
1393 else
1394 return false;
1395 }
// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
//
// Returns true when the reserved SIGJVM1/SIGJVM2 numbers fall below the
// real-time signal range, i.e. when this system actually provides them.
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}
1403 void os::Solaris::signal_sets_init() {
1404 // Should also have an assertion stating we are still single-threaded.
1405 assert(!signal_sets_initialized, "Already initialized");
1406 // Fill in signals that are necessarily unblocked for all threads in
1407 // the VM. Currently, we unblock the following signals:
1408 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1409 // by -Xrs (=ReduceSignalUsage));
1410 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1411 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1412 // the dispositions or masks wrt these signals.
1413 // Programs embedding the VM that want to use the above signals for their
1414 // own purposes must, at this time, use the "-Xrs" option to prevent
1415 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1416 // (See bug 4345157, and other related bugs).
1417 // In reality, though, unblocking these signals is really a nop, since
1418 // these signals are not blocked by default.
1419 sigemptyset(&unblocked_sigs);
1420 sigemptyset(&allowdebug_blocked_sigs);
1421 sigaddset(&unblocked_sigs, SIGILL);
1422 sigaddset(&unblocked_sigs, SIGSEGV);
1423 sigaddset(&unblocked_sigs, SIGBUS);
1424 sigaddset(&unblocked_sigs, SIGFPE);
1426 if (isJVM1available) {
1427 os::Solaris::set_SIGinterrupt(SIGJVM1);
1428 os::Solaris::set_SIGasync(SIGJVM2);
1429 } else if (UseAltSigs) {
1430 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1431 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1432 } else {
1433 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1434 os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1435 }
1437 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1438 sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1440 if (!ReduceSignalUsage) {
1441 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1442 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1443 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1444 }
1445 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1446 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1447 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1448 }
1449 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1450 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1451 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1452 }
1453 }
1454 // Fill in signals that are blocked by all but the VM thread.
1455 sigemptyset(&vm_sigs);
1456 if (!ReduceSignalUsage)
1457 sigaddset(&vm_sigs, BREAK_SIGNAL);
1458 debug_only(signal_sets_initialized = true);
1460 // For diagnostics only used in run_periodic_checks
1461 sigemptyset(&check_signal_done);
1462 }
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Accessor only — the set is built once in signal_sets_init().
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Accessor only — the set is built once in signal_sets_init().
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
// These are signals that are blocked during cond_wait to allow debugger in
// Accessor only — the set is built once in signal_sets_init().
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
// Terminate handler (installed via std::set_terminate in
// os::initialize_thread): routes an uncaught C++ exception through the
// VM error reporter (VMError::report_and_die) instead of a bare abort.
void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}
1491 // First crack at OS-specific initialization, from inside the new thread.
1492 void os::initialize_thread() {
1493 int r = thr_main() ;
1494 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
1495 if (r) {
1496 JavaThread* jt = (JavaThread *)Thread::current();
1497 assert(jt != NULL,"Sanity check");
1498 size_t stack_size;
1499 address base = jt->stack_base();
1500 if (Arguments::created_by_java_launcher()) {
1501 // Use 2MB to allow for Solaris 7 64 bit mode.
1502 stack_size = JavaThread::stack_size_at_create() == 0
1503 ? 2048*K : JavaThread::stack_size_at_create();
1505 // There are rare cases when we may have already used more than
1506 // the basic stack size allotment before this method is invoked.
1507 // Attempt to allow for a normally sized java_stack.
1508 size_t current_stack_offset = (size_t)(base - (address)&stack_size);
1509 stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
1510 } else {
1511 // 6269555: If we were not created by a Java launcher, i.e. if we are
1512 // running embedded in a native application, treat the primordial thread
1513 // as much like a native attached thread as possible. This means using
1514 // the current stack size from thr_stksegment(), unless it is too large
1515 // to reliably setup guard pages. A reasonable max size is 8MB.
1516 size_t current_size = current_stack_size();
1517 // This should never happen, but just in case....
1518 if (current_size == 0) current_size = 2 * K * K;
1519 stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
1520 }
1521 address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
1522 stack_size = (size_t)(base - bottom);
1524 assert(stack_size > 0, "Stack size calculation problem");
1526 if (stack_size > jt->stack_size()) {
1527 NOT_PRODUCT(
1528 struct rlimit limits;
1529 getrlimit(RLIMIT_STACK, &limits);
1530 size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
1531 assert(size >= jt->stack_size(), "Stack size problem in main thread");
1532 )
1533 tty->print_cr(
1534 "Stack size of %d Kb exceeds current limit of %d Kb.\n"
1535 "(Stack sizes are rounded up to a multiple of the system page size.)\n"
1536 "See limit(1) to increase the stack size limit.",
1537 stack_size / K, jt->stack_size() / K);
1538 vm_exit(1);
1539 }
1540 assert(jt->stack_size() >= stack_size,
1541 "Attempt to map more stack than was allocated");
1542 jt->set_stack_size(stack_size);
1543 }
1545 // 5/22/01: Right now alternate signal stacks do not handle
1546 // throwing stack overflow exceptions, see bug 4463178
1547 // Until a fix is found for this, T2 will NOT imply alternate signal
1548 // stacks.
1549 // If using T2 libthread threads, install an alternate signal stack.
1550 // Because alternate stacks associate with LWPs on Solaris,
1551 // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
1552 // we prefer to explicitly stack bang.
1553 // If not using T2 libthread, but using UseBoundThreads any threads
1554 // (primordial thread, jni_attachCurrentThread) we do not create,
1555 // probably are not bound, therefore they can not have an alternate
1556 // signal stack. Since our stack banging code is generated and
1557 // is shared across threads, all threads must be bound to allow
1558 // using alternate signal stacks. The alternative is to interpose
1559 // on _lwp_create to associate an alt sig stack with each LWP,
1560 // and this could be a problem when the JVM is embedded.
1561 // We would prefer to use alternate signal stacks with T2
1562 // Since there is currently no accurate way to detect T2
1563 // we do not. Assuming T2 when running T1 causes sig 11s or assertions
1564 // on installing alternate signal stacks
1567 // 05/09/03: removed alternate signal stack support for Solaris
1568 // The alternate signal stack mechanism is no longer needed to
1569 // handle stack overflow. This is now handled by allocating
1570 // guard pages (red zone) and stackbanging.
1571 // Initially the alternate signal stack mechanism was removed because
1572 // it did not work with T1 llibthread. Alternate
1573 // signal stacks MUST have all threads bound to lwps. Applications
1574 // can create their own threads and attach them without their being
1575 // bound under T1. This is frequently the case for the primordial thread.
1576 // If we were ever to reenable this mechanism we would need to
1577 // use the dynamic check for T2 libthread.
1579 os::Solaris::init_thread_fpu_state();
1580 std::set_terminate(_handle_uncaught_cxx_exception);
1581 }
1585 // Free Solaris resources related to the OSThread
1586 void os::free_thread(OSThread* osthread) {
1587 assert(osthread != NULL, "os::free_thread but osthread not set");
1590 // We are told to free resources of the argument thread,
1591 // but we can only really operate on the current thread.
1592 // The main thread must take the VMThread down synchronously
1593 // before the main thread exits and frees up CodeHeap
1594 guarantee((Thread::current()->osthread() == osthread
1595 || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
1596 if (Thread::current()->osthread() == osthread) {
1597 // Restore caller's signal mask
1598 sigset_t sigmask = osthread->caller_sigmask();
1599 thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
1600 }
1601 delete osthread;
1602 }
// Release a thread that os::create_thread() created THR_SUSPENDED,
// letting it begin executing java_start().
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}
// Returns the Solaris thread id of the calling thread.
intx os::current_thread_id() {
  return (intx)thr_self();
}
1614 static pid_t _initial_pid = 0;
1616 int os::current_process_id() {
1617 return (int)(_initial_pid ? _initial_pid : getpid());
1618 }
1620 int os::allocate_thread_local_storage() {
1621 // %%% in Win32 this allocates a memory segment pointed to by a
1622 // register. Dan Stein can implement a similar feature in
1623 // Solaris. Alternatively, the VM can do the same thing
1624 // explicitly: malloc some storage and keep the pointer in a
1625 // register (which is part of the thread's context) (or keep it
1626 // in TLS).
1627 // %%% In current versions of Solaris, thr_self and TSD can
1628 // be accessed via short sequences of displaced indirections.
1629 // The value of thr_self is available as %g7(36).
1630 // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
1631 // assuming that the current thread already has a value bound to k.
1632 // It may be worth experimenting with such access patterns,
1633 // and later having the parameters formally exported from a Solaris
1634 // interface. I think, however, that it will be faster to
1635 // maintain the invariant that %g2 always contains the
1636 // JavaThread in Java code, and have stubs simply
1637 // treat %g2 as a caller-save register, preserving it in a %lN.
1638 thread_key_t tk;
1639 if (thr_keycreate( &tk, NULL ) )
1640 fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
1641 "(%s)", strerror(errno)));
1642 return int(tk);
1643 }
// Intentionally a no-op on Solaris: TSD keys are not reclaimed.
// 'index' is the key returned by os::allocate_thread_local_storage().
void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}
1651 #define SMALLINT 32 // libthread allocate for tsd_common is a version specific
1652 // small number - point is NO swap space available
1653 void os::thread_local_storage_at_put(int index, void* value) {
1654 // %%% this is used only in threadLocalStorage.cpp
1655 if (thr_setspecific((thread_key_t)index, value)) {
1656 if (errno == ENOMEM) {
1657 vm_exit_out_of_memory(SMALLINT, "thr_setspecific: out of swap space");
1658 } else {
1659 fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
1660 "(%s)", strerror(errno)));
1661 }
1662 } else {
1663 ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
1664 }
1665 }
1667 // This function could be called before TLS is initialized, for example, when
1668 // VM receives an async signal or when VM causes a fatal error during
1669 // initialization. Return NULL if thr_getspecific() fails.
1670 void* os::thread_local_storage_at(int index) {
1671 // %%% this is used only in threadLocalStorage.cpp
1672 void* r = NULL;
1673 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1674 }
1677 // gethrtime can move backwards if read from one cpu and then a different cpu
1678 // getTimeNanos is guaranteed to not move backward on Solaris
1679 // local spinloop created as faster for a CAS on an int than
1680 // a CAS on a 64bit jlong. Also Atomic::cmpxchg for jlong is not
1681 // supported on sparc v8 or pre supports_cx8 intel boxes.
1682 // oldgetTimeNanos for systems which do not support CAS on 64bit jlong
1683 // i.e. sparc v8 and pre supports_cx8 (i486) intel boxes
1684 inline hrtime_t oldgetTimeNanos() {
1685 int gotlock = LOCK_INVALID;
1686 hrtime_t newtime = gethrtime();
1688 for (;;) {
1689 // grab lock for max_hrtime
1690 int curlock = max_hrtime_lock;
1691 if (curlock & LOCK_BUSY) continue;
1692 if (gotlock = Atomic::cmpxchg(LOCK_BUSY, &max_hrtime_lock, LOCK_FREE) != LOCK_FREE) continue;
1693 if (newtime > max_hrtime) {
1694 max_hrtime = newtime;
1695 } else {
1696 newtime = max_hrtime;
1697 }
1698 // release lock
1699 max_hrtime_lock = LOCK_FREE;
1700 return newtime;
1701 }
1702 }
// gethrtime can move backwards if read from one cpu and then a different cpu
// getTimeNanos is guaranteed to not move backward on Solaris
// Monotonicity is maintained by tracking the global maximum observed time
// in max_hrtime via a single 64-bit CAS (when the platform supports it).
inline hrtime_t getTimeNanos() {
  if (VM_Version::supports_cx8()) {
    const hrtime_t now = gethrtime();
    // Use atomic long load since 32-bit x86 uses 2 registers to keep long.
    const hrtime_t prev = Atomic::load((volatile jlong*)&max_hrtime);
    if (now <= prev)  return prev;   // same or retrograde time;
    const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
    assert(obsv >= prev, "invariant");   // Monotonicity
    // If the CAS succeeded then we're done and return "now".
    // If the CAS failed and the observed value "obs" is >= now then
    // we should return "obs".  If the CAS failed and now > obs > prv then
    // some other thread raced this thread and installed a new value, in which case
    // we could either (a) retry the entire operation, (b) retry trying to install now
    // or (c) just return obs.  We use (c).   No loop is required although in some cases
    // we might discard a higher "now" value in deference to a slightly lower but freshly
    // installed obs value.   That's entirely benign -- it admits no new orderings compared
    // to (a) or (b) -- and greatly reduces coherence traffic.
    // We might also condition (c) on the magnitude of the delta between obs and now.
    // Avoiding excessive CAS operations to hot RW locations is critical.
    // See http://blogs.sun.com/dave/entry/cas_and_cache_trivia_invalidate
    return (prev == obsv) ? now : obsv ;
  } else {
    // Platform lacks 64-bit CAS: fall back to the int-spinlock variant.
    return oldgetTimeNanos();
  }
}
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // Ticks since first_hrtime (recorded at startup), scaled by hrtime_hz.
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}
1737 jlong os::elapsed_counter() {
1738 return (jlong)(getTimeNanos() - first_hrtime);
1739 }
1741 jlong os::elapsed_frequency() {
1742 return hrtime_hz;
1743 }
1745 // Return the real, user, and system times in seconds from an
1746 // arbitrary fixed point in the past.
1747 bool os::getTimesSecs(double* process_real_time,
1748 double* process_user_time,
1749 double* process_system_time) {
1750 struct tms ticks;
1751 clock_t real_ticks = times(&ticks);
1753 if (real_ticks == (clock_t) (-1)) {
1754 return false;
1755 } else {
1756 double ticks_per_second = (double) clock_tics_per_sec;
1757 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1758 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1759 // For consistency return the real time from getTimeNanos()
1760 // converted to seconds.
1761 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1763 return true;
1764 }
1765 }
1767 bool os::supports_vtime() { return true; }
1769 bool os::enable_vtime() {
1770 int fd = ::open("/proc/self/ctl", O_WRONLY);
1771 if (fd == -1)
1772 return false;
1774 long cmd[] = { PCSET, PR_MSACCT };
1775 int res = ::write(fd, cmd, sizeof(long) * 2);
1776 ::close(fd);
1777 if (res != sizeof(long) * 2)
1778 return false;
1780 return true;
1781 }
1783 bool os::vtime_enabled() {
1784 int fd = ::open("/proc/self/status", O_RDONLY);
1785 if (fd == -1)
1786 return false;
1788 pstatus_t status;
1789 int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
1790 ::close(fd);
1791 if (res != sizeof(pstatus_t))
1792 return false;
1794 return status.pr_flags & PR_MSACCT;
1795 }
// Current thread's virtual (CPU) time in seconds, from gethrvtime().
// Meaningful only when micro-state accounting is enabled (see enable_vtime).
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}
1801 // Used internally for comparisons only
1802 // getTimeMillis guaranteed to not move backwards on Solaris
1803 jlong getTimeMillis() {
1804 jlong nanotime = getTimeNanos();
1805 return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
1806 }
1808 // Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
1809 jlong os::javaTimeMillis() {
1810 timeval t;
1811 if (gettimeofday( &t, NULL) == -1)
1812 fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
1813 return jlong(t.tv_sec) * 1000 + jlong(t.tv_usec) / 1000;
1814 }
// Monotonic nanosecond clock for System.nanoTime(); see getTimeNanos().
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}
1820 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1821 info_ptr->max_value = ALL_64_BITS; // gethrtime() uses all 64 bits
1822 info_ptr->may_skip_backward = false; // not subject to resetting or drifting
1823 info_ptr->may_skip_forward = false; // not subject to resetting or drifting
1824 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
1825 }
1827 char * os::local_time_string(char *buf, size_t buflen) {
1828 struct tm t;
1829 time_t long_time;
1830 time(&long_time);
1831 localtime_r(&long_time, &t);
1832 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1833 t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1834 t.tm_hour, t.tm_min, t.tm_sec);
1835 return buf;
1836 }
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  // The user-supplied hook runs last, after VM-side cleanup.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// If dump_core is true, terminates via ::abort() so a core file is
// produced; otherwise exits with status 1.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    // In non-product builds, announce which thread is dumping core.
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// unused
void os::set_error_file(const char *logfile) {}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1895 static bool file_exists(const char* filename) {
1896 struct stat statbuf;
1897 if (filename == NULL || strlen(filename) == 0) {
1898 return false;
1899 }
1900 return os::stat(filename, &statbuf) == 0;
1901 }
// Compose a shared-library file name "lib<fname>.so" into 'buffer'.
// If 'pname' is a single directory it is prepended; if it is a
// path-separator-separated list, the first directory containing the
// library wins (the last candidate remains in 'buffer' if none exists).
void os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Quietly truncate on buffer overflow.  Should be an error.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a search path; split_path() heap-allocates the pieces,
    // which must be freed below with FREE_C_HEAP_ARRAY.
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
  }
}
// Thin wrapper over getcwd(3C): fills 'buf' with the current working
// directory and returns it, or NULL on failure.
const char* os::get_current_directory(char *buf, int buflen) {
  return getcwd(buf, buflen);
}
// check if addr is inside libjvm[_g].so
bool os::address_is_in_vm(address addr) {
  // Cached on first use.  A race here is benign: both threads dladdr the
  // same function in this library, so both compute the same base address.
  static address libjvm_base_addr;
  Dl_info dlinfo;

  if (libjvm_base_addr == NULL) {
    dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo);
    libjvm_base_addr = (address)dlinfo.dli_fbase;
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  // addr is in the VM iff the object containing it has the same base.
  if (dladdr((void *)addr, &dlinfo)) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}
// Function pointer for Solaris' dladdr1(3C), looked up at runtime
// (initialized in os::init(); see dll_address_to_function_name below).
// Stays NULL when the running system does not provide dladdr1.
typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;
1967 bool os::dll_address_to_function_name(address addr, char *buf,
1968 int buflen, int * offset) {
1969 Dl_info dlinfo;
1971 // dladdr1_func was initialized in os::init()
1972 if (dladdr1_func){
1973 // yes, we have dladdr1
1975 // Support for dladdr1 is checked at runtime; it may be
1976 // available even if the vm is built on a machine that does
1977 // not have dladdr1 support. Make sure there is a value for
1978 // RTLD_DL_SYMENT.
1979 #ifndef RTLD_DL_SYMENT
1980 #define RTLD_DL_SYMENT 1
1981 #endif
1982 #ifdef _LP64
1983 Elf64_Sym * info;
1984 #else
1985 Elf32_Sym * info;
1986 #endif
1987 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1988 RTLD_DL_SYMENT)) {
1989 if ((char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1990 if (buf != NULL) {
1991 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
1992 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1993 }
1994 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1995 return true;
1996 }
1997 }
1998 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
1999 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
2000 buf, buflen, offset, dlinfo.dli_fname)) {
2001 return true;
2002 }
2003 }
2004 if (buf != NULL) buf[0] = '\0';
2005 if (offset != NULL) *offset = -1;
2006 return false;
2007 } else {
2008 // no, only dladdr is available
2009 if (dladdr((void *)addr, &dlinfo)) {
2010 if (buf != NULL) {
2011 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen))
2012 jio_snprintf(buf, buflen, dlinfo.dli_sname);
2013 }
2014 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
2015 return true;
2016 } else if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != 0) {
2017 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
2018 buf, buflen, offset, dlinfo.dli_fname)) {
2019 return true;
2020 }
2021 }
2022 if (buf != NULL) buf[0] = '\0';
2023 if (offset != NULL) *offset = -1;
2024 return false;
2025 }
2026 }
2028 bool os::dll_address_to_library_name(address addr, char* buf,
2029 int buflen, int* offset) {
2030 Dl_info dlinfo;
2032 if (dladdr((void*)addr, &dlinfo)){
2033 if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
2034 if (offset) *offset = addr - (address)dlinfo.dli_fbase;
2035 return true;
2036 } else {
2037 if (buf) buf[0] = '\0';
2038 if (offset) *offset = -1;
2039 return false;
2040 }
2041 }
2043 // Prints the names and full paths of all opened dynamic libraries
2044 // for current process
2045 void os::print_dll_info(outputStream * st) {
2046 Dl_info dli;
2047 void *handle;
2048 Link_map *map;
2049 Link_map *p;
2051 st->print_cr("Dynamic libraries:"); st->flush();
2053 if (!dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli)) {
2054 st->print_cr("Error: Cannot print dynamic libraries.");
2055 return;
2056 }
2057 handle = dlopen(dli.dli_fname, RTLD_LAZY);
2058 if (handle == NULL) {
2059 st->print_cr("Error: Cannot print dynamic libraries.");
2060 return;
2061 }
2062 dlinfo(handle, RTLD_DI_LINKMAP, &map);
2063 if (map == NULL) {
2064 st->print_cr("Error: Cannot print dynamic libraries.");
2065 return;
2066 }
2068 while (map->l_prev != NULL)
2069 map = map->l_prev;
2071 while (map != NULL) {
2072 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
2073 map = map->l_next;
2074 }
2076 dlclose(handle);
2077 }
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure, returns NULL and leaves a diagnostic message in 'ebuf':
// the dlerror() text, possibly followed by an architecture-mismatch
// explanation derived from the library's ELF header.

void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
{
  void * result= ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Successful loading
    return result;
  }

  Elf32_Ehdr elf_head;

  // Read system error message into ebuf
  // It may or may not be overwritten below
  ::strncpy(ebuf, ::dlerror(), ebuflen-1);
  ebuf[ebuflen-1]='\0';
  int diag_msg_max_length=ebuflen-strlen(ebuf);
  char* diag_msg_buf=ebuf+strlen(ebuf);

  if (diag_msg_max_length==0) {
    // No more space in ebuf for additional diagnostics message
    return NULL;
  }


  int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);

  if (file_descriptor < 0) {
    // Can't open library, report dlerror() message
    return NULL;
  }

  // The ELF header is the same size in 32- and 64-bit files up through
  // the fields read below, so reading an Elf32_Ehdr suffices.
  bool failed_to_read_elf_head=
    (sizeof(elf_head)!=
        (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;

  ::close(file_descriptor);
  if (failed_to_read_elf_head) {
    // file i/o error - report dlerror() msg
    return NULL;
  }

  typedef struct {
    Elf32_Half  code;         // Actual value as defined in elf.h
    Elf32_Half  compat_class; // Compatibility of archs at VM's sense
    char        elf_class;    // 32 or 64 bit
    char        endianess;    // MSB or LSB
    char*       name;         // String representation
  } arch_t;

  static const arch_t arch_array[]={
    {EM_386,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_486,         EM_386,     ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
    {EM_IA_64,       EM_IA_64,   ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
    {EM_X86_64,      EM_X86_64,  ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
    {EM_SPARC,       EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARC32PLUS, EM_SPARC,   ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
    {EM_SPARCV9,     EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
    {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
    {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
    {EM_ARM,         EM_ARM,     ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
  };

  // Select the machine code of the platform this VM was built for.
  #if  (defined IA32)
    static  Elf32_Half running_arch_code=EM_386;
  #elif   (defined AMD64)
    static  Elf32_Half running_arch_code=EM_X86_64;
  #elif  (defined IA64)
    static  Elf32_Half running_arch_code=EM_IA_64;
  #elif  (defined __sparc) && (defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARCV9;
  #elif  (defined __sparc) && (!defined _LP64)
    static  Elf32_Half running_arch_code=EM_SPARC;
  #elif  (defined __powerpc64__)
    static  Elf32_Half running_arch_code=EM_PPC64;
  #elif  (defined __powerpc__)
    static  Elf32_Half running_arch_code=EM_PPC;
  #elif (defined ARM)
    static  Elf32_Half running_arch_code=EM_ARM;
  #else
    #error Method os::dll_load requires that one of following is defined:\
         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
  #endif

  // Identify compatability class for VM's architecture and library's architecture
  // Obtain string descriptions for architectures

  arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
  int running_arch_index=-1;

  for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
    if (running_arch_code == arch_array[i].code) {
      running_arch_index    = i;
    }
    if (lib_arch.code == arch_array[i].code) {
      lib_arch.compat_class = arch_array[i].compat_class;
      lib_arch.name         = arch_array[i].name;
    }
  }

  assert(running_arch_index != -1,
    "Didn't find running architecture code (running_arch_code) in arch_array");
  if (running_arch_index == -1) {
    // Even though running architecture detection failed
    // we may still continue with reporting dlerror() message
    return NULL;
  }

  // Append the most specific mismatch diagnosis we can determine.
  if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
    return NULL;
  }

  if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
    ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
    return NULL;
  }

  if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
    if ( lib_arch.name!=NULL ) {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
        " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
        lib_arch.name, arch_array[running_arch_index].name);
    } else {
      ::snprintf(diag_msg_buf, diag_msg_max_length-1,
      " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
        lib_arch.code,
        arch_array[running_arch_index].name);
    }
  }

  return NULL;
}
// Resolve 'name' in the shared object identified by 'handle' (dlsym(3C)).
void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}
2219 int os::stat(const char *path, struct stat *sbuf) {
2220 char pathbuf[MAX_PATH];
2221 if (strlen(path) > MAX_PATH - 1) {
2222 errno = ENAMETOOLONG;
2223 return -1;
2224 }
2225 os::native_path(strcpy(pathbuf, path));
2226 return ::stat(pathbuf, sbuf);
2227 }
2229 static bool _print_ascii_file(const char* filename, outputStream* st) {
2230 int fd = ::open(filename, O_RDONLY);
2231 if (fd == -1) {
2232 return false;
2233 }
2235 char buf[32];
2236 int bytes;
2237 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2238 st->print_raw(buf, bytes);
2239 }
2241 ::close(fd);
2243 return true;
2244 }
2246 void os::print_os_info(outputStream* st) {
2247 st->print("OS:");
2249 if (!_print_ascii_file("/etc/release", st)) {
2250 st->print("Solaris");
2251 }
2252 st->cr();
2254 // kernel
2255 st->print("uname:");
2256 struct utsname name;
2257 uname(&name);
2258 st->print(name.sysname); st->print(" ");
2259 st->print(name.release); st->print(" ");
2260 st->print(name.version); st->print(" ");
2261 st->print(name.machine);
2263 // libthread
2264 if (os::Solaris::T2_libthread()) st->print(" (T2 libthread)");
2265 else st->print(" (T1 libthread)");
2266 st->cr();
2268 // rlimit
2269 st->print("rlimit:");
2270 struct rlimit rlim;
2272 st->print(" STACK ");
2273 getrlimit(RLIMIT_STACK, &rlim);
2274 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2275 else st->print("%uk", rlim.rlim_cur >> 10);
2277 st->print(", CORE ");
2278 getrlimit(RLIMIT_CORE, &rlim);
2279 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2280 else st->print("%uk", rlim.rlim_cur >> 10);
2282 st->print(", NOFILE ");
2283 getrlimit(RLIMIT_NOFILE, &rlim);
2284 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2285 else st->print("%d", rlim.rlim_cur);
2287 st->print(", AS ");
2288 getrlimit(RLIMIT_AS, &rlim);
2289 if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
2290 else st->print("%uk", rlim.rlim_cur >> 10);
2291 st->cr();
2293 // load average
2294 st->print("load average:");
2295 double loadavg[3];
2296 os::loadavg(loadavg, 3);
2297 st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
2298 st->cr();
2299 }
2302 static bool check_addr0(outputStream* st) {
2303 jboolean status = false;
2304 int fd = ::open("/proc/self/map",O_RDONLY);
2305 if (fd >= 0) {
2306 prmap_t p;
2307 while(::read(fd, &p, sizeof(p)) > 0) {
2308 if (p.pr_vaddr == 0x0) {
2309 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
2310 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2311 st->print("Access:");
2312 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-");
2313 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2314 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-");
2315 st->cr();
2316 status = true;
2317 }
2318 ::close(fd);
2319 }
2320 }
2321 return status;
2322 }
// Platform-specific CPU details; nothing extra to report on Solaris.
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}
// Print page size, physical/free memory, and warn about any mapping at
// address 0 (see check_addr0 above).
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
  (void) check_addr0(st);
}
// Taken from /usr/include/sys/machsig.h  Supposed to be architecture specific
// but they're the same for all the solaris architectures that we support.
// Indexed by si_code; index 0 is a placeholder for "no/unknown code".
const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
                          "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
                          "ILL_COPROC", "ILL_BADSTK" };

const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
                          "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
                          "FPE_FLTINV", "FPE_FLTSUB" };

const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };

const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
// Render a siginfo_t on 'st': signal name, errno, symbolic si_code (for
// the signals whose codes we know) and faulting address where relevant.
// Also flags faults that fall inside the CDS shared archive mapping.
void os::print_siginfo(outputStream* st, void* siginfo) {
  st->print("siginfo:");

  const int buflen = 100;
  char buf[buflen];
  siginfo_t *si = (siginfo_t*)siginfo;
  st->print("si_signo=%s: ", os::exception_name(si->si_signo, buf, buflen));
  char *err = strerror(si->si_errno);
  if (si->si_errno != 0 && err != NULL) {
    st->print("si_errno=%s", err);
  } else {
    st->print("si_errno=%d", si->si_errno);
  }
  const int c = si->si_code;
  assert(c > 0, "unexpected si_code");
  // The bound in each case is the last valid index of the name table.
  switch (si->si_signo) {
  case SIGILL:
    st->print(", si_code=%d (%s)", c, c > 8 ? "" : ill_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGFPE:
    st->print(", si_code=%d (%s)", c, c > 9 ? "" : fpe_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGSEGV:
    st->print(", si_code=%d (%s)", c, c > 2 ? "" : segv_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  case SIGBUS:
    st->print(", si_code=%d (%s)", c, c > 3 ? "" : bus_names[c]);
    st->print(", si_addr=" PTR_FORMAT, si->si_addr);
    break;
  default:
    st->print(", si_code=%d", si->si_code);
    // no si_addr
  }

  if ((si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive."   \
              " Mapped file inaccessible during execution, "    \
              " possible disk/network problem.");
    }
  }
  st->cr();
}
// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
// Highest signal number + 1 on this system (set in init_signal_mem).
static int Maxsignum = 0;
// Per-signal sa_flags the VM installed; sized in init_signal_mem.
static int *ourSigFlags = NULL;

extern "C" void sigINTRHandler(int, siginfo_t*, void*);
// Return the sa_flags the VM recorded for 'sig' when installing handlers.
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}
// Record the sa_flags the VM used when installing its handler for 'sig'.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}
// Format a human-readable name for a signal handler address into 'buf':
// "library+0xoffset" when the containing library is known, otherwise the
// raw pointer value.  Returns buf.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // NOTE(review): p1 points into buf, so this jio_snprintf reads its
    // "%s" argument from the buffer it is writing — overlapping
    // source/destination is formally undefined; confirm jio_snprintf
    // tolerates it or copy the basename aside first.
    jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}
// Print one line describing the currently installed handler for 'sig':
// handler identity (SIG_DFL/SIG_IGN/library+offset), sa_mask and
// sa_flags, plus a warning if a VM-installed handler's flags were
// changed behind our back.
static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen) {
  struct sigaction sa;

  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                  : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=" PTR32_FORMAT, *(uint32_t*)&sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags="   PTR32_FORMAT, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}
// Print one line per signal the VM cares about (see print_signal_handler).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}
// Cache for os::jvm_path(); filled lazily on first call.
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so or libjvm_g.so
// Write the full path of the running libjvm[_g].so into 'buf' (which
// must be at least MAXPATHLEN bytes).  The result is cached in
// saved_jvm_path.  Under the gamma launcher the path may be rewritten to
// a JAVA_HOME-relative ".../hotspot/libjvm[_g].so" location.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  realpath((char *)dlinfo.dli_fname, buf);

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).
    // Walk back over the last five '/'-separated components of the path.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int   len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so" or "libjvm_g.so".
        // 'p' becomes "_g" or "" and is re-appended to the fake path below.
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");
        p = strstr(p, "_g") ? "_g" : "";

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm[_g].so" instead of
          // "libjvm"debug_only("_g")".so" since for fastdebug version
          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strcpy(saved_jvm_path, buf);
}
// JNI symbol decoration for this platform: none on either side.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
2593 // This method is a copy of JDK's sysGetLastErrorString
2594 // from src/solaris/hpi/src/system_md.c
2596 size_t os::lasterror(char *buf, size_t len) {
2598 if (errno == 0) return 0;
2600 const char *s = ::strerror(errno);
2601 size_t n = ::strlen(s);
2602 if (n >= len) {
2603 n = len - 1;
2604 }
2605 ::strncpy(buf, s, n);
2606 buf[n] = '\0';
2607 return n;
2608 }
// sun.misc.Signal
// Handler installed for signals delegated to sun.misc.Signal listeners;
// it merely queues the signal for the Java-level signal dispatcher.

extern "C" {
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
       os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}
// Address of the sun.misc.Signal user handler above, for os::signal().
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
// Function-pointer types matching sigaction's sa_handler / sa_sigaction
// fields; C linkage to match what the kernel invokes.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
// Install 'handler' for 'signal_number' via sigaction(2).  Returns the
// previous handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  // NOTE(review): SA_RESTART & ~SA_RESETHAND evaluates to just SA_RESTART
  // (the flags are distinct bits); presumably "restart, without
  // reset-hand" was intended — the net effect is the same, but confirm.
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct))
    // -1 means registration failed
    return (void *)-1;

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
// Deliver 'signal_number' to this process (raise(3C)).
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2652 /*
2653 * The following code is moved from os.cpp for making this
2654 * code platform specific, which it is by its very nature.
2655 */
2657 // a counter for each possible signal value
2658 static int Sigexit = 0;
2659 static int Maxlibjsigsigs;
2660 static jint *pending_signals = NULL;
2661 static int *preinstalled_sigs = NULL;
2662 static struct sigaction *chainedsigactions = NULL;
2663 static sema_t sig_sem;
2664 typedef int (*version_getting_t)();
2665 version_getting_t os::Solaris::get_libjsig_version = NULL;
2666 static int libjsigversion = NULL;
// Pseudo-signal number used to tell the signal thread to exit.
int os::sigexitnum_pd() {
  assert(Sigexit > 0, "signal memory not yet initialized");
  return Sigexit;
}
2673 void os::Solaris::init_signal_mem() {
2674 // Initialize signal structures
2675 Maxsignum = SIGRTMAX;
2676 Sigexit = Maxsignum+1;
2677 assert(Maxsignum >0, "Unable to obtain max signal number");
2679 Maxlibjsigsigs = Maxsignum;
2681 // pending_signals has one int per signal
2682 // The additional signal is for SIGEXIT - exit signal to signal_thread
2683 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1));
2684 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2686 if (UseSignalChaining) {
2687 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2688 * (Maxsignum + 1));
2689 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2690 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1));
2691 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2692 }
2693 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ));
2694 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2695 }
2697 void os::signal_init_pd() {
2698 int ret;
2700 ret = ::sema_init(&sig_sem, 0, NULL, NULL);
2701 assert(ret == 0, "sema_init() failed");
2702 }
2704 void os::signal_notify(int signal_number) {
2705 int ret;
2707 Atomic::inc(&pending_signals[signal_number]);
2708 ret = ::sema_post(&sig_sem);
2709 assert(ret == 0, "sema_post() failed");
2710 }
// Scan pending_signals for a raised signal and claim it by atomically
// decrementing its counter; the CAS guarantees each occurrence is
// consumed by exactly one thread.  With wait_for_signal, block on
// sig_sem until a signal arrives (cooperating with the external-suspend
// protocol); otherwise return -1 immediately when none is pending.
static int check_pending_signals(bool wait_for_signal) {
  int ret;
  while (true) {
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      while((ret = ::sema_wait(&sig_sem)) == EINTR)
          ;
      assert(ret == 0, "sema_wait() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post so the wakeup isn't lost, then cooperate with the suspend.
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2753 int os::signal_lookup() {
2754 return check_pending_signals(false);
2755 }
2757 int os::signal_wait() {
2758 return check_pending_signals(true);
2759 }
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// Cached system page size; stays -1 until os::init() has run (the accessors
// below assert on that).
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
// clear this var if support is not available.
static bool has_map_align = true;
2770 int os::vm_page_size() {
2771 assert(page_size != -1, "must call os::init");
2772 return page_size;
2773 }
2775 // Solaris allocates memory by pages.
2776 int os::vm_allocation_granularity() {
2777 assert(page_size != -1, "must call os::init");
2778 return page_size;
2779 }
2781 bool os::commit_memory(char* addr, size_t bytes, bool exec) {
2782 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2783 size_t size = bytes;
2784 char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
2785 if (res != NULL) {
2786 if (UseNUMAInterleaving) {
2787 numa_make_global(addr, bytes);
2788 }
2789 return true;
2790 }
2791 return false;
2792 }
2794 bool os::commit_memory(char* addr, size_t bytes, size_t alignment_hint,
2795 bool exec) {
2796 if (commit_memory(addr, bytes, exec)) {
2797 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
2798 // If the large page size has been set and the VM
2799 // is using large pages, use the large page size
2800 // if it is smaller than the alignment hint. This is
2801 // a case where the VM wants to use a larger alignment size
2802 // for its own reasons but still want to use large pages
2803 // (which is what matters to setting the mpss range.
2804 size_t page_size = 0;
2805 if (large_page_size() < alignment_hint) {
2806 assert(UseLargePages, "Expected to be here for large page use only");
2807 page_size = large_page_size();
2808 } else {
2809 // If the alignment hint is less than the large page
2810 // size, the VM wants a particular alignment (thus the hint)
2811 // for internal reasons. Try to set the mpss range using
2812 // the alignment_hint.
2813 page_size = alignment_hint;
2814 }
2815 // Since this is a hint, ignore any failures.
2816 (void)Solaris::set_mpss_range(addr, bytes, page_size);
2817 }
2818 return true;
2819 }
2820 return false;
2821 }
2823 // Uncommit the pages in a specified region.
2824 void os::free_memory(char* addr, size_t bytes, size_t alignment_hint) {
2825 if (madvise(addr, bytes, MADV_FREE) < 0) {
2826 debug_only(warning("MADV_FREE failed."));
2827 return;
2828 }
2829 }
2831 bool os::create_stack_guard_pages(char* addr, size_t size) {
2832 return os::commit_memory(addr, size);
2833 }
2835 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2836 return os::uncommit_memory(addr, size);
2837 }
2839 // Change the page size in a given range.
2840 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2841 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2842 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2843 if (UseLargePages && UseMPSS) {
2844 Solaris::set_mpss_range(addr, bytes, alignment_hint);
2845 }
2846 }
2848 // Tell the OS to make the range local to the first-touching LWP
2849 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2850 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2851 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2852 debug_only(warning("MADV_ACCESS_LWP failed."));
2853 }
2854 }
2856 // Tell the OS that this range would be accessed from different LWPs.
2857 void os::numa_make_global(char *addr, size_t bytes) {
2858 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2859 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2860 debug_only(warning("MADV_ACCESS_MANY failed."));
2861 }
2862 }
2864 // Get the number of the locality groups.
2865 size_t os::numa_get_groups_num() {
2866 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2867 return n != -1 ? n : 1;
2868 }
2870 // Get a list of leaf locality groups. A leaf lgroup is group that
2871 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2872 // board. An LWP is assigned to one of these groups upon creation.
2873 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2874 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2875 ids[0] = 0;
2876 return 1;
2877 }
2878 int result_size = 0, top = 1, bottom = 0, cur = 0;
2879 for (int k = 0; k < size; k++) {
2880 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2881 (Solaris::lgrp_id_t*)&ids[top], size - top);
2882 if (r == -1) {
2883 ids[0] = 0;
2884 return 1;
2885 }
2886 if (!r) {
2887 // That's a leaf node.
2888 assert (bottom <= cur, "Sanity check");
2889 // Check if the node has memory
2890 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2891 NULL, 0, LGRP_RSRC_MEM) > 0) {
2892 ids[bottom++] = ids[cur];
2893 }
2894 }
2895 top += r;
2896 cur++;
2897 }
2898 if (bottom == 0) {
2899 // Handle a situation, when the OS reports no memory available.
2900 // Assume UMA architecture.
2901 ids[0] = 0;
2902 return 1;
2903 }
2904 return bottom;
2905 }
2907 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2908 bool os::numa_topology_changed() {
2909 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2910 if (is_stale != -1 && is_stale) {
2911 Solaris::lgrp_fini(Solaris::lgrp_cookie());
2912 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2913 assert(c != 0, "Failure to initialize LGRP API");
2914 Solaris::set_lgrp_cookie(c);
2915 return true;
2916 }
2917 return false;
2918 }
2920 // Get the group id of the current LWP.
2921 int os::numa_get_group_id() {
2922 int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
2923 if (lgrp_id == -1) {
2924 return 0;
2925 }
2926 const int size = os::numa_get_groups_num();
2927 int *ids = (int*)alloca(size * sizeof(int));
2929 // Get the ids of all lgroups with memory; r is the count.
2930 int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
2931 (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
2932 if (r <= 0) {
2933 return 0;
2934 }
2935 return ids[os::random() % r];
2936 }
// Request information about the page.
// Fills info->size and info->lgrp_id for the page containing 'start' via
// the meminfo interface. Returns false if the query fails or the address
// itself is reported invalid; individual fields default to 0 / -1 when
// their validity bit is not set.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  info->size = 0;
  info->lgrp_id = -1;

  // Validity bits: bit 0 covers the address itself, bit 1 covers
  // outdata[0] (lgroup), bit 2 covers outdata[1] (page size).
  // NOTE(review): same bit layout is relied on in scan_pages(); confirm
  // against the meminfo(2) man page.
  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}
// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Pages are queried in batches of up to MAX_MEMINFO_CNT per meminfo() call.
// Returns 'end' if the whole range matches page_expected, the address of the
// first differing page (its attributes stored in page_found), or NULL if a
// query fails or an address is reported invalid.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT];
  uint_t validity[MAX_MEMINFO_CNT];

  // Stride by the larger of the system page size and the expected size.
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    // Build one batch of page addresses.
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in the batch that differs from page_expected.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        // Compare page size (bit 2 => outdata[types*i+1] valid).
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          if (page_expected->size != 0) {
            break;
          }

        // Compare lgroup (bit 1 => outdata[types*i] valid); only checked
        // when the caller expects a specific lgroup (> 0).
        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        // The address itself is invalid.
        return NULL;
      }
    }

    if (i != addrs_count) {
      // A mismatching page was found; report its attributes.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}
3027 bool os::uncommit_memory(char* addr, size_t bytes) {
3028 size_t size = bytes;
3029 // Map uncommitted pages PROT_NONE so we fail early if we touch an
3030 // uncommitted page. Otherwise, the read/write might succeed if we
3031 // have enough swap space to back the physical page.
3032 return
3033 NULL != Solaris::mmap_chunk(addr, size,
3034 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
3035 PROT_NONE);
3036 }
3038 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
3039 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
3041 if (b == MAP_FAILED) {
3042 return NULL;
3043 }
3044 return b;
3045 }
3047 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
3048 char* addr = requested_addr;
3049 int flags = MAP_PRIVATE | MAP_NORESERVE;
3051 assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
3053 if (fixed) {
3054 flags |= MAP_FIXED;
3055 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
3056 flags |= MAP_ALIGN;
3057 addr = (char*) alignment_hint;
3058 }
3060 // Map uncommitted pages PROT_NONE so we fail early if we touch an
3061 // uncommitted page. Otherwise, the read/write might succeed if we
3062 // have enough swap space to back the physical page.
3063 return mmap_chunk(addr, bytes, flags, PROT_NONE);
3064 }
3066 char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
3067 char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
3069 guarantee(requested_addr == NULL || requested_addr == addr,
3070 "OS failed to return requested mmap address.");
3071 return addr;
3072 }
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
// Strategy: first try the kernel's address hint; if that fails, repeatedly
// allocate blocks, trimming off the parts that overlap the requested region,
// until an allocation lands exactly at requested_addr (or max_tries is
// exhausted). All partial allocations are unmapped before returning.

char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions. The size of the gap
  // is dependent on the requested size and the MMU. Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);
  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    // Hint was not honored; give the mapping back and fall through to the
    // iterative method below.
    unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work. Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are size_t, so the
      // ">= 0" tests below are always true; the "< bytes" comparison does
      // the real filtering (an underflow wraps to a huge value and fails
      // it), so behavior is correct despite the tautology.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        // The new block overlaps the top of the requested region; keep
        // only the non-overlapping remainder.
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}
3199 bool os::release_memory(char* addr, size_t bytes) {
3200 size_t size = bytes;
3201 return munmap(addr, size) == 0;
3202 }
3204 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3205 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3206 "addr must be page aligned");
3207 int retVal = mprotect(addr, bytes, prot);
3208 return retVal == 0;
3209 }
3211 // Protect memory (Used to pass readonly pages through
3212 // JNI GetArray<type>Elements with empty arrays.)
3213 // Also, used for serialization page and for compressed oops null pointer
3214 // checking.
3215 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3216 bool is_committed) {
3217 unsigned int p = 0;
3218 switch (prot) {
3219 case MEM_PROT_NONE: p = PROT_NONE; break;
3220 case MEM_PROT_READ: p = PROT_READ; break;
3221 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3222 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3223 default:
3224 ShouldNotReachHere();
3225 }
3226 // is_committed is unused.
3227 return solaris_mprotect(addr, bytes, p);
3228 }
3230 // guard_memory and unguard_memory only happens within stack guard pages.
3231 // Since ISM pertains only to the heap, guard and unguard memory should not
3232 /// happen with an ISM region.
3233 bool os::guard_memory(char* addr, size_t bytes) {
3234 return solaris_mprotect(addr, bytes, PROT_NONE);
3235 }
3237 bool os::unguard_memory(char* addr, size_t bytes) {
3238 return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3239 }
3241 // Large page support
3243 // UseLargePages is the master flag to enable/disable large page memory.
3244 // UseMPSS and UseISM are supported for compatibility reasons. Their combined
3245 // effects can be described in the following table:
3246 //
3247 // UseLargePages UseMPSS UseISM
3248 // false * * => UseLargePages is the master switch, turning
3249 // it off will turn off both UseMPSS and
3250 // UseISM. VM will not use large page memory
3251 // regardless the settings of UseMPSS/UseISM.
3252 // true false false => Unless future Solaris provides other
3253 // mechanism to use large page memory, this
3254 // combination is equivalent to -UseLargePages,
3255 // VM will not use large page memory
3256 // true true false => JVM will use MPSS for large page memory.
3257 // This is the default behavior.
3258 // true false true => JVM will use ISM for large page memory.
3259 // true true true => JVM will use ISM if it is available.
3260 // Otherwise, JVM will fall back to MPSS.
// Because ISM is now available on all
3262 // supported Solaris versions, this combination
3263 // is equivalent to +UseISM -UseMPSS.
// Selected large page size in bytes; 0 until one of the sanity checks
// invoked from os::large_page_init() fills it in.
static size_t _large_page_size = 0;
3267 bool os::Solaris::ism_sanity_check(bool warn, size_t * page_size) {
3268 // x86 uses either 2M or 4M page, depending on whether PAE (Physical Address
3269 // Extensions) mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. Sparc
3270 // can support multiple page sizes.
3272 // Don't bother to probe page size because getpagesizes() comes with MPSS.
3273 // ISM is only recommended on old Solaris where there is no MPSS support.
3274 // Simply choose a conservative value as default.
3275 *page_size = LargePageSizeInBytes ? LargePageSizeInBytes :
3276 SPARC_ONLY(4 * M) IA32_ONLY(4 * M) AMD64_ONLY(2 * M)
3277 ARM_ONLY(2 * M);
3279 // ISM is available on all supported Solaris versions
3280 return true;
3281 }
// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 1; i < len; i++) {
    const size_t val = array[i];
    // Shift smaller elements right, then drop val into the gap.
    int j = i;
    while (j > 0 && array[j - 1] < val) {
      array[j] = array[j - 1];
      --j;
    }
    array[j] = val;
  }
}
// Probe the OS for MPSS (multiple page size support) and select the usable
// page sizes. On success: os::_page_sizes holds the chosen sizes in
// descending order, always including the default page size and terminated
// by a 0 sentinel; *page_size receives the largest chosen size; returns true.
bool os::Solaris::mpss_sanity_check(bool warn, size_t * page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  // Skip entries above the limit (the array is sorted descending).
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  // Compact the acceptable sizes to the front of the array.
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  // Always include the default page size, then re-terminate with a sentinel.
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}
3360 void os::large_page_init() {
3361 if (!UseLargePages) {
3362 UseISM = false;
3363 UseMPSS = false;
3364 return;
3365 }
3367 // print a warning if any large page related flag is specified on command line
3368 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3369 !FLAG_IS_DEFAULT(UseISM) ||
3370 !FLAG_IS_DEFAULT(UseMPSS) ||
3371 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3372 UseISM = UseISM &&
3373 Solaris::ism_sanity_check(warn_on_failure, &_large_page_size);
3374 if (UseISM) {
3375 // ISM disables MPSS to be compatible with old JDK behavior
3376 UseMPSS = false;
3377 _page_sizes[0] = _large_page_size;
3378 _page_sizes[1] = vm_page_size();
3379 }
3381 UseMPSS = UseMPSS &&
3382 Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3384 UseLargePages = UseISM || UseMPSS;
3385 }
3387 bool os::Solaris::set_mpss_range(caddr_t start, size_t bytes, size_t align) {
3388 // Signal to OS that we want large pages for addresses
3389 // from addr, addr + bytes
3390 struct memcntl_mha mpss_struct;
3391 mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3392 mpss_struct.mha_pagesize = align;
3393 mpss_struct.mha_flags = 0;
3394 if (memcntl(start, bytes, MC_HAT_ADVISE,
3395 (caddr_t) &mpss_struct, 0, 0) < 0) {
3396 debug_only(warning("Attempt to use MPSS failed."));
3397 return false;
3398 }
3399 return true;
3400 }
// Reserve an ISM large page region: create an anonymous SysV shared memory
// segment of 'size' bytes and attach it with SHM_SHARE_MMU. The 'addr' hint
// is ignored. Returns the attach address or NULL on failure.
char* os::reserve_memory_special(size_t size, char* addr, bool exec) {
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  assert(UseLargePages && UseISM, "only for ISM large pages");

  char* retAddr = NULL;
  int shmid;
  key_t ismKey;

  // Only warn when the user explicitly asked for large pages / ISM.
  bool warn_on_failure = UseISM &&
                        (!FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(UseISM) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes)
                        );
  char msg[128];

  ismKey = IPC_PRIVATE;

  // Create a large shared memory region to attach to based on size.
  // Currently, size is the total size of the heap
  shmid = shmget(ismKey, size, SHM_R | SHM_W | IPC_CREAT);
  if (shmid == -1){
     if (warn_on_failure) {
       jio_snprintf(msg, sizeof(msg), "Failed to reserve shared memory (errno = %d).", errno);
       warning(msg);
     }
     return NULL;
  }

  // Attach to the region
  retAddr = (char *) shmat(shmid, 0, SHM_SHARE_MMU | SHM_R | SHM_W);
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
  // will be deleted when it's detached by shmdt() or when the process
  // terminates. If shmat() is not successful this will remove the shared
  // segment immediately.
  shmctl(shmid, IPC_RMID, NULL);

  if (retAddr == (char *) -1) {
    if (warn_on_failure) {
      jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
      warning(msg);
    }
    return NULL;
  }
  if ((retAddr != NULL) && UseNUMAInterleaving) {
    numa_make_global(retAddr, size);
  }
  return retAddr;
}
3454 bool os::release_memory_special(char* base, size_t bytes) {
3455 // detaching the SHM segment will also delete it, see reserve_memory_special()
3456 int rslt = shmdt(base);
3457 return rslt == 0;
3458 }
// Accessor for the large page size chosen by os::large_page_init()
// (0 when large pages are not in use).
size_t os::large_page_size() {
  return _large_page_size;
}
3464 // MPSS allows application to commit large page memory on demand; with ISM
3465 // the entire memory region must be allocated as shared memory.
3466 bool os::can_commit_large_page_memory() {
3467 return UseISM ? false : true;
3468 }
3470 bool os::can_execute_large_page_memory() {
3471 return UseISM ? false : true;
3472 }
// Sleep for 'millis' ms via poll(), restarting after EINTR and after
// premature poll() returns caused by wall-clock changes (see 6751923).
// When 'interruptible', poll is issued through the INTERRUPTIBLE wrapper so
// Thread.interrupt() aborts the sleep with OS_INTRPT.
// Returns OS_OK when the full delay has elapsed, else an OS_* error code.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  // poll() takes an int timeout, so chop very long sleeps into
  // INT_MAX-sized chunks first.
  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
    /* Doing prevtime and newtime in microseconds doesn't help precision,
       and trying to round up to avoid lost milliseconds can result in a
       too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}
// Read calls from inside the vm need to perform state transitions
// Delegates to ::read via the INTERRUPTIBLE_RETURN_INT_VM wrapper (the
// "_VM" variant performs the thread-state transition mentioned above).
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
// Restartable variant: uses INTERRUPTIBLE_RETURN_INT, i.e. without the VM
// thread-state transition performed by os::read above.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
// Sleep the current thread for 'millis' ms. JavaThreads follow the
// _thread_blocked / suspend-equivalent protocol; millis <= 0 degenerates to
// a thr_yield(). Returns 0 for the yield case, else os_sleep()'s result.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}
3601 int os::naked_sleep() {
3602 // %% make the sleep time an integer flag. for now use 1 millisec.
3603 return os_sleep(1, false);
3604 }
3606 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3607 void os::infinite_sleep() {
3608 while (true) { // sleep forever ...
3609 ::sleep(100); // ... 100 seconds at a time
3610 }
3611 }
3613 // Used to convert frequent JVM_Yield() to nops
3614 bool os::dont_yield() {
3615 if (DontYieldALot) {
3616 static hrtime_t last_time = 0;
3617 hrtime_t diff = getTimeNanos() - last_time;
3619 if (diff < DontYieldALotInterval * 1000000)
3620 return true;
3622 last_time += diff;
3624 return false;
3625 }
3626 else {
3627 return false;
3628 }
3629 }
3631 // Caveat: Solaris os::yield() causes a thread-state transition whereas
3632 // the linux and win32 implementations do not. This should be checked.
3634 void os::yield() {
3635 // Yields to all threads with same or greater priority
3636 os::sleep(Thread::current(), 0, false);
3637 }
3639 // Note that yield semantics are defined by the scheduling class to which
3640 // the thread currently belongs. Typically, yield will _not yield to
3641 // other equal or higher priority threads that reside on the dispatch queues
3642 // of other CPUs.
3644 os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
3647 // On Solaris we found that yield_all doesn't always yield to all other threads.
3648 // There have been cases where there is a thread ready to execute but it doesn't
3649 // get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
3650 // The 1 millisecond wait doesn't seem long enough for the kernel to issue a
3651 // SIGWAITING signal which will cause a new lwp to be created. So we count the
3652 // number of times yield_all is called in the one loop and increase the sleep
3653 // time after 8 attempts. If this fails too we increase the concurrency level
3654 // so that the starving thread would get an lwp
3656 void os::yield_all(int attempts) {
3657 // Yields to all threads, including threads with lower priorities
3658 if (attempts == 0) {
3659 os::sleep(Thread::current(), 1, false);
3660 } else {
3661 int iterations = attempts % 30;
3662 if (iterations == 0 && !os::Solaris::T2_libthread()) {
3663 // thr_setconcurrency and _getconcurrency make sense only under T1.
3664 int noofLWPS = thr_getconcurrency();
3665 if (noofLWPS < (Threads::number_of_threads() + 2)) {
3666 thr_setconcurrency(thr_getconcurrency() + 1);
3667 }
3668 } else if (iterations < 25) {
3669 os::sleep(Thread::current(), 1, false);
3670 } else {
3671 os::sleep(Thread::current(), 10, false);
3672 }
3673 }
3674 }
3676 // Called from the tight loops to possibly influence time-sharing heuristics
3677 void os::loop_breaker(int attempts) {
3678 os::yield_all(attempts);
3679 }
3682 // Interface for setting lwp priorities. If we are using T2 libthread,
3683 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3684 // all of our threads will be assigned to real lwp's. Using the thr_setprio
3685 // function is meaningless in this mode so we must adjust the real lwp's priority
3686 // The routines below implement the getting and setting of lwp priorities.
3687 //
3688 // Note: There are three priority scales used on Solaris. Java priotities
3689 // which range from 1 to 10, libthread "thr_setprio" scale which range
3690 // from 0 to 127, and the current scheduling class of the process we
3691 // are running in. This is typically from -60 to +60.
3692 // The setting of the lwp priorities in done after a call to thr_setprio
3693 // so Java priorities are mapped to libthread priorities and we map from
3694 // the latter to lwp priorities. We don't keep priorities stored in
3695 // Java priorities since some of our worker threads want to set priorities
3696 // higher than all Java threads.
3697 //
3698 // For related information:
3699 // (1) man -s 2 priocntl
3700 // (2) man -s 4 priocntl
3701 // (3) man dispadmin
3702 // = librt.so
3703 // = libthread/common/rtsched.c - thrp_setlwpprio().
3704 // = ps -cL <pid> ... to validate priority.
3705 // = sched_get_priority_min and _max
3706 // pthread_create
3707 // sched_setparam
3708 // pthread_setschedparam
3709 //
3710 // Assumptions:
3711 // + We assume that all threads in the process belong to the same
3712 // scheduling class. IE. an homogenous process.
3713 // + Must be root or in IA group to change change "interactive" attribute.
3714 // Priocntl() will fail silently. The only indication of failure is when
3715 // we read-back the value and notice that it hasn't changed.
3716 // + Interactive threads enter the runq at the head, non-interactive at the tail.
3717 // + For RT, change timeslice as well. Invariant:
3718 // constant "priority integral"
3719 // Konst == TimeSlice * (60-Priority)
3720 // Given a priority, compute appropriate timeslice.
3721 // + Higher numerical values have higher priority.
// sched class attributes
typedef struct {
  int schedPolicy; // classID
  int maxPrio;
  int minPrio;
} SchedInfo;

// Priority limits for the three Solaris scheduling classes we handle
// (time-sharing, interactive, real-time); filled in by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits;

#ifdef ASSERT
static int ReadBackValidate = 1;  // when non-zero, read back priorities after setting them
#endif
// Scheduling class and priority range of this process, discovered by
// lwp_priocntl_init().
static int myClass = 0;
static int myMin = 0;
static int myMax = 0;
static int myCur = 0;
static bool priocntl_enable = false;  // true only after successful init
// Call the version of priocntl suitable for all supported versions
// of Solaris. We need to call through this wrapper so that we can
// build on Solaris 9 and run on Solaris 8, 9 and 10.
//
// This code should be removed if we ever stop supporting Solaris 8
// and earlier releases.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
typedef long (*priocntl_type)(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg);
// All priocntl calls go through this pointer. It starts out aimed at the
// stub below and is patched to the real entry point on first use.
static priocntl_type priocntl_ptr = priocntl_stub;

// Stub to set the value of the real pointer, and then call the real
// function.

static long priocntl_stub(int pcver, idtype_t idtype, id_t id, int cmd, caddr_t arg) {
  // Try Solaris 8- name only.
  // Resolve the entry point lazily; subsequent calls bypass this stub
  // because priocntl_ptr is rebound to the resolved function.
  priocntl_type tmp = (priocntl_type)dlsym(RTLD_DEFAULT, "__priocntl");
  guarantee(tmp != NULL, "priocntl function not found.");
  priocntl_ptr = tmp;
  // NOTE(review): the incoming 'pcver' argument is ignored and PC_VERSION is
  // passed instead; every visible caller passes PC_VERSION, so this appears
  // benign — confirm before relying on the parameter.
  return (*priocntl_ptr)(PC_VERSION, idtype, id, cmd, arg);
}
// lwp_priocntl_init
//
// Try to determine the priority scale for our process.
//
// Return errno or 0 if OK.
//
static
int lwp_priocntl_init ()
{
  int rslt;
  pcinfo_t ClassInfo;
  pcparms_t ParmInfo;
  int i;

  if (!UseThreadPriorities) return 0;

  // We are using Bound threads, we need to determine our priority ranges
  if (os::Solaris::T2_libthread() || UseBoundThreads) {
    // If ThreadPriorityPolicy is 1, switch tables
    if (ThreadPriorityPolicy == 1) {
      for (i = 0 ; i < MaxPriority+1; i++)
        os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  // Not using Bound Threads, set to ThreadPolicy 1
  else {
    // With unbound T1 threads lwp priorities are not adjusted; install
    // the linear table and skip the priocntl probing entirely.
    for ( i = 0 ; i < MaxPriority+1; i++ ) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
    return 0;
  }

  // Get IDs for a set of well-known scheduling classes.
  // TODO-FIXME: GETCLINFO returns the current # of classes in the
  // the system. We should have a loop that iterates over the
  // classID values, which are known to be "small" integers.

  // Time-sharing class: priorities are symmetric around zero.
  strcpy(ClassInfo.pc_clname, "TS");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
  tsLimits.schedPolicy = ClassInfo.pc_cid;
  tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
  tsLimits.minPrio = -tsLimits.maxPrio;

  // Interactive class: also symmetric around zero.
  strcpy(ClassInfo.pc_clname, "IA");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
  iaLimits.schedPolicy = ClassInfo.pc_cid;
  iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
  iaLimits.minPrio = -iaLimits.maxPrio;

  // Real-time class: priorities run from 0 upward.
  strcpy(ClassInfo.pc_clname, "RT");
  ClassInfo.pc_cid = -1;
  rslt = (*priocntl_ptr)(PC_VERSION, P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
  if (rslt < 0) return errno;
  assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
  rtLimits.schedPolicy = ClassInfo.pc_cid;
  rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
  rtLimits.minPrio = 0;

  // Query our "current" scheduling class.
  // This will normally be IA,TS or, rarely, RT.
  memset (&ParmInfo, 0, sizeof(ParmInfo));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr) (PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo );
  if ( rslt < 0 ) return errno;
  myClass = ParmInfo.pc_cid;

  // We now know our scheduling classId, get specific information
  // the class.
  ClassInfo.pc_cid = myClass;
  ClassInfo.pc_clname[0] = 0;
  rslt = (*priocntl_ptr) (PC_VERSION, (idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo );
  if ( rslt < 0 ) return errno;

  if (ThreadPriorityVerbose)
    tty->print_cr ("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);

  // Re-read our parameters to derive the usable priority range,
  // clamped by any per-process user priority limit.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    myMin = rtLimits.minPrio;
    myMax = rtLimits.maxPrio;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    myMin = iaLimits.minPrio;
    myMax = iaLimits.maxPrio;
    myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    myMin = tsLimits.minPrio;
    myMax = tsLimits.maxPrio;
    myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict
  } else {
    // No clue - punt
    if (ThreadPriorityVerbose)
      tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
    return EINVAL; // no clue, punt
  }

  if (ThreadPriorityVerbose)
    tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);

  priocntl_enable = true; // Enable changing priorities
  return 0;
}
// Accessors for the class-specific parameter area embedded in a pcparms_t.
#define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale.
//
static int scale_to_lwp_priority (int rMin, int rMax, int x)
{
  // The libthread scale runs 0..127. The top value maps exactly onto
  // rMax (avoid round-down); everything else scales linearly from rMin.
  if (x == 127) {
    return rMax;
  }
  return ((x * (rMax - rMin)) / 128) + rMin;
}
// set_lwp_priority
//
// Set the priority of the lwp. This call should only be made
// when using bound threads (T2 threads are bound by default).
//
// Returns 0 on success, or an errno value on failure.
int set_lwp_priority (int ThreadID, int lwpid, int newPrio )
{
  int rslt;
  int Actual, Expected, prv;
  pcparms_t ParmInfo; // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack; // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  // Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_priority of thread " INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_priority(" INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Fetch the lwp's current scheduling parameters; we update them in
  // place below and write them back with PC_SETPARMS.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  // Scale newPrio (libthread 0..127 scale) into the lwp's scheduling
  // class scale and patch only the priority field(s), leaving the
  // remaining class parameters unchanged (the *_NOCHANGE sentinels).
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri = scale_to_lwp_priority (rtLimits.minPrio, rtLimits.maxPrio, newPrio);
    rtInfo->rt_tqsecs = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    // Respect the per-lwp user priority limit when scaling.
    int maxClamped = MIN2(iaLimits.maxPrio, (int)iaInfo->ia_uprilim);
    iaInfo->ia_upri = scale_to_lwp_priority(iaLimits.minPrio, maxClamped, newPrio);
    iaInfo->ia_uprilim = IA_NOCHANGE;
    iaInfo->ia_mode = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("IA: [%d...%d] %d->%d\n",
                     iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(tsLimits.maxPrio, (int)tsInfo->ts_uprilim);
    prv = tsInfo->ts_upri;
    tsInfo->ts_upri = scale_to_lwp_priority(tsLimits.minPrio, maxClamped, newPrio);
    tsInfo->ts_uprilim = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr ("TS: %d [%d...%d] %d->%d\n",
                     prv, tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
    // Elide the set when the priority would not change.
    if (prv == tsInfo->ts_upri) return 0;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("Unknown scheduling class\n");
    }
    return EINVAL; // no clue, punt
  }

  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy. At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = (*priocntl_ptr)(PC_VERSION, P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr("set_lwp_priority: unexpected class in readback: %d\n", ParmInfo.pc_cid);
    }
  }

  // A mismatch is only reported, never treated as an error (see the
  // silent-failure note above).
  if (Actual != Expected) {
    if ( ThreadPriorityVerbose ) {
      tty->print_cr ("set_lwp_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}
4038 // Solaris only gives access to 128 real priorities at a time,
4039 // so we expand Java's ten to fill this range. This would be better
4040 // if we dynamically adjusted relative priorities.
4041 //
4042 // The ThreadPriorityPolicy option allows us to select 2 different
4043 // priority scales.
4044 //
4045 // ThreadPriorityPolicy=0
4046 // Since the Solaris' default priority is MaximumPriority, we do not
4047 // set a priority lower than Max unless a priority lower than
4048 // NormPriority is requested.
4049 //
4050 // ThreadPriorityPolicy=1
4051 // This mode causes the priority table to get filled with
4052 // linear values. NormPriority get's mapped to 50% of the
4053 // Maximum priority an so on. This will cause VM threads
4054 // to get unfair treatment against other Solaris processes
4055 // which do not explicitly alter their thread priorities.
4056 //
// Mapping from Java thread priority (array index 1..10) to the libthread
// thr_setprio scale (0..127). Index 0 is a poison value; this table may be
// overwritten with prio_policy1 by lwp_priocntl_init() (ThreadPriorityPolicy=1
// or unbound T1 threads).
int os::java_to_os_priority[MaxPriority + 1] = {
  -99999, // 0 Entry should never be used

  0, // 1 MinPriority
  32, // 2
  64, // 3

  96, // 4
  127, // 5 NormPriority
  127, // 6

  127, // 7
  127, // 8
  127, // 9 NearMaxPriority

  127 // 10 MaxPriority
};
4078 OSReturn os::set_native_priority(Thread* thread, int newpri) {
4079 assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
4080 if ( !UseThreadPriorities ) return OS_OK;
4081 int status = thr_setprio(thread->osthread()->thread_id(), newpri);
4082 if ( os::Solaris::T2_libthread() || (UseBoundThreads && thread->osthread()->is_vm_created()) )
4083 status |= (set_lwp_priority (thread->osthread()->thread_id(),
4084 thread->osthread()->lwp_id(), newpri ));
4085 return (status == 0) ? OS_OK : OS_ERR;
4086 }
4089 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
4090 int p;
4091 if ( !UseThreadPriorities ) {
4092 *priority_ptr = NormalPriority;
4093 return OS_OK;
4094 }
4095 int status = thr_getprio(thread->osthread()->thread_id(), &p);
4096 if (status != 0) {
4097 return OS_ERR;
4098 }
4099 *priority_ptr = p;
4100 return OS_OK;
4101 }
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {
  // schedctl_init() returns this lwp's preemption-control block;
  // schedctl_start() asks the kernel to briefly defer time-slice preemption.
  schedctl_start(schedctl_init());
}
// Interrupt 'thread': set its interrupted flag, then wake it from any of
// the places it may be blocked (os::sleep, JSR-166 park, Object.wait, or
// interruptible I/O via SIGINTR). The flag is published before any wakeup.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    // Full fence: make the interrupted flag visible before the unparks below.
    OrderAccess::fence();
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent. If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166: unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  // Only signal on the first transition to "interrupted" (isInterrupted
  // was sampled before the flag was set above).
  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}
4148 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
4149 assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
4151 OSThread* osthread = thread->osthread();
4153 bool res = osthread->interrupted();
4155 // NOTE that since there is no "lock" around these two operations,
4156 // there is the possibility that the interrupted flag will be
4157 // "false" but that the interrupt event will be set. This is
4158 // intentional. The effect of this is that Object.wait() will appear
4159 // to have a spurious wakeup, which is not harmful, and the
4160 // possibility is so rare that it is not worth the added complexity
4161 // to add yet another lock. It has also been recommended not to put
4162 // the interrupted flag into the os::Solaris::Event structure,
4163 // because it hides the issue.
4164 if (res && clear_interrupted) {
4165 osthread->set_interrupted(false);
4166 }
4167 return res;
4168 }
void os::print_statistics() {
  // Intentionally empty: no OS-level statistics are reported on Solaris.
}
4174 int os::message_box(const char* title, const char* message) {
4175 int i;
4176 fdStream err(defaultStream::error_fd());
4177 for (i = 0; i < 78; i++) err.print_raw("=");
4178 err.cr();
4179 err.print_raw_cr(title);
4180 for (i = 0; i < 78; i++) err.print_raw("-");
4181 err.cr();
4182 err.print_raw_cr(message);
4183 for (i = 0; i < 78; i++) err.print_raw("=");
4184 err.cr();
4186 char buf[16];
4187 // Prevent process from exiting upon "read error" without consuming all CPU
4188 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4190 return buf[0] == 'y' || buf[0] == 'Y';
4191 }
// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  ExtendedPC epc;

  // Interrupt the target thread and let the callback capture its pc;
  // give up after time_to_wait ms rather than blocking the watcher.
  GetThreadPC_Callback cb(ProfileVM_lock);
  OSThread *osthread = thread->osthread();
  const int time_to_wait = 400; // 400ms wait for initial response
  int status = cb.interrupt(thread, time_to_wait);

  if (cb.is_done() ) {
    epc = cb.addr();
  } else {
    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
                             osthread->thread_id(), status););
    // epc is already NULL
  }
  return epc;
}
// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  // Direct pass-through: no platform-specific exception wrapping is needed here.
  f(value, method, args, thread);
}
4224 // This routine may be used by user applications as a "hook" to catch signals.
4225 // The user-defined signal handler must pass unrecognized signals to this
4226 // routine, and if it returns true (non-zero), then the signal handler must
4227 // return immediately. If the flag "abort_if_unrecognized" is true, then this
4228 // routine will never retun false (zero), but instead will execute a VM panic
4229 // routine kill the process.
4230 //
4231 // If this routine returns false, it is OK to call it again. This allows
4232 // the user-defined signal handler to perform checks either before or after
4233 // the VM performs its own checks. Naturally, the user code would be making
4234 // a serious error if it tried to handle an exception (such as a null check
4235 // or breakpoint) that the VM was generating for its own correct operation.
4236 //
4237 // This routine may recognize any of the following kinds of signals:
4238 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4239 // os::Solaris::SIGasync
4240 // It should be consulted by handlers for any of those signals.
4241 // It explicitly does not recognize os::Solaris::SIGinterrupt
4242 //
4243 // The caller of this routine must pass in the three arguments supplied
4244 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4245 // field of the structure passed to sigaction(). This routine assumes that
4246 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4247 //
4248 // Note that the VM will print warnings if it detects conflicting signal
4249 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4250 //
4251 extern "C" JNIEXPORT int
4252 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4253 int abort_if_unrecognized);
// Three-argument (sa_sigaction) handler installed by set_signal_handler();
// forwards everything to the VM's central dispatcher and insists that
// unrecognized signals abort the VM (abort_if_unrecognized = true).
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
}
4260 /* Do not delete - if guarantee is ever removed, a signal handler (even empty)
4261 is needed to provoke threads blocked on IO to return an EINTR
4262 Note: this explicitly does NOT call JVM_handle_solaris_signal and
4263 does NOT participate in signal chaining due to requirement for
4264 NOT setting SA_RESTART to make EINTR work. */
4265 extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
4266 if (UseSignalChaining) {
4267 struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
4268 if (actp && actp->sa_handler) {
4269 vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
4270 }
4271 }
4272 }
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
// Resolved to libjsig's JVM_get_signal_action when libjsig is interposed
// (see install_signal_handlers); NULL otherwise.
get_signal_t os::Solaris::get_signal_action = NULL;
4283 struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
4284 struct sigaction *actp = NULL;
4286 if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
4287 // Retrieve the old signal handler from libjsig
4288 actp = (*get_signal_action)(sig);
4289 }
4290 if (actp == NULL) {
4291 // Retrieve the preinstalled signal handler from jvm
4292 actp = get_preinstalled_handler(sig);
4293 }
4295 return actp;
4296 }
// Invoke the application's saved handler 'actp' for 'sig', honoring the
// handler's own sa_flags (SA_SIGINFO vs one-argument form, SA_NODEFER,
// SA_RESETHAND) and its signal mask. Returns true when the signal was
// consumed by the chained handler, false when the VM should treat it as
// an unexpected exception (SIG_DFL).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automaticlly block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND: the handler is one-shot; emulate by resetting our
    // saved copy to SIG_DFL before the call.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  // (SIG_IGN falls through to here: an ignored signal counts as handled.)
  return true;
}
4343 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4344 bool chained = false;
4345 // signal-chaining
4346 if (UseSignalChaining) {
4347 struct sigaction *actp = get_chained_signal_action(sig);
4348 if (actp != NULL) {
4349 chained = call_chained_handler(actp, sig, siginfo, context);
4350 }
4351 }
4352 return chained;
4353 }
4355 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4356 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4357 if (preinstalled_sigs[sig] != 0) {
4358 return &chainedsigactions[sig];
4359 }
4360 return NULL;
4361 }
// Record the sigaction that the VM is about to displace for 'sig' so that
// get_preinstalled_handler()/chained_handler() can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  // Store the action first, then mark the slot valid.
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;
}
// Install the VM's handler for 'sig'. A pre-existing user handler is either
// left alone (AllowUserSignalHandlers / probe-only), saved for chaining
// (UseSignalChaining, when 'oktochain'), or treated as fatal.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  // Build the new action: handler blocks all signals while it runs, and
  // uses the three-argument sa_sigaction form (SA_SIGINFO).
  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Record the flags we chose so run_periodic_checks() can detect changes.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
// Verify 'sig' once per mismatch: after check_signal_handler() reports a
// problem it adds the signal to check_signal_done, and later passes skip it.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here

void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing a NULL checks.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());

}
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// The libc sigaction entry point, resolved directly via dlsym so that an
// interposing library (e.g. libjsig) cannot hide the real installed handler.
static os_sigaction_t os_sigaction = NULL;

// Compare the handler currently installed for 'sig' against the handler the
// VM expects; warn (and stop re-checking that signal) on any mismatch of
// handler address or sa_flags.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;

  // Determine which handler the VM installed for this signal.
  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // Not a signal the VM owns; nothing to check.
        return;
      }
      break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }

}
// Install the VM's handlers for the signals it uses.  When libjsig (signal
// chaining) is present, bracket installation with its begin/end hooks so
// previously installed application handlers are preserved for chaining.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    // Presence of JVM_begin_signal_setting implies libjsig is preloaded;
    // resolve the rest of its interface.
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                             dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);

  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      // NOTE(review): end_signal_setting is invoked without a NULL check;
      // assumes libjsig always exports JVM_end_signal_setting -- confirm.
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}
4618 void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);
// Printable names for Solaris signals, indexed by signal number (index 0 is
// a placeholder).  Used by os::exception_name(); signals past the end of the
// table are formatted as "SIG<n>" there.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};
4632 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4633 if (0 < exception_code && exception_code <= SIGRTMAX) {
4634 // signal
4635 if (exception_code < sizeof(signames)/sizeof(const char*)) {
4636 jio_snprintf(buf, size, "%s", signames[exception_code]);
4637 } else {
4638 jio_snprintf(buf, size, "SIG%d", exception_code);
4639 }
4640 return buf;
4641 } else {
4642 return NULL;
4643 }
4644 }
// (Static) wrappers for the new libthread API; populated by libthread_init().
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call; resolved lazily by misc_sym_init().
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API; populated by liblgrp_init().
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call; resolved lazily by misc_sym_init().
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4670 static address resolve_symbol_lazy(const char* name) {
4671 address addr = (address) dlsym(RTLD_DEFAULT, name);
4672 if(addr == NULL) {
4673 // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4674 addr = (address) dlsym(RTLD_NEXT, name);
4675 }
4676 return addr;
4677 }
4679 static address resolve_symbol(const char* name) {
4680 address addr = resolve_symbol_lazy(name);
4681 if(addr == NULL) {
4682 fatal(dlerror());
4683 }
4684 return addr;
4685 }
4689 // isT2_libthread()
4690 //
4691 // Routine to determine if we are currently using the new T2 libthread.
4692 //
4693 // We determine if we are using T2 by reading /proc/self/lstatus and
4694 // looking for a thread with the ASLWP bit set. If we find this status
4695 // bit set, we must assume that we are NOT using T2. The T2 team
4696 // has approved this algorithm.
4697 //
4698 // We need to determine if we are running with the new T2 libthread
4699 // since setting native thread priorities is handled differently
4700 // when using this library. All threads created using T2 are bound
4701 // threads. Calling thr_setprio is meaningless in this case.
4702 //
// Detect the T2 libthread by scanning /proc/self/lstatus: if no LWP has the
// ASLWP flag set, we assume T2 (see the comment block above).  Returns false
// on any read/open failure.
bool isT2_libthread() {
  // NOTE(review): function-local statics make this non-reentrant; it appears
  // intended for one-shot use from libthread_init() -- confirm.
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];   // NOTE(review): appears unused
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer and grow until the entire lwp list fits.
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP anywhere => T2 libthread.
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    // Buffer too small: resize to the size the kernel reported and retry.
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray);  // retry.
  }

  FREE_C_HEAP_ARRAY(char, lwpArray);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}
// Resolve the libthread entry points the VM needs and record whether this is
// the T2 libthread.  Aborts (guarantee/fatal) if required symbols are absent,
// which indicates an OS/libthread that is too old.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range reported by thr_sighndlrinfo in
  // handler_start/handler_end.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}
// Function pointers for the mutex/condvar primitives; which implementation
// (LWP, pthread, or default libthread) they point at is selected once at
// startup by os::Solaris::synchronization_init() below.
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;
// Bind the mutex/condvar function pointers to one of three implementations,
// chosen by the UseLWPSynchronization / UsePthreads flags:
// _lwp_* primitives, pthread_* primitives, or the default libthread ones.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    // LWP (kernel-level) primitives.
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      // POSIX pthread primitives.
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      // Default libthread primitives.
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}
// Load liblgrp.so.1 and resolve the locality-group (NUMA) API.  On success,
// records an lgrp cookie and returns true; returns false when the library
// cannot be loaded (caller then disables NUMA).
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    // NOTE(review): individual dlsym results are not NULL-checked; this
    // assumes any liblgrp.so.1 exports the complete API -- confirm.
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                       dlsym(handle, "lgrp_cookie_stale")));

    // Snapshot the lgroup hierarchy from this caller's view.
    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}
4880 void os::Solaris::misc_sym_init() {
4881 address func;
4883 // getisax
4884 func = resolve_symbol_lazy("getisax");
4885 if (func != NULL) {
4886 os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4887 }
4889 // meminfo
4890 func = resolve_symbol_lazy("meminfo");
4891 if (func != NULL) {
4892 os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4893 }
4894 }
// Forward to the dynamically resolved getisax(2).  Only valid after
// misc_sym_init() found the symbol (asserted here).
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}
4901 // Symbol doesn't exist in Solaris 8 pset.h
4902 #ifndef PS_MYID
4903 #define PS_MYID -3
4904 #endif
4906 // int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
4907 typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
4908 static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4910 void init_pset_getloadavg_ptr(void) {
4911 pset_getloadavg_ptr =
4912 (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4913 if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4914 warning("pset_getloadavg function not found");
4915 }
4916 }
4918 int os::Solaris::_dev_zero_fd = -1;
4920 // this is called _before_ the global arguments have been parsed
// First-stage OS initialization (argument flags are not yet parsed): record
// pid and base hrtime, query page size, open /dev/zero, resolve optional
// symbols, and size the default stack guard zones for large-page systems.
void os::init(void) {
  _initial_pid = getpid();

  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}
4979 // To install functions for atexit system call
extern "C" {
  // atexit() needs a plain C-linkage function; forward to perfMemory_exit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4986 // this is called _after_ the global arguments have been parsed
// Second-stage OS initialization, run after argument parsing: safepoint
// polling / memory-serialize pages, stack-size sanity checks, libthread,
// NUMA, signal and synchronization setup, fd limits, and atexit hooks.
// Returns JNI_OK, or JNI_ERR when the configured stack size is too small.
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this release: remember that and retry plain.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  os::large_page_init();

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
    threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
        vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    // ISM is not compatible with the NUMA allocator - it always allocates
    // pages round-robin across the lgroups.
    if (UseNUMA && UseLargePages && UseISM) {
      if (!FLAG_IS_DEFAULT(UseNUMA)) {
        // NUMA was requested explicitly: prefer it, dropping large pages
        // when those were not requested explicitly; otherwise warn and
        // fall back to non-NUMA.
        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
          UseLargePages = false;
        } else {
          warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
          UseNUMA = false;
        }
      } else {
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  // Old libjsig versions cannot chain signals above 32.
  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}
5165 void os::init_3(void) {
5166 return;
5167 }
5169 // Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  // Revoke all access so reads of the polling page fault.
  if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
    fatal("Could not disable polling page");
};
5175 // Mark the polling page as readable
void os::make_polling_page_readable(void) {
  // Restore read access so polling-page reads no longer fault.
  if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
    fatal("Could not enable polling page");
};
5181 // OS interface.
5183 bool os::check_heap(bool force) { return true; }
5185 typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
5186 static vsnprintf_t sol_vsnprintf = NULL;
// vsnprintf via libc's __vsnprintf (or vsnprintf), resolved on first use.
// NOTE(review): the lazy initialization of sol_vsnprintf is unsynchronized;
// concurrent first calls race on the pointer -- benign only because the
// resolved value is identical each time. Confirm or make thread-safe.
int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
  if (!sol_vsnprintf) {
    //search  for the named symbol in the objects that were loaded after libjvm
    void* where = RTLD_NEXT;
    if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
    if (!sol_vsnprintf){
      //search  for the named symbol in the objects that were loaded before libjvm
      where = RTLD_DEFAULT;
      if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
        sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
      assert(sol_vsnprintf != NULL, "vsnprintf not found");
    }
  }
  return (*sol_vsnprintf)(buf, count, fmt, argptr);
}
5206 // Is a (classpath) directory empty?
// Returns true when 'path' contains no entries besides "." and "..".
// An unopenable directory is reported as empty.
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  // NOTE(review): the two-argument readdir here is the Solaris reentrant
  // form that fills the caller-supplied dirent buffer -- confirm against
  // the platform headers.
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      // Any other entry means the directory is not empty.
      result = false;
    }
  }
  closedir(dir);
  return result;
}
5227 // This code originates from JDK's sysOpen and open64_w
5228 // from src/solaris/hpi/src/system_md.c
5230 #ifndef O_DELETE
5231 #define O_DELETE 0x10000
5232 #endif
5234 // Open a file. Unlink the file immediately after open returns
5235 // if the specified oflag has the O_DELETE flag set.
5236 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
// Open a file through the 64-bit interface, mirroring JDK sysOpen/open64_w:
// rejects over-long paths, refuses directories, remaps low descriptors on
// 32-bit builds, sets close-on-exec, and honors the JVM-private O_DELETE
// pseudo-flag (unlink immediately after a successful open).
int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  // Strip the JVM-private O_DELETE bit before handing flags to the OS.
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      // fstat64 failed; treat as an open failure.
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   *   descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   *   fopen must be less than 256, _even_ when the first limit above
   *   has been raised.  This can cause calls to fopen (but not calls to
   *   open, for example) to fail mysteriously, perhaps in 3rd party
   *   native code (although the JDK itself uses fopen).  One can hardly
   *   criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   *   256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   *   stdio fopen limit by calling function enable_extended_FILE_stdio.
   *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   *   workaround the bug by remapping non-stdio file descriptors below
   *   256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   *          enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef  _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set.  If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    ::unlink(path);
  }
  return fd;
}
5346 // create binary file, rewriting existing file if required
5347 int os::create_binary_file(const char* path, bool rewrite_existing) {
5348 int oflags = O_WRONLY | O_CREAT;
5349 if (!rewrite_existing) {
5350 oflags |= O_EXCL;
5351 }
5352 return ::open64(path, oflags, S_IREAD | S_IWRITE);
5353 }
5355 // return current position of file pointer
5356 jlong os::current_file_offset(int fd) {
5357 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5358 }
5360 // move file pointer to the specified offset
5361 jlong os::seek_to_file_offset(int fd, jlong offset) {
5362 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5363 }
5365 jlong os::lseek(int fd, jlong offset, int whence) {
5366 return (jlong) ::lseek64(fd, offset, whence);
5367 }
// Convert a pathname to native form.  On Solaris the incoming path is
// already in native form, so it is returned unchanged.
char * os::native_path(char *path) {
  return path;
}
// Truncate (or extend) the file referred to by fd to 'length' bytes.
// Uses the 64-bit variant so lengths beyond 2GB work in 32-bit builds.
int os::ftruncate(int fd, jlong length) {
  return ::ftruncate64(fd, length);
}
// Flush fd's data to stable storage.  RESTARTABLE_RETURN_INT retries
// the call while it fails with EINTR, then returns the final result.
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}
// Report the number of bytes readable from fd without blocking.
// Character devices, FIFOs and sockets are queried with FIONREAD;
// regular files fall back to (end-of-file - current offset).
// Returns 1 on success with *bytes filled in, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
      * XXX: is the following call interruptible? If so, this might
      * need to go through the INTERRUPT_IO() wrapper as for other
      * blocking, interruptible calls in this file.
      */
      int n,ioctl_return;

      // ioctl fills 'n' with the byte count when it succeeds.
      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Seek to end to learn the file size, then restore the original
  // offset; any seek failure (e.g. a pipe) means "unknown".
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
5414 // Map a block of memory.
5415 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
5416 char *addr, size_t bytes, bool read_only,
5417 bool allow_exec) {
5418 int prot;
5419 int flags;
5421 if (read_only) {
5422 prot = PROT_READ;
5423 flags = MAP_SHARED;
5424 } else {
5425 prot = PROT_READ | PROT_WRITE;
5426 flags = MAP_PRIVATE;
5427 }
5429 if (allow_exec) {
5430 prot |= PROT_EXEC;
5431 }
5433 if (addr != NULL) {
5434 flags |= MAP_FIXED;
5435 }
5437 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5438 fd, file_offset);
5439 if (mapped_address == MAP_FAILED) {
5440 return NULL;
5441 }
5442 return mapped_address;
5443 }
// Remap a block of memory.
// On Solaris re-mapping is just an ordinary map over the same range,
// so this delegates directly to map_memory().
char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
5456 // Unmap a block of memory.
5457 bool os::unmap_memory(char* addr, size_t bytes) {
5458 return munmap(addr, bytes) == 0;
5459 }
5461 void os::pause() {
5462 char filename[MAX_PATH];
5463 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5464 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5465 } else {
5466 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5467 }
5469 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5470 if (fd != -1) {
5471 struct stat buf;
5472 ::close(fd);
5473 while (::stat(filename, &buf) == 0) {
5474 (void)::poll(NULL, 0, 100);
5475 }
5476 } else {
5477 jio_fprintf(stderr,
5478 "Could not open pause file '%s', continuing immediately.\n", filename);
5479 }
5480 }
#ifndef PRODUCT
#ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
// Turn this on if you need to trace synch operations.
// Set RECORD_SYNCH_LIMIT to a large-enough value,
// and call record_synch_enable and record_synch_disable
// around the computation of interest.

void record_synch(char* name, bool returning);  // defined below

// RAII helper: records entry to a synch operation in its constructor
// and the matching return in its destructor.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
  { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Interpose on a libc/libthread synchronization entry point: resolve the
// real implementation lazily via dlsym(RTLD_NEXT), run the 'inner' sanity
// check, record the call, then delegate to the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// Synch objects handed to the OS must not live in the Java heap, where
// the GC could move them.
#define CHECK_POINTER_OK(p) \
  (Universe::perm_gen() == NULL || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

CHECK_MUTEX(  mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX( mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX( mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX(  _lwp_mutex_lock)
CHECK_MUTEX(  _lwp_mutex_unlock)
CHECK_MUTEX(  _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND(  _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2(  _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

CHECK_SYNCH_OP(int, _lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2,       (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill,           (int lwp, int n),  (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait,      (lwp_sema_t* p),   (p),  CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv),  (cv), CHECK_CV);


// recording machinery:
// Fixed-size trace buffers; recording stops silently at the limit.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace entry (operation name, call/return flag, calling
// thread) to the buffers above, if recording is enabled and not full.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT
// Byte offset of pr_utime within prusage_t, and the combined size of the
// pr_utime..pr_ttime member range, computed with the classic
// null-pointer member-address trick (morally equivalent to offsetof;
// implementation-defined but relied on throughout this file).
const intptr_t thr_time_off = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5626 // JVMTI & JVM monitoring and management support
5627 // The thread_cpu_time() and current_thread_cpu_time() are only
5628 // supported if is_thread_cpu_time_supported() returns true.
5629 // They are not supported on Solaris T1.
5631 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5632 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5633 // of a thread.
5634 //
5635 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5636 // returns the fast estimate available on the platform.
// hrtime_t gethrvtime() return value includes
// user time but does not include system time
jlong os::current_thread_cpu_time() {
  // Fast estimate for the calling thread: the LWP's user-level
  // virtual time, in nanoseconds.
  return (jlong) gethrvtime();
}
// CPU time consumed by an arbitrary thread, user time only.
jlong os::thread_cpu_time(Thread *thread) {
  // return user level CPU time only to be consistent with
  // what current_thread_cpu_time returns.
  // thread_cpu_time_info() must be changed if this changes
  return os::thread_cpu_time(thread, false /* user time only */);
}
5651 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5652 if (user_sys_cpu_time) {
5653 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5654 } else {
5655 return os::current_thread_cpu_time();
5656 }
5657 }
5659 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5660 char proc_name[64];
5661 int count;
5662 prusage_t prusage;
5663 jlong lwp_time;
5664 int fd;
5666 sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5667 getpid(),
5668 thread->osthread()->lwp_id());
5669 fd = ::open(proc_name, O_RDONLY);
5670 if ( fd == -1 ) return -1;
5672 do {
5673 count = ::pread(fd,
5674 (void *)&prusage.pr_utime,
5675 thr_time_size,
5676 thr_time_off);
5677 } while (count < 0 && errno == EINTR);
5678 ::close(fd);
5679 if ( count < 0 ) return -1;
5681 if (user_sys_cpu_time) {
5682 // user + system CPU time
5683 lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5684 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5685 (jlong)prusage.pr_stime.tv_nsec +
5686 (jlong)prusage.pr_utime.tv_nsec;
5687 } else {
5688 // user level CPU time only
5689 lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5690 (jlong)prusage.pr_utime.tv_nsec;
5691 }
5693 return(lwp_time);
5694 }
5696 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5697 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5698 info_ptr->may_skip_backward = false; // elapsed time not wall time
5699 info_ptr->may_skip_forward = false; // elapsed time not wall time
5700 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned
5701 }
5703 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
5704 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
5705 info_ptr->may_skip_backward = false; // elapsed time not wall time
5706 info_ptr->may_skip_forward = false; // elapsed time not wall time
5707 info_ptr->kind = JVMTI_TIMER_USER_CPU; // only user time is returned
5708 }
5710 bool os::is_thread_cpu_time_supported() {
5711 if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5712 return true;
5713 } else {
5714 return false;
5715 }
5716 }
5718 // System loadavg support. Returns -1 if load average cannot be obtained.
5719 // Return the load average for our processor set if the primitive exists
5720 // (Solaris 9 and later). Otherwise just return system wide loadavg.
5721 int os::loadavg(double loadavg[], int nelem) {
5722 if (pset_getloadavg_ptr != NULL) {
5723 return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5724 } else {
5725 return ::getloadavg(loadavg, nelem);
5726 }
5727 }
5729 //---------------------------------------------------------------------------------
5731 static address same_page(address x, address y) {
5732 intptr_t page_bits = -os::vm_page_size();
5733 if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
5734 return x;
5735 else if (x > y)
5736 return (address)(intptr_t(y) | ~page_bits) + 1;
5737 else
5738 return (address)(intptr_t(y) & page_bits);
5739 }
// Print symbol information for 'addr' to st: the containing symbol name
// and offset when dladdr() can resolve it, otherwise the containing
// library or "<absolute address>".  With -verbose, also disassembles a
// few dozen bytes around the address.  Returns true iff dladdr resolved
// the address.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo)) {
#ifdef _LP64
    st->print("0x%016lx: ", addr);
#else
    st->print("0x%08x: ", addr);
#endif
    if (dlinfo.dli_sname != NULL)
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    else if (dlinfo.dli_fname)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname) st->print(" in %s", dlinfo.dli_fname);
#ifdef _LP64
    if (dlinfo.dli_fbase) st->print(" at 0x%016lx", dlinfo.dli_fbase);
#else
    if (dlinfo.dli_fbase) st->print(" at 0x%08x", dlinfo.dli_fbase);
#endif
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      // Keep the window within one page of addr and don't start before
      // the resolved symbol (or library base).
      address begin = same_page(addr-40, addr);
      address end   = same_page(addr+40, addr);
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest)  lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest)  begin = lowest;
      Dl_info dlinfo2;
      // Don't decode past the start of the next symbol.
      if (dladdr(end, &dlinfo2) && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}
5782 // Following function has been added to support HotSparc's libjvm.so running
5783 // under Solaris production JDK 1.2.2 / 1.3.0. These came from
5784 // src/solaris/hpi/native_threads in the EVM codebase.
5785 //
5786 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5787 // libraries and should thus be removed. We will leave it behind for a while
5788 // until we no longer want to able to run on top of 1.3.0 Solaris production
5789 // JDK. See 4341971.
// Safety margin subtracted from the computed free-stack figure.
#define STACK_SLACK 0x800

extern "C" {
  // Returns the number of bytes of stack space remaining below the
  // caller's frame, minus STACK_SLACK.  thr_stksegment() supplies the
  // stack base (ss_sp) and size; the asserts sanity-check that this
  // frame actually lies within that segment.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    // Lowest usable address of the stack segment.
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}
// Just to get the Kernel build to link on solaris for testing.

// Declaration only: KERNEL_RETURN presumably expands to an empty body in
// the kernel (core-only) build and to ';' otherwise — confirm against
// its definition.
extern "C" {
  class ASGCT_CallTrace;
  void AsyncGetCallTrace(ASGCT_CallTrace *trace, jint depth, void* ucontext)
    KERNEL_RETURN;
}
5815 // ObjectMonitor park-unpark infrastructure ...
5816 //
5817 // We implement Solaris and Linux PlatformEvents with the
5818 // obvious condvar-mutex-flag triple.
5819 // Another alternative that works quite well is pipes:
5820 // Each PlatformEvent consists of a pipe-pair.
5821 // The thread associated with the PlatformEvent
5822 // calls park(), which reads from the input end of the pipe.
5823 // Unpark() writes into the other end of the pipe.
5824 // The write-side of the pipe must be set NDELAY.
5825 // Unfortunately pipes consume a large # of handles.
5826 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5827 // Using pipes for the 1st few threads might be workable, however.
5828 //
5829 // park() is permitted to return spuriously.
5830 // Callers of park() should wrap the call to park() in
5831 // an appropriate loop. A litmus test for the correct
5832 // usage of park is the following: if park() were modified
5833 // to immediately return 0 your code should still work,
5834 // albeit degenerating to a spin loop.
5835 //
5836 // An interesting optimization for park() is to use a trylock()
5837 // to attempt to acquire the mutex. If the trylock() fails
5838 // then we know that a concurrent unpark() operation is in-progress.
5839 // in that case the park() code could simply set _count to 0
5840 // and return immediately. The subsequent park() operation *might*
5841 // return immediately. That's harmless as the caller of park() is
5842 // expected to loop. By using trylock() we will have avoided a
5843 // avoided a context switch caused by contention on the per-thread mutex.
5844 //
5845 // TODO-FIXME:
5846 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
5847 // objectmonitor implementation.
5848 // 2. Collapse the JSR166 parker event, and the
5849 // objectmonitor ParkEvent into a single "Event" construct.
5850 // 3. In park() and unpark() add:
5851 // assert (Thread::current() == AssociatedWith).
5852 // 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5853 // 1-out-of-N park() operations will return immediately.
5854 //
5855 // _Event transitions in park()
5856 // -1 => -1 : illegal
5857 // 1 => 0 : pass - return immediately
5858 // 0 => -1 : block
5859 //
5860 // _Event serves as a restricted-range semaphore.
5861 //
5862 // Another possible encoding of _Event would be with
5863 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5864 //
5865 // TODO-FIXME: add DTRACE probes for:
5866 // 1. Tx parks
5867 // 2. Ty unparks Tx
5868 // 3. Tx resumes from park
// value determined through experimentation
#define ROUNDINGFIX 11

// utility to compute the abstime argument to timedwait.
// TODO-FIXME: switch from compute_abstime() to unpackTime().

// Convert a relative timeout in milliseconds into the absolute
// timestruc_t that cond_timedwait expects, clamping the seconds
// component to what the underlying primitive accepts.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) { // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  abstime->tv_sec = now.tv_sec + seconds;
  // Fold the sub-second remainder in, carrying into tv_sec on overflow.
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}
// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
// Conceptually TryPark() should be equivalent to park(0).

// Returns the prior value: 1 means a permit was consumed, 0 means none
// was available.  The CAS loop retries until the observed value sticks.
int os::PlatformEvent::TryPark() {
  for (;;) {
    const int v = _Event ;
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
  }
}
void os::PlatformEvent::park() {           // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Atomically decrement _Event: a prior value of 1 consumes the permit
  // and returns immediately; 0 -> -1 means we must block on the condvar.
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    // _Event went non-negative: an unpark() posted the permit; reset it
    // to 0 before releasing the mutex.
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
}
5966 int os::PlatformEvent::park(jlong millis) {
5967 guarantee (_nParked == 0, "invariant") ;
5968 int v ;
5969 for (;;) {
5970 v = _Event ;
5971 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
5972 }
5973 guarantee (v >= 0, "invariant") ;
5974 if (v != 0) return OS_OK ;
5976 int ret = OS_TIMEOUT;
5977 timestruc_t abst;
5978 compute_abstime (&abst, millis);
5980 // See http://monaco.sfbay/detail.jsf?cr=5094058.
5981 // For Solaris SPARC set fprs.FEF=0 prior to parking.
5982 // Only for SPARC >= V8PlusA
5983 #if defined(__sparc) && defined(COMPILER2)
5984 if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
5985 #endif
5986 int status = os::Solaris::mutex_lock(_mutex);
5987 assert_status(status == 0, status, "mutex_lock");
5988 guarantee (_nParked == 0, "invariant") ;
5989 ++ _nParked ;
5990 while (_Event < 0) {
5991 int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
5992 assert_status(status == 0 || status == EINTR ||
5993 status == ETIME || status == ETIMEDOUT,
5994 status, "cond_timedwait");
5995 if (!FilterSpuriousWakeups) break ; // previous semantics
5996 if (status == ETIME || status == ETIMEDOUT) break ;
5997 // We consume and ignore EINTR and spurious wakeups.
5998 }
5999 -- _nParked ;
6000 if (_Event >= 0) ret = OS_OK ;
6001 _Event = 0 ;
6002 status = os::Solaris::mutex_unlock(_mutex);
6003 assert_status(status == 0, status, "mutex_unlock");
6004 return ret;
6005 }
// Post a permit: raise _Event toward 1 and, if the associated thread is
// blocked (prior value < 0), wake it via the condvar.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;

  // Increment _Event.
  // Another acceptable implementation would be to simply swap 1
  // into _Event:
  //   if (Swap (&_Event, 1) < 0) {
  //      mutex_lock (_mutex) ; AnyWaiters = nParked; mutex_unlock (_mutex) ;
  //      if (AnyWaiters) cond_signal (_cond) ;
  //   }

  for (;;) {
    v = _Event ;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value. A degenerate CAS() would also work.
      // Viz., CAS (v+0, &_Event, v) == v).
      OrderAccess::fence() ;
      if (_Event == v) return ;
      continue ;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ;
  }

  // If the thread associated with the event was parked, wake it.
  if (v < 0) {
    int status ;
    // Wait for the thread assoc with the PlatformEvent to vacate.
    status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    guarantee (AnyWaiters == 0 || AnyWaiters == 1, "invariant") ;
    if (AnyWaiters != 0) {
      // We intentional signal *after* dropping the lock
      // to avoid a common class of futile wakeups.
      status = os::Solaris::cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
  }
}
6052 // JSR166
6053 // -------------------------------------------------------
6055 /*
6056 * The solaris and linux implementations of park/unpark are fairly
6057 * conservative for now, but can be improved. They currently use a
6058 * mutex/condvar pair, plus _counter.
6059 * Park decrements _counter if > 0, else does a condvar wait. Unpark
6060 * sets count to 1 and signals condvar. Only one thread ever waits
6061 * on the condvar. Contention seen when trying to park implies that someone
6062 * is unparking you, so don't wait. And spurious returns are fine, so there
6063 * is no need to track notifications.
6064 */
#define MAX_SECS 100000000
/*
 * This code is common to linux and solaris and will be moved to a
 * common place in dolphin.
 *
 * The passed in time value is either a relative time in nanoseconds
 * or an absolute time in milliseconds. Either way it has to be unpacked
 * into suitable seconds and nanoseconds components and stored in the
 * given timespec structure.
 * Given time is a 64-bit value and the time_t used in the timespec is only
 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
 * overflow if times way in the future are given. Further on Solaris versions
 * prior to 10 there is a restriction (see cond_timedwait) that the specified
 * number of seconds, in abstime, is less than current_time + 100,000,000.
 * As it will be 28 years before "now + 100000000" will overflow we can
 * ignore overflow and just impose a hard-limit on seconds using the value
 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
 * years from "now".
 */
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is milliseconds since the epoch; clamp seconds to the cap.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative delay in nanoseconds; add it to 'now',
    // carrying nanosecond overflow into tv_sec.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
// Park the current thread (JSR166 LockSupport.park support).  Consumes
// the permit in _counter if available; otherwise blocks on the
// per-Parker condvar until unparked, interrupted, or — when time != 0 —
// the deadline computed by unpackTime() passes.  Spurious returns are
// permitted by the park() contract.
void Parker::park(bool isAbsolute, jlong time) {

  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0 ;
    OrderAccess::fence();
    return ;
  }

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs. Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of the Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
  OrderAccess::fence();
}
6224 void Parker::unpark() {
6225 int s, status ;
6226 status = os::Solaris::mutex_lock (_mutex) ;
6227 assert (status == 0, "invariant") ;
6228 s = _counter;
6229 _counter = 1;
6230 status = os::Solaris::mutex_unlock (_mutex) ;
6231 assert (status == 0, "invariant") ;
6233 if (s < 1) {
6234 status = os::Solaris::cond_signal (_cond) ;
6235 assert (status == 0, "invariant") ;
6236 }
6237 }
extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed — _exit (not exit) so no atexit handlers or stdio
    // flushing run in the forked child.
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through
      return status;
    }
  }
}
6308 // is_headless_jre()
6309 //
6310 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6311 // in order to report if we are running in a headless jre
6312 //
6313 // Since JDK8 xawt/libmawt.so was moved into the same directory
6314 // as libawt.so, and renamed libawt_xawt.so
6315 //
6316 bool os::is_headless_jre() {
6317 struct stat statbuf;
6318 char buf[MAXPATHLEN];
6319 char libmawtpath[MAXPATHLEN];
6320 const char *xawtstr = "/xawt/libmawt.so";
6321 const char *new_xawtstr = "/libawt_xawt.so";
6322 char *p;
6324 // Get path to libjvm.so
6325 os::jvm_path(buf, sizeof(buf));
6327 // Get rid of libjvm.so
6328 p = strrchr(buf, '/');
6329 if (p == NULL) return false;
6330 else *p = '\0';
6332 // Get rid of client or server
6333 p = strrchr(buf, '/');
6334 if (p == NULL) return false;
6335 else *p = '\0';
6337 // check xawt/libmawt.so
6338 strcpy(libmawtpath, buf);
6339 strcat(libmawtpath, xawtstr);
6340 if (::stat(libmawtpath, &statbuf) == 0) return false;
6342 // check libawt_xawt.so
6343 strcpy(libmawtpath, buf);
6344 strcat(libmawtpath, new_xawtstr);
6345 if (::stat(libmawtpath, &statbuf) == 0) return false;
6347 return true;
6348 }
// Interruptible write(2) wrapper.  INTERRUPTIBLE_RETURN_INT (macro defined
// elsewhere in the Solaris port, not visible here) presumably handles the
// thread-interrupt protocol and clears the interrupted state via
// os::Solaris::clear_interrupted.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
// close(2) wrapper, restarted on EINTR by RESTARTABLE_RETURN_INT.
// NOTE(review): restarting close() after EINTR is debatable on some
// platforms (fd state is unspecified) -- confirm Solaris semantics.
int os::close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
}
// Close a socket fd; identical to os::close() since Solaris sockets are
// plain file descriptors.
int os::socket_close(int fd) {
  RESTARTABLE_RETURN_INT(::close(fd));
}
// Interruptible recv(2) wrapper; clears thread-interrupted state on exit.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
// Interruptible send(2) wrapper; clears thread-interrupted state on exit.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
// send(2) restarted on EINTR but without the thread-interrupt protocol --
// "raw" relative to os::send() above.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}
6374 // As both poll and select can be interrupted by signals, we have to be
6375 // prepared to restart the system call after updating the timeout, unless
6376 // a poll() is done with timeout == -1, in which case we repeat with this
6377 // "wait forever" value.
// Wait until fd becomes readable (POLLIN) or 'timeout' milliseconds elapse;
// timeout == -1 means wait forever.  When poll() is interrupted by a signal
// the call is re-issued with the remaining budget; once the budget is
// exhausted this returns OS_OK.  Otherwise the poll() result is returned.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  // Dummy non-NULL tz argument for gettimeofday below.
  // NOTE(review): the second gettimeofday argument is the obsolete
  // timezone pointer -- presumably ignored on Solaris; confirm.
  static const char* aNull = 0;
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  // Snapshot "now" in milliseconds so the elapsed time of an interrupted
  // poll() can be deducted from the remaining timeout.
  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
      if(timeout != -1) {
        // Interrupted with a finite timeout: charge the elapsed time.
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
        timeout -= newtime - prevtime;
        if(timeout <= 0)
          return OS_OK;
        prevtime = newtime;
      }
    } else return res;
  }
}
// Interruptible connect(2).  connect() needs special EINTR handling:
// it must not be blindly restarted (first call is NORESTART); if it was
// interrupted, the connect() is re-issued and the resulting errno values
// are mapped back to first-call semantics (EALREADY -> EINPROGRESS,
// EISCONN -> success).
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
                                        && (errno == EINTR)) {
    /* restarting a connect() changes its errno semantics */
    INTERRUPTIBLE(::connect(fd, him, len), _result,\
                os::Solaris::clear_interrupted);
    /* undo these changes */
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        errno = EINPROGRESS; /* fall through */
      } else if (errno == EISCONN) {
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}
// Interruptible accept(2) wrapper; a negative fd is rejected up front
// with OS_ERR.
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
    os::Solaris::clear_interrupted);
}
// Interruptible recvfrom(2) wrapper; clears thread-interrupted state.
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
    os::Solaris::clear_interrupted);
}
// Interruptible sendto(2) wrapper; clears thread-interrupted state.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
    os::Solaris::clear_interrupted);
}
// Store the number of bytes readable on fd into *pbytes (via FIONREAD).
// Returns 1 on success and 0 on failure, matching what the jdk expects
// from JVM_SocketAvailable.  A negative fd returns OS_OK with *pbytes
// untouched.
int os::socket_available(int fd, jint *pbytes) {
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
}
// Interruptible bind(2) wrapper.  NORESTART: bind is not re-issued after
// an interrupt -- the error is returned instead (macro defined elsewhere).
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
    os::Solaris::clear_interrupted);
}