Thu, 29 May 2014 09:56:06 -0700
Merge
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // no precompiled headers
26 #include "classfile/classLoader.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/vmSymbols.hpp"
29 #include "code/icBuffer.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "compiler/compileBroker.hpp"
32 #include "compiler/disassembler.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "jvm_solaris.h"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/filemap.hpp"
37 #include "mutex_solaris.inline.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "os_share_solaris.hpp"
40 #include "prims/jniFastGetField.hpp"
41 #include "prims/jvm.h"
42 #include "prims/jvm_misc.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/extendedPC.hpp"
45 #include "runtime/globals.hpp"
46 #include "runtime/interfaceSupport.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/javaCalls.hpp"
49 #include "runtime/mutexLocker.hpp"
50 #include "runtime/objectMonitor.hpp"
51 #include "runtime/osThread.hpp"
52 #include "runtime/perfMemory.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "runtime/statSampler.hpp"
55 #include "runtime/stubRoutines.hpp"
56 #include "runtime/thread.inline.hpp"
57 #include "runtime/threadCritical.hpp"
58 #include "runtime/timer.hpp"
59 #include "services/attachListener.hpp"
60 #include "services/memTracker.hpp"
61 #include "services/runtimeService.hpp"
62 #include "utilities/decoder.hpp"
63 #include "utilities/defaultStream.hpp"
64 #include "utilities/events.hpp"
65 #include "utilities/growableArray.hpp"
66 #include "utilities/vmError.hpp"
68 // put OS-includes here
69 # include <dlfcn.h>
70 # include <errno.h>
71 # include <exception>
72 # include <link.h>
73 # include <poll.h>
74 # include <pthread.h>
75 # include <pwd.h>
76 # include <schedctl.h>
77 # include <setjmp.h>
78 # include <signal.h>
79 # include <stdio.h>
80 # include <alloca.h>
81 # include <sys/filio.h>
82 # include <sys/ipc.h>
83 # include <sys/lwp.h>
84 # include <sys/machelf.h> // for elf Sym structure used by dladdr1
85 # include <sys/mman.h>
86 # include <sys/processor.h>
87 # include <sys/procset.h>
88 # include <sys/pset.h>
89 # include <sys/resource.h>
90 # include <sys/shm.h>
91 # include <sys/socket.h>
92 # include <sys/stat.h>
93 # include <sys/systeminfo.h>
94 # include <sys/time.h>
95 # include <sys/times.h>
96 # include <sys/types.h>
97 # include <sys/wait.h>
98 # include <sys/utsname.h>
99 # include <thread.h>
100 # include <unistd.h>
101 # include <sys/priocntl.h>
102 # include <sys/rtpriocntl.h>
103 # include <sys/tspriocntl.h>
104 # include <sys/iapriocntl.h>
105 # include <sys/fxpriocntl.h>
106 # include <sys/loadavg.h>
107 # include <string.h>
108 # include <stdio.h>
110 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later
111 # include <sys/procfs.h> // see comment in <sys/procfs.h>
#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

// Here are some liblgrp types from sys/lgrp_user.h to be able to
// compile on older systems without this header file.
#ifndef MADV_ACCESS_LWP
# define MADV_ACCESS_LWP 7 /* next LWP to access heavily */
#endif
#ifndef MADV_ACCESS_MANY
# define MADV_ACCESS_MANY 8 /* many processes to access heavily */
#endif

#ifndef LGRP_RSRC_CPU
# define LGRP_RSRC_CPU 0 /* CPU resources */
#endif
#ifndef LGRP_RSRC_MEM
# define LGRP_RSRC_MEM 1 /* memory resources */
#endif

// see thr_setprio(3T) for the basis of these numbers
#define MinimumPriority 0
#define NormalPriority 64
#define MaximumPriority 127

// Values for ThreadPriorityPolicy == 1
// Maps Java thread priorities (index 1..CriticalPriority) to Solaris
// thr_setprio() values; index 0 is a deliberately invalid sentinel.
int prio_policy1[CriticalPriority+1] = {
  -99999, 0, 16, 32, 48, 64,
          80, 96, 112, 124, 127, 127 };
// System parameters used internally
static clock_t clock_tics_per_sec = 100;  // overwritten at init with sysconf value

// Track if we have called enable_extended_FILE_stdio (on Solaris 10u4+)
static bool enabled_extended_FILE_stdio = false;

// For diagnostics to print a message once. see run_periodic_checks
static bool check_addr0_done = false;
static sigset_t check_signal_done;
static bool check_signals = true;

// PC range of thr_sighndlrinfo, used to recognize libthread frames.
address os::Solaris::handler_start;  // start pc of thr_sighndlrinfo
address os::Solaris::handler_end;    // end pc of thr_sighndlrinfo

// Cached stack base of the primordial thread; see 4352906 workaround
// in os::current_stack_base().
address os::Solaris::_main_stack_base = NULL;
163 // "default" initializers for missing libc APIs
164 extern "C" {
165 static int lwp_mutex_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
166 static int lwp_mutex_destroy(mutex_t *mx) { return 0; }
168 static int lwp_cond_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
169 static int lwp_cond_destroy(cond_t *cv) { return 0; }
170 }
172 // "default" initializers for pthread-based synchronization
173 extern "C" {
174 static int pthread_mutex_default_init(mutex_t *mx, int scope, void *arg) { memset(mx, 0, sizeof(mutex_t)); return 0; }
175 static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
176 }
// Converts a jlong timeout into a timespec, absolute or relative;
// defined later in this file.
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);

// Thread Local Storage
// This is common to all Solaris platforms so it is defined here,
// in this common file.
// The declarations are in the os_cpu threadLS*.hpp files.
//
// Static member initialization for TLS
Thread* ThreadLocalStorage::_get_thread_cache[ThreadLocalStorage::_pd_cache_size] = {NULL};

#ifndef PRODUCT
// Percentage helper for the cache statistics printed below.
#define _PCT(n,d) ((100.0*(double)(n))/(double)(d))

int ThreadLocalStorage::_tcacheHit = 0;
int ThreadLocalStorage::_tcacheMiss = 0;

// Debug-build-only dump of the TLS cache hit/miss counters.
void ThreadLocalStorage::print_statistics() {
  int total = _tcacheMiss+_tcacheHit;
  tty->print_cr("Thread cache hits %d misses %d total %d percent %f\n",
                _tcacheHit, _tcacheMiss, total, _PCT(_tcacheHit, total));
}
#undef _PCT
#endif // PRODUCT
// Slow-path TLS lookup: resolve the current Thread* without the cache,
// sanity-check that the current stack pointer lies inside that thread's
// stack, then install the result in cache slot 'index' keyed by raw_id.
// Returns NULL if no Thread is associated with the caller.
Thread* ThreadLocalStorage::get_thread_via_cache_slowly(uintptr_t raw_id,
                                                        int index) {
  Thread *thread = get_thread_slow();
  if (thread != NULL) {
    address sp = os::current_stack_pointer();
    // Skip the containment check if _stack_base is not yet set (early in
    // thread startup) or if we are already in error reporting.
    guarantee(thread->_stack_base == NULL ||
              (sp <= thread->_stack_base &&
               sp >= thread->_stack_base - thread->_stack_size) ||
              is_error_reported(),
              "sp must be inside of selected thread stack");

    thread->set_self_raw_id(raw_id); // mark for quick retrieval
    _get_thread_cache[ index ] = thread;
  }
  return thread;
}
// A Thread-sized block of zeros used as the "no cached thread" sentinel:
// reading any field of it yields zero/NULL rather than stale data.
static const double all_zero[ sizeof(Thread) / sizeof(double) + 1 ] = {0};
#define NO_CACHED_THREAD ((Thread*)all_zero)

// Associates 'thread' (or clears the association when NULL) with the
// current OS thread, keeping the TLS slot and the cache consistent.
void ThreadLocalStorage::pd_set_thread(Thread* thread) {

  // Store the new value before updating the cache to prevent a race
  // between get_thread_via_cache_slowly() and this store operation.
  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);

  // Update thread cache with new thread if setting on thread create,
  // or NO_CACHED_THREAD (zeroed) thread if resetting thread on exit.
  uintptr_t raw = pd_raw_thread_id();
  int ix = pd_cache_index(raw);
  _get_thread_cache[ix] = thread == NULL ? NO_CACHED_THREAD : thread;
}
236 void ThreadLocalStorage::pd_init() {
237 for (int i = 0; i < _pd_cache_size; i++) {
238 _get_thread_cache[i] = NO_CACHED_THREAD;
239 }
240 }
// Invalidate all the caches (happens to be the same as pd_init):
// every slot goes back to the zeroed sentinel.
void ThreadLocalStorage::pd_invalidate_all() { pd_init(); }

#undef NO_CACHED_THREAD
247 // END Thread Local Storage
249 static inline size_t adjust_stack_size(address base, size_t size) {
250 if ((ssize_t)size < 0) {
251 // 4759953: Compensate for ridiculous stack size.
252 size = max_intx;
253 }
254 if (size > (size_t)base) {
255 // 4812466: Make sure size doesn't allow the stack to wrap the address space.
256 size = (size_t)base;
257 }
258 return size;
259 }
// Queries the current thread's stack segment via thr_stksegment() and
// returns it with a sanitized ss_size. Asserts (debug builds) that the
// call succeeded and that the local variable 'st' itself lies within the
// reported stack bounds.
static inline stack_t get_stack_info() {
  stack_t st;
  int retval = thr_stksegment(&st);
  // Check the return code BEFORE using 'st': on failure the struct is
  // uninitialized, and the original ordering passed garbage to
  // adjust_stack_size() before this assert could fire.
  assert(retval == 0, "incorrect return value from thr_stksegment");
  st.ss_size = adjust_stack_size((address)st.ss_sp, st.ss_size);
  assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
  assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
  return st;
}
// Returns the base (highest address) of the current thread's stack.
// For the primordial thread the result of the first thr_stksegment()
// call is cached in _main_stack_base and reused thereafter (4352906).
address os::current_stack_base() {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  bool is_primordial_thread = r;

  // Workaround 4352906, avoid calls to thr_stksegment by
  // thr_main after the first one (it looks like we trash
  // some data, causing the value for ss_sp to be incorrect).
  if (!is_primordial_thread || os::Solaris::_main_stack_base == NULL) {
    stack_t st = get_stack_info();
    if (is_primordial_thread) {
      // cache initial value of stack base
      os::Solaris::_main_stack_base = (address)st.ss_sp;
    }
    return (address)st.ss_sp;
  } else {
    guarantee(os::Solaris::_main_stack_base != NULL, "Attempt to use null cached stack base");
    return os::Solaris::_main_stack_base;
  }
}
292 size_t os::current_stack_size() {
293 size_t size;
295 int r = thr_main() ;
296 guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
297 if(!r) {
298 size = get_stack_info().ss_size;
299 } else {
300 struct rlimit limits;
301 getrlimit(RLIMIT_STACK, &limits);
302 size = adjust_stack_size(os::Solaris::_main_stack_base, (size_t)limits.rlim_cur);
303 }
304 // base may not be page aligned
305 address base = current_stack_base();
306 address bottom = (address)align_size_up((intptr_t)(base - size), os::vm_page_size());;
307 return (size_t)(base - bottom);
308 }
// Thread-safe localtime(): fills the caller-supplied 'res' via the
// POSIX reentrant variant instead of a shared static buffer.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
// interruptible infrastructure

// setup_interruptible saves the thread state before going into an
// interruptible system call.
// The saved state is used to restore the thread to
// its former state whether or not an interrupt is received.
// Used by classloader os::read
// os::restartable_read calls skip this layer and stay in _thread_in_native

void os::Solaris::setup_interruptible(JavaThread* thread) {

  JavaThreadState thread_state = thread->thread_state();

  assert(thread_state != _thread_blocked, "Coming from the wrong thread");
  assert(thread_state != _thread_in_native, "Native threads skip setup_interruptible");
  OSThread* osthread = thread->osthread();
  // Remember the pre-call state so cleanup_interruptible() can restore it.
  osthread->set_saved_interrupt_thread_state(thread_state);
  // Make the stack walkable while the thread is blocked in the syscall.
  thread->frame_anchor()->make_walkable(thread);
  ThreadStateTransition::transition(thread, thread_state, _thread_blocked);
}

// Version of setup_interruptible() for threads that are already in
// _thread_blocked. Used by os_sleep().
void os::Solaris::setup_interruptible_already_blocked(JavaThread* thread) {
  thread->frame_anchor()->make_walkable(thread);
}

// Convenience overload: resolves the current JavaThread from TLS, runs
// setup_interruptible() on it, and returns it for the matching cleanup.
JavaThread* os::Solaris::setup_interruptible() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  setup_interruptible(thread);
  return thread;
}
347 void os::Solaris::try_enable_extended_io() {
348 typedef int (*enable_extended_FILE_stdio_t)(int, int);
350 if (!UseExtendedFileIO) {
351 return;
352 }
354 enable_extended_FILE_stdio_t enabler =
355 (enable_extended_FILE_stdio_t) dlsym(RTLD_DEFAULT,
356 "enable_extended_FILE_stdio");
357 if (enabler) {
358 enabler(-1, -1);
359 }
360 }
#ifdef ASSERT

// Debug-only: returns the current JavaThread, asserting it is in
// _thread_in_native (interruptible I/O from native code needs no
// state transition).
JavaThread* os::Solaris::setup_interruptible_native() {
  JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
  return thread;
}

// Debug-only counterpart of setup_interruptible_native(): verifies the
// thread is still in _thread_in_native afterwards.
void os::Solaris::cleanup_interruptible_native(JavaThread* thread) {
  JavaThreadState thread_state = thread->thread_state();
  assert(thread_state == _thread_in_native, "Assumed thread_in_native");
}
#endif

// cleanup_interruptible reverses the effects of setup_interruptible
// setup_interruptible_already_blocked() does not need any cleanup.

void os::Solaris::cleanup_interruptible(JavaThread* thread) {
  OSThread* osthread = thread->osthread();

  // Restore the state saved by setup_interruptible().
  ThreadStateTransition::transition(thread, _thread_blocked, osthread->saved_interrupt_thread_state());
}

// I/O interruption related counters called in _INTERRUPTIBLE

void os::Solaris::bump_interrupted_before_count() {
  RuntimeService::record_interrupted_before_count();
}

void os::Solaris::bump_interrupted_during_count() {
  RuntimeService::record_interrupted_during_count();
}
// Cached count of online processors; refreshed by active_processor_count().
static int _processors_online = 0;

// Limit and running count of VM-created OS threads.
jint os::Solaris::_os_thread_limit = 0;
volatile jint os::Solaris::_os_thread_count = 0;
402 julong os::available_memory() {
403 return Solaris::available_memory();
404 }
406 julong os::Solaris::available_memory() {
407 return (julong)sysconf(_SC_AVPHYS_PAGES) * os::vm_page_size();
408 }
// Total physical memory in bytes, set once by initialize_system_info().
julong os::Solaris::_physical_memory = 0;

julong os::physical_memory() {
  return Solaris::physical_memory();
}

// gethrtime() bookkeeping used by the time code later in this file.
static hrtime_t first_hrtime = 0;
static const hrtime_t hrtime_hz = 1000*1000*1000;  // hrtime ticks per second (ns)
static volatile hrtime_t max_hrtime = 0;
// One-time capture of basic system parameters: configured and online
// processor counts, and total physical memory.
void os::Solaris::initialize_system_info() {
  set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
  _processors_online = sysconf (_SC_NPROCESSORS_ONLN);
  _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
}
// Number of CPUs actually available to this process: the size of its
// processor set if it runs in one, otherwise the number of online CPUs.
// Also refreshes the _processors_online cache when a pset is found.
int os::active_processor_count() {
  int online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
  pid_t pid = getpid();
  psetid_t pset = PS_NONE;
  // Are we running in a processor set or is there any processor set around?
  if (pset_bind(PS_QUERY, P_PID, pid, &pset) == 0) {
    uint_t pset_cpus;
    // Query the number of cpus available to us.
    if (pset_info(pset, NULL, &pset_cpus, NULL) == 0) {
      // NOTE(review): unsigned pset_cpus is compared against signed
      // online_cpus here; harmless for realistic CPU counts, but worth
      // confirming.
      assert(pset_cpus > 0 && pset_cpus <= online_cpus, "sanity check");
      _processors_online = pset_cpus;
      return pset_cpus;
    }
  }
  // Otherwise return number of online cpus
  return online_cpus;
}
// Fills *id_array (C-heap allocated) with the processor ids belonging to
// 'pset' and *id_length with their count. Returns false on failure; note
// that *id_array may have been allocated even then, so the caller is
// responsible for freeing it in all cases.
static bool find_processors_in_pset(psetid_t pset,
                                    processorid_t** id_array,
                                    uint_t* id_length) {
  bool result = false;
  // Find the number of processors in the processor set.
  if (pset_info(pset, NULL, id_length, NULL) == 0) {
    // Make up an array to hold their ids.
    *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
    // Fill in the array with their processor ids.
    if (pset_info(pset, NULL, id_length, *id_array) == 0) {
      result = true;
    }
  }
  return result;
}
// Callers of find_processors_online() must tolerate imprecise results --
// the system configuration can change asynchronously because of DR
// or explicit psradm operations.
//
// We also need to take care that the loop (below) terminates as the
// number of processors online can change between the _SC_NPROCESSORS_ONLN
// request and the loop that builds the list of processor ids. Unfortunately
// there's no reliable way to determine the maximum valid processor id,
// so we use a manifest constant, MAX_PROCESSOR_ID, instead. See p_online
// man pages, which claim the processor id set is "sparse, but
// not too sparse". MAX_PROCESSOR_ID is used to ensure that we eventually
// exit the loop.
//
// In the future we'll be able to use sysconf(_SC_CPUID_MAX), but that's
// not available on S8.0.

// Fills *id_array (C-heap allocated, caller frees) with the ids of the
// processors currently online and *id_length with how many were found.
// Always returns true; the result may be smaller than requested (even
// empty) if the configuration changes while scanning.
static bool find_processors_online(processorid_t** id_array,
                                   uint* id_length) {
  const processorid_t MAX_PROCESSOR_ID = 100000 ;
  // Find the number of processors online.
  *id_length = sysconf(_SC_NPROCESSORS_ONLN);
  // Make up an array to hold their ids.
  *id_array = NEW_C_HEAP_ARRAY(processorid_t, *id_length, mtInternal);
  // Processors need not be numbered consecutively.
  long found = 0;
  processorid_t next = 0;
  while (found < *id_length && next < MAX_PROCESSOR_ID) {
    processor_info_t info;
    if (processor_info(next, &info) == 0) {
      // NB, PI_NOINTR processors are effectively online ...
      if (info.pi_state == P_ONLINE || info.pi_state == P_NOINTR) {
        (*id_array)[found] = next;
        found += 1;
      }
    }
    next += 1;
  }
  if (found < *id_length) {
    // The loop above didn't identify the expected number of processors.
    // We could always retry the operation, calling sysconf(_SC_NPROCESSORS_ONLN)
    // and re-running the loop, above, but there's no guarantee of progress
    // if the system configuration is in flux. Instead, we just return what
    // we've got. Note that in the worst case find_processors_online() could
    // return an empty set. (As a fall-back in the case of the empty set we
    // could just return the ID of the current processor).
    *id_length = found ;
  }

  return true;
}
// Picks 'distribution_length' processor ids out of 'id_array' and writes
// them into 'distribution', spreading the choices across "boards" of
// ProcessDistributionStride processors each. Returns false only when
// there are fewer available processors than requested.
static bool assign_distribution(processorid_t* id_array,
                                uint id_length,
                                uint* distribution,
                                uint distribution_length) {
  // We assume we can assign processorid_t's to uint's.
  assert(sizeof(processorid_t) == sizeof(uint),
         "can't convert processorid_t to uint");
  // Quick check to see if we won't succeed.
  if (id_length < distribution_length) {
    return false;
  }
  // Assign processor ids to the distribution.
  // Try to shuffle processors to distribute work across boards,
  // assuming 4 processors per board.
  const uint processors_per_board = ProcessDistributionStride;
  // Find the maximum processor id.
  processorid_t max_id = 0;
  for (uint m = 0; m < id_length; m += 1) {
    max_id = MAX2(max_id, id_array[m]);
  }
  // The next id, to limit loops.
  const processorid_t limit_id = max_id + 1;
  // Make up markers for available processors.
  // available_id[p] is true while processor p may still be handed out.
  bool* available_id = NEW_C_HEAP_ARRAY(bool, limit_id, mtInternal);
  for (uint c = 0; c < limit_id; c += 1) {
    available_id[c] = false;
  }
  for (uint a = 0; a < id_length; a += 1) {
    available_id[id_array[a]] = true;
  }
  // Step by "boards", then by "slot", copying to "assigned".
  // NEEDS_CLEANUP: The assignment of processors should be stateful,
  //                remembering which processors have been assigned by
  //                previous calls, etc., so as to distribute several
  //                independent calls of this method. What we'd like is
  //                It would be nice to have an API that let us ask
  //                how many processes are bound to a processor,
  //                but we don't have that, either.
  //                In the short term, "board" is static so that
  //                subsequent distributions don't all start at board 0.
  static uint board = 0;
  uint assigned = 0;
  // Until we've found enough processors ....
  // Termination: distribution_length <= id_length == number of available
  // ids, so each outer iteration can always eventually claim a processor.
  while (assigned < distribution_length) {
    // ... find the next available processor in the board.
    for (uint slot = 0; slot < processors_per_board; slot += 1) {
      uint try_id = board * processors_per_board + slot;
      if ((try_id < limit_id) && (available_id[try_id] == true)) {
        distribution[assigned] = try_id;
        available_id[try_id] = false;
        assigned += 1;
        break;
      }
    }
    board += 1;
    if (board * processors_per_board + 0 >= limit_id) {
      board = 0;
    }
  }
  if (available_id != NULL) {
    FREE_C_HEAP_ARRAY(bool, available_id, mtInternal);
  }
  return true;
}
577 void os::set_native_thread_name(const char *name) {
578 // Not yet implemented.
579 return;
580 }
// Chooses 'length' processor ids for spreading bound processes across the
// machine, writing them into 'distribution'. Uses the current processor
// set when bound to one, otherwise all online processors. Returns false
// if not enough processors are available. Best-effort: the configuration
// can change concurrently with the query.
bool os::distribute_processes(uint length, uint* distribution) {
  bool result = false;
  // Find the processor id's of all the available CPUs.
  processorid_t* id_array = NULL;
  uint id_length = 0;
  // There are some races between querying information and using it,
  // since processor sets can change dynamically.
  psetid_t pset = PS_NONE;
  // Are we running in a processor set?
  if ((pset_bind(PS_QUERY, P_PID, P_MYID, &pset) == 0) && pset != PS_NONE) {
    result = find_processors_in_pset(pset, &id_array, &id_length);
  } else {
    result = find_processors_online(&id_array, &id_length);
  }
  if (result == true) {
    if (id_length >= length) {
      result = assign_distribution(id_array, id_length, distribution, length);
    } else {
      result = false;
    }
  }
  // The helpers may allocate the array even on failure; always free it.
  if (id_array != NULL) {
    FREE_C_HEAP_ARRAY(processorid_t, id_array, mtInternal);
  }
  return result;
}
// Binds the calling LWP to the given processor. Returns true on success.
bool os::bind_to_processor(uint processor_id) {
  // We assume that a processorid_t can be stored in a uint.
  assert(sizeof(uint) == sizeof(processorid_t),
         "can't convert uint to processorid_t");
  int bind_result =
    processor_bind(P_LWPID,                       // bind LWP.
                   P_MYID,                        // bind current LWP.
                   (processorid_t) processor_id,  // id.
                   NULL);                         // don't return old binding.
  return (bind_result == 0);
}
621 bool os::getenv(const char* name, char* buffer, int len) {
622 char* val = ::getenv( name );
623 if ( val == NULL
624 || strlen(val) + 1 > len ) {
625 if (len > 0) buffer[0] = 0; // return a null string
626 return false;
627 }
628 strcpy( buffer, val );
629 return true;
630 }
// Return true if the process runs with elevated privileges, i.e. its real
// and effective user or group ids differ (a setuid/setgid launch). The
// answer is computed once and cached.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    // Benign race: concurrent first calls compute the same stable value.
    init = true;
  }
  return privileges;
}
void os::init_system_properties_values() {
  // The next steps are taken in the product version:
  //
  // Obtain the JAVA_HOME value from the location of libjvm.so.
  // This library should be located at:
  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
  //
  // If "/jre/lib/" appears at the right place in the path, then we
  // assume libjvm.so is installed in a JDK and we use this path.
  //
  // Otherwise exit with message: "Could not create the Java virtual machine."
  //
  // The following extra steps are taken in the debugging version:
  //
  // If "/jre/lib/" does NOT appear at the right place in the path
  // instead of exit check for $JAVA_HOME environment variable.
  //
  // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
  // then we append a fake suffix "hotspot/libjvm.so" to this path so
  // it looks like libjvm.so is installed there
  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
  //
  // Otherwise exit.
  //
  // Important note: if the location of libjvm.so changes this
  // code needs to be changed accordingly.

  // Base path of extensions installed on the system.
#define SYS_EXT_DIR "/usr/jdk/packages"
#define EXTENSIONS_DIR "/lib/ext"
#define ENDORSED_DIR "/lib/endorsed"

  char cpu_arch[12];
  // Buffer that fits several sprintfs.
  // Note that the space for the colon and the trailing null are provided
  // by the nulls included by the sizeof operator.
  // Use sizeof(cpu_arch), not strlen(cpu_arch): cpu_arch is not filled in
  // (by sysinfo, below) until after this computation, so strlen would read
  // uninitialized memory. sizeof is a safe upper bound on the arch string.
  const size_t bufsize =
    MAX4((size_t)MAXPATHLEN, // For dll_dir & friends.
         sizeof(SYS_EXT_DIR) + sizeof("/lib/") + sizeof(cpu_arch), // invariant ld_library_path
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR) + sizeof(SYS_EXT_DIR) + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0'; // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0'; // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0'; // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.
  {
    // Use dlinfo() to determine the correct java.library.path.
    //
    // If we're launched by the Java launcher, and the user
    // does not set java.library.path explicitly on the commandline,
    // the Java launcher sets LD_LIBRARY_PATH for us and unsets
    // LD_LIBRARY_PATH_32 and LD_LIBRARY_PATH_64. In this case
    // dlinfo returns LD_LIBRARY_PATH + crle settings (including
    // /usr/lib), which is exactly what we want.
    //
    // If the user does set java.library.path, it completely
    // overwrites this setting, and always has.
    //
    // If we're not launched by the Java launcher, we may
    // get here with any/all of the LD_LIBRARY_PATH[_32|64]
    // settings. Again, dlinfo does exactly what we want.

    Dl_serinfo info_sz, *info = &info_sz;
    Dl_serpath *path;
    char *library_path;
    char *common_path = buf;

    // Determine search path count and required buffer size.
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFOSIZE, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, buf, mtInternal);
      vm_exit_during_initialization("dlinfo SERINFOSIZE request", dlerror());
    }

    // Allocate new buffer and initialize.
    info = (Dl_serinfo*)NEW_C_HEAP_ARRAY(char, info_sz.dls_size, mtInternal);
    info->dls_size = info_sz.dls_size;
    info->dls_cnt = info_sz.dls_cnt;

    // Obtain search path information.
    if (dlinfo(RTLD_SELF, RTLD_DI_SERINFO, (void *)info) == -1) {
      FREE_C_HEAP_ARRAY(char, buf, mtInternal);
      FREE_C_HEAP_ARRAY(char, info, mtInternal);
      vm_exit_during_initialization("dlinfo SERINFO request", dlerror());
    }

    path = &info->dls_serpath[0];

    // Note: Due to a legacy implementation, most of the library path
    // is set in the launcher. This was to accomodate linking restrictions
    // on legacy Solaris implementations (which are no longer supported).
    // Eventually, all the library path setting will be done here.
    //
    // However, to prevent the proliferation of improperly built native
    // libraries, the new path component /usr/jdk/packages is added here.

    // Determine the actual CPU architecture.
    sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
    // If we are a 64-bit vm, perform the following translations:
    //   sparc   -> sparcv9
    //   i386    -> amd64
    if (strcmp(cpu_arch, "sparc") == 0) {
      strcat(cpu_arch, "v9");
    } else if (strcmp(cpu_arch, "i386") == 0) {
      strcpy(cpu_arch, "amd64");
    }
#endif

    // Construct the invariant part of ld_library_path.
    sprintf(common_path, SYS_EXT_DIR "/lib/%s", cpu_arch);

    // Struct size is more than sufficient for the path components obtained
    // through the dlinfo() call, so only add additional space for the path
    // components explicitly added here.
    size_t library_path_size = info->dls_size + strlen(common_path);
    library_path = (char *)NEW_C_HEAP_ARRAY(char, library_path_size, mtInternal);
    library_path[0] = '\0';

    // Construct the desired Java library path from the linker's library
    // search path.
    //
    // For compatibility, it is optimal that we insert the additional path
    // components specific to the Java VM after those components specified
    // in LD_LIBRARY_PATH (if any) but before those added by the ld.so
    // infrastructure.
    if (info->dls_cnt == 0) { // Not sure this can happen, but allow for it.
      strcpy(library_path, common_path);
    } else {
      int inserted = 0;
      int i;
      for (i = 0; i < info->dls_cnt; i++, path++) {
        uint_t flags = path->dls_flags & LA_SER_MASK;
        if (((flags & LA_SER_LIBPATH) == 0) && !inserted) {
          strcat(library_path, common_path);
          strcat(library_path, os::path_separator());
          inserted = 1;
        }
        strcat(library_path, path->dls_name);
        strcat(library_path, os::path_separator());
      }
      // Eliminate trailing path separator.
      library_path[strlen(library_path)-1] = '\0';
    }

    // happens before argument parsing - can't use a trace flag
    // tty->print_raw("init_system_properties_values: native lib path: ");
    // tty->print_raw_cr(library_path);

    // Callee copies into its own buffer.
    Arguments::set_library_path(library_path);

    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
    FREE_C_HEAP_ARRAY(char, info, mtInternal);
  }

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR ":" SYS_EXT_DIR EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef SYS_EXT_DIR
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}
// Triggers a debugger breakpoint via the platform BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
847 bool os::obsolete_option(const JavaVMOption *option)
848 {
849 if (!strncmp(option->optionString, "-Xt", 3)) {
850 return true;
851 } else if (!strncmp(option->optionString, "-Xtm", 4)) {
852 return true;
853 } else if (!strncmp(option->optionString, "-Xverifyheap", 12)) {
854 return true;
855 } else if (!strncmp(option->optionString, "-Xmaxjitcodesize", 16)) {
856 return true;
857 }
858 return false;
859 }
// Returns true if 'sp' lies within [stack end, stack base) of 'thread'.
// Stacks grow downward, so the end (lowest valid address) is base - size;
// the base itself is one past the highest usable address and is rejected.
bool os::Solaris::valid_stack_address(Thread* thread, address sp) {
  address stackStart = (address)thread->stack_base();
  address stackEnd = (address)(stackStart - (address)thread->stack_size());
  if (sp < stackStart && sp >= stackEnd ) return true;
  return false;
}
// Empty hook with C linkage: set a debugger breakpoint on this symbol
// to stop the VM at places that call breakpoint().
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
// Thread id of the primordial thread (set during VM startup).
static thread_t main_thread;

// Thread start routine for all new Java threads: records LWP identity,
// applies NUMA placement and any pre-set priority, installs the HotSpot
// signal mask, then runs the Thread and tears down on return.
extern "C" void* java_start(void* thread_addr) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  Thread* thread = (Thread*)thread_addr;
  OSThread* osthr = thread->osthread();

  osthr->set_lwp_id(_lwp_self());        // Store lwp in case we are bound
  thread->_schedctl = (void *) schedctl_init();

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // If the creator called set priority before we started,
  // we need to call set_native_priority now that we have an lwp.
  // We used to get the priority from thr_getprio (we called
  // thr_setprio way back in create_thread) and pass it to
  // set_native_priority, but Solaris scales the priority
  // in java_to_os_priority, so when we read it back here,
  // we pass trash to set_native_priority instead of what's
  // in java_to_os_priority. So we save the native priority
  // in the osThread and recall it here.

  if (osthr->thread_id() != -1) {
    if (UseThreadPriorities) {
      // (The unused outer 'int prio;' that shadowed this declaration
      // has been removed.)
      int prio = osthr->native_priority();
      if (ThreadPriorityVerbose) {
        tty->print_cr("Starting Thread " INTPTR_FORMAT ", LWP is "
                      INTPTR_FORMAT ", setting priority: %d\n",
                      osthr->thread_id(), osthr->lwp_id(), prio);
      }
      os::set_native_priority(thread, prio);
    }
  } else if (ThreadPriorityVerbose) {
    warning("Can't set priority in _start routine, thread id hasn't been set\n");
  }

  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  // initialize signal mask for this thread
  os::Solaris::hotspot_sigmask(thread);

  thread->run();

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::dec code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::Solaris::_os_thread_count);
  }

  if (UseDetachedThreads) {
    thr_exit(NULL);
    ShouldNotReachHere();
  }
  return NULL;
}
// Builds an OSThread wrapper for an already-existing Solaris thread
// (used when attaching the primordial or an externally-created thread).
// Returns NULL on allocation failure. The returned OSThread is left in
// state INITIALIZED; the caller is responsible for advancing the state.
static OSThread* create_os_thread(Thread* thread, thread_t thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(thread_id);
  osthread->set_lwp_id(_lwp_self());
  thread->_schedctl = (void *) schedctl_init () ;

  // Record NUMA locality group if available.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  if ( ThreadPriorityVerbose ) {
    tty->print_cr("In create_os_thread, Thread " INTPTR_FORMAT ", LWP is " INTPTR_FORMAT "\n",
                  osthread->thread_id(), osthread->lwp_id() );
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}
// Establishes the HotSpot signal mask for the given thread: saves the
// caller's mask for later restoration (see os::free_thread), unblocks the
// signals the VM must receive, and arranges BREAK_SIGNAL handling so that
// only the VM thread receives it.
void os::Solaris::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask
  sigset_t sigmask;
  thr_sigsetmask(SIG_SETMASK, NULL, &sigmask);
  OSThread *osthread = thread->osthread();
  osthread->set_caller_sigmask(sigmask);

  // Unblock the signals every thread needs (SIGSEGV, SIGBUS, etc. — see
  // signal_sets_init for the full set).
  thr_sigsetmask(SIG_UNBLOCK, os::Solaris::unblocked_signals(), NULL);
  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      thr_sigsetmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      assert(!sigismember(vm_signals(), SIGINT), "SIGINT should not be blocked");
      thr_sigsetmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
// Wraps the calling (externally created) thread in an OSThread so it can run
// as a JavaThread, e.g. via JNI AttachCurrentThread. Returns false if the
// OSThread could not be allocated.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  OSThread* osthread = create_os_thread(thread, thr_self());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);
  thread->set_osthread(osthread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}
// Wraps the primordial (launcher) thread in an OSThread. Uses the cached
// main_thread id recorded at startup, and memoizes the result in
// _starting_thread so repeated calls reuse the same OSThread.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Solaris::hotspot_sigmask(thread);

  return true;
}
// _T2_libthread is true if we believe we are running with the newer
// SunSoft lwp/libthread.so (2.8 patch, 2.9 default).
// Defaults to false; set elsewhere during OS detection (not visible in
// this chunk).
bool os::Solaris::_T2_libthread = false;
// Creates a new native Solaris thread for the given VM Thread object.
// stack_size == 0 means "pick a default based on thread type and flags".
// The thread is created SUSPENDED; os::pd_start_thread resumes it later.
// Returns false on allocation or thr_create failure (osthread cleaned up).
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  if ( ThreadPriorityVerbose ) {
    char *thrtyp;
    switch ( thr_type ) {
      case vm_thread:
        thrtyp = (char *)"vm";
        break;
      case cgc_thread:
        thrtyp = (char *)"cgc";
        break;
      case pgc_thread:
        thrtyp = (char *)"pgc";
        break;
      case java_thread:
        thrtyp = (char *)"java";
        break;
      case compiler_thread:
        thrtyp = (char *)"compiler";
        break;
      case watcher_thread:
        thrtyp = (char *)"watcher";
        break;
      default:
        thrtyp = (char *)"unknown";
        break;
    }
    tty->print_cr("In create_thread, creating a %s thread\n", thrtyp);
  }

  // Calculate stack size if it's not specified by caller.
  if (stack_size == 0) {
    // The default stack size 1M (2M for LP64): BytesPerWord is 4 or 8,
    // so (BytesPerWord >> 2) is 1 or 2.
    stack_size = (BytesPerWord >> 2) * K * K;

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }
  stack_size = MAX2(stack_size, os::Solaris::min_stack_allowed);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  if (os::Solaris::_os_thread_count > os::Solaris::_os_thread_limit) {
    // We got lots of threads. Check if we still have some address space left.
    // Need at least 20Mb of unreserved address space (VirtualMemoryBangSize
    // below). We do check by trying to reserve some.
    const size_t VirtualMemoryBangSize = 20*K*K;
    char* mem = os::reserve_memory(VirtualMemoryBangSize);
    if (mem == NULL) {
      delete osthread;
      return false;
    } else {
      // Release the memory again
      os::release_memory(mem, VirtualMemoryBangSize);
    }
  }

  // Setup osthread because the child thread may need it.
  thread->set_osthread(osthread);

  // Create the Solaris thread
  // explicit THR_BOUND for T2_libthread case in case
  // that assumption is not accurate, but our alternate signal stack
  // handling is based on it which must have bound threads
  thread_t tid = 0;
  long flags = (UseDetachedThreads ? THR_DETACHED : 0) | THR_SUSPENDED
               | ((UseBoundThreads || os::Solaris::T2_libthread() ||
                   (thr_type == vm_thread) ||
                   (thr_type == cgc_thread) ||
                   (thr_type == pgc_thread) ||
                   (thr_type == compiler_thread && BackgroundCompilation)) ?
                  THR_BOUND : 0);
  int status;

  // 4376845 -- libthread/kernel don't provide enough LWPs to utilize all CPUs.
  //
  // On multiprocessors systems, libthread sometimes under-provisions our
  // process with LWPs. On a 30-way systems, for instance, we could have
  // 50 user-level threads in ready state and only 2 or 3 LWPs assigned
  // to our process. This can result in under utilization of PEs.
  // I suspect the problem is related to libthread's LWP
  // pool management and to the kernel's SIGBLOCKING "last LWP parked"
  // upcall policy.
  //
  // The following code is palliative -- it attempts to ensure that our
  // process has sufficient LWPs to take advantage of multiple PEs.
  // Proper long-term cures include using user-level threads bound to LWPs
  // (THR_BOUND) or using LWP-based synchronization. Note that there is a
  // slight timing window with respect to sampling _os_thread_count, but
  // the race is benign. Also, we should periodically recompute
  // _processors_online as the min of SC_NPROCESSORS_ONLN and the
  // the number of PEs in our partition. You might be tempted to use
  // THR_NEW_LWP here, but I'd recommend against it as that could
  // result in undesirable growth of the libthread's LWP pool.
  // The fix below isn't sufficient; for instance, it doesn't take into count
  // LWPs parked on IO. It does, however, help certain CPU-bound benchmarks.
  //
  // Some pathologies this scheme doesn't handle:
  // * Threads can block, releasing the LWPs. The LWPs can age out.
  //   When a large number of threads become ready again there aren't
  //   enough LWPs available to service them. This can occur when the
  //   number of ready threads oscillates.
  // * LWPs/Threads park on IO, thus taking the LWP out of circulation.
  //
  // Finally, we should call thr_setconcurrency() periodically to refresh
  // the LWP pool and thwart the LWP age-out mechanism.
  // The "+3" term provides a little slop -- we want to slightly overprovision.

  if (AdjustConcurrency && os::Solaris::_os_thread_count < (_processors_online+3)) {
    if (!(flags & THR_BOUND)) {
      thr_setconcurrency (os::Solaris::_os_thread_count);       // avoid starvation
    }
  }
  // Although this doesn't hurt, we should warn of undefined behavior
  // when using unbound T1 threads with schedctl().  This should never
  // happen, as the compiler and VM threads are always created bound
  DEBUG_ONLY(
      if ((VMThreadHintNoPreempt || CompilerThreadHintNoPreempt) &&
          (!os::Solaris::T2_libthread() && (!(flags & THR_BOUND))) &&
          ((thr_type == vm_thread) || (thr_type == cgc_thread) ||
           (thr_type == pgc_thread) || (thr_type == compiler_thread && BackgroundCompilation))) {
        warning("schedctl behavior undefined when Compiler/VM/GC Threads are Unbound");
      }
  );

  // Mark that we don't have an lwp or thread id yet.
  // In case we attempt to set the priority before the thread starts.
  osthread->set_lwp_id(-1);
  osthread->set_thread_id(-1);

  status = thr_create(NULL, stack_size, java_start, thread, flags, &tid);
  if (status != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("os::create_thread");
    }
    thread->set_osthread(NULL);
    // Need to clean up stuff we've allocated so far
    delete osthread;
    return false;
  }

  Atomic::inc(&os::Solaris::_os_thread_count);

  // Store info on the Solaris thread into the OSThread
  osthread->set_thread_id(tid);

  // Remember that we created this thread so we can set priority on it
  osthread->set_vm_created();

  // Set the default thread priority. If using bound threads, setting
  // lwp priority will be delayed until thread start.
  set_native_priority(thread,
                      DefaultThreadPriority == -1 ?
                        java_to_os_priority[NormPriority] :
                        DefaultThreadPriority);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
/* defined for >= Solaris 10. This allows builds on earlier versions
 * of Solaris to take advantage of the newly reserved Solaris JVM signals
 * With SIGJVM1, SIGJVM2, INTERRUPT_SIGNAL is SIGJVM1, ASYNC_SIGNAL is SIGJVM2
 * and -XX:+UseAltSigs does nothing since these should have no conflict
 */
// Fallback numeric values matching the Solaris 10 <signal.h> definitions;
// whether these are actually usable is checked at runtime (isJVM1available).
#if !defined(SIGJVM1)
#define SIGJVM1 39
#define SIGJVM2 40
#endif
// Guard so signal_sets_init() runs only once (debug builds only).
debug_only(static bool signal_sets_initialized = false);
// Signal sets computed by signal_sets_init(); exposed via the accessor
// functions below.
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
// The signals used for thread interrupt / async events; may be retargeted
// to SIGJVM1/SIGJVM2 or the ALT_* signals by signal_sets_init().
int os::Solaris::_SIGinterrupt = INTERRUPT_SIGNAL;
int os::Solaris::_SIGasync = ASYNC_SIGNAL;
1241 bool os::Solaris::is_sig_ignored(int sig) {
1242 struct sigaction oact;
1243 sigaction(sig, (struct sigaction*)NULL, &oact);
1244 void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
1245 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
1246 if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
1247 return true;
1248 else
1249 return false;
1250 }
// Note: SIGRTMIN is a macro that calls sysconf() so it will
// dynamically detect SIGRTMIN value for the system at runtime, not buildtime
// Returns true when the reserved JVM signals sit below the real-time signal
// range and are therefore safe to use.
static bool isJVM1available() {
  return SIGJVM1 < SIGRTMIN;
}
1258 void os::Solaris::signal_sets_init() {
1259 // Should also have an assertion stating we are still single-threaded.
1260 assert(!signal_sets_initialized, "Already initialized");
1261 // Fill in signals that are necessarily unblocked for all threads in
1262 // the VM. Currently, we unblock the following signals:
1263 // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
1264 // by -Xrs (=ReduceSignalUsage));
1265 // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
1266 // other threads. The "ReduceSignalUsage" boolean tells us not to alter
1267 // the dispositions or masks wrt these signals.
1268 // Programs embedding the VM that want to use the above signals for their
1269 // own purposes must, at this time, use the "-Xrs" option to prevent
1270 // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
1271 // (See bug 4345157, and other related bugs).
1272 // In reality, though, unblocking these signals is really a nop, since
1273 // these signals are not blocked by default.
1274 sigemptyset(&unblocked_sigs);
1275 sigemptyset(&allowdebug_blocked_sigs);
1276 sigaddset(&unblocked_sigs, SIGILL);
1277 sigaddset(&unblocked_sigs, SIGSEGV);
1278 sigaddset(&unblocked_sigs, SIGBUS);
1279 sigaddset(&unblocked_sigs, SIGFPE);
1281 if (isJVM1available) {
1282 os::Solaris::set_SIGinterrupt(SIGJVM1);
1283 os::Solaris::set_SIGasync(SIGJVM2);
1284 } else if (UseAltSigs) {
1285 os::Solaris::set_SIGinterrupt(ALT_INTERRUPT_SIGNAL);
1286 os::Solaris::set_SIGasync(ALT_ASYNC_SIGNAL);
1287 } else {
1288 os::Solaris::set_SIGinterrupt(INTERRUPT_SIGNAL);
1289 os::Solaris::set_SIGasync(ASYNC_SIGNAL);
1290 }
1292 sigaddset(&unblocked_sigs, os::Solaris::SIGinterrupt());
1293 sigaddset(&unblocked_sigs, os::Solaris::SIGasync());
1295 if (!ReduceSignalUsage) {
1296 if (!os::Solaris::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
1297 sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
1298 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
1299 }
1300 if (!os::Solaris::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
1301 sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
1302 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
1303 }
1304 if (!os::Solaris::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
1305 sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
1306 sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
1307 }
1308 }
1309 // Fill in signals that are blocked by all but the VM thread.
1310 sigemptyset(&vm_sigs);
1311 if (!ReduceSignalUsage)
1312 sigaddset(&vm_sigs, BREAK_SIGNAL);
1313 debug_only(signal_sets_initialized = true);
1315 // For diagnostics only used in run_periodic_checks
1316 sigemptyset(&check_signal_done);
1317 }
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
// These are signals that are blocked during cond_wait to allow debugger in.
// Valid only after signal_sets_init() has run.
sigset_t* os::Solaris::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
// std::terminate handler (installed in os::initialize_thread): routes an
// uncaught C++ exception through the VM's fatal-error reporting machinery
// instead of silently aborting.
void _handle_uncaught_cxx_exception() {
  VMError err("An uncaught C++ exception");
  err.report_and_die();
}
// First crack at OS-specific initialization, from inside the new thread.
// For the main thread (thr_main() == 1) this computes and records the usable
// stack size; for all threads it initializes FPU state and installs the
// uncaught-C++-exception terminate handler.
void os::initialize_thread(Thread* thr) {
  int r = thr_main() ;
  guarantee (r == 0 || r == 1, "CR6501650 or CR6493689") ;
  if (r) {
    JavaThread* jt = (JavaThread *)thr;
    assert(jt != NULL,"Sanity check");
    size_t stack_size;
    address base = jt->stack_base();
    if (Arguments::created_by_java_launcher()) {
      // Use 2MB to allow for Solaris 7 64 bit mode.
      stack_size = JavaThread::stack_size_at_create() == 0
        ? 2048*K : JavaThread::stack_size_at_create();

      // There are rare cases when we may have already used more than
      // the basic stack size allotment before this method is invoked.
      // Attempt to allow for a normally sized java_stack.
      // The address of the local 'stack_size' approximates the current
      // stack pointer, so (base - &stack_size) is how much we've consumed.
      size_t current_stack_offset = (size_t)(base - (address)&stack_size);
      stack_size += ReservedSpace::page_align_size_down(current_stack_offset);
    } else {
      // 6269555: If we were not created by a Java launcher, i.e. if we are
      // running embedded in a native application, treat the primordial thread
      // as much like a native attached thread as possible.  This means using
      // the current stack size from thr_stksegment(), unless it is too large
      // to reliably setup guard pages.  A reasonable max size is 8MB.
      size_t current_size = current_stack_size();
      // This should never happen, but just in case....
      if (current_size == 0) current_size = 2 * K * K;
      stack_size = current_size > (8 * K * K) ? (8 * K * K) : current_size;
    }
    // Round the low end up to a page boundary and recompute the usable size.
    address bottom = (address)align_size_up((intptr_t)(base - stack_size), os::vm_page_size());;
    stack_size = (size_t)(base - bottom);

    assert(stack_size > 0, "Stack size calculation problem");

    if (stack_size > jt->stack_size()) {
      NOT_PRODUCT(
        struct rlimit limits;
        getrlimit(RLIMIT_STACK, &limits);
        size_t size = adjust_stack_size(base, (size_t)limits.rlim_cur);
        assert(size >= jt->stack_size(), "Stack size problem in main thread");
      )
      tty->print_cr(
        "Stack size of %d Kb exceeds current limit of %d Kb.\n"
        "(Stack sizes are rounded up to a multiple of the system page size.)\n"
        "See limit(1) to increase the stack size limit.",
        stack_size / K, jt->stack_size() / K);
      vm_exit(1);
    }
    assert(jt->stack_size() >= stack_size,
          "Attempt to map more stack than was allocated");
    jt->set_stack_size(stack_size);
  }

  // 5/22/01: Right now alternate signal stacks do not handle
  // throwing stack overflow exceptions, see bug 4463178
  // Until a fix is found for this, T2 will NOT imply alternate signal
  // stacks.
  // If using T2 libthread threads, install an alternate signal stack.
  // Because alternate stacks associate with LWPs on Solaris,
  // see sigaltstack(2), if using UNBOUND threads, or if UseBoundThreads
  // we prefer to explicitly stack bang.
  // If not using T2 libthread, but using UseBoundThreads any threads
  // (primordial thread, jni_attachCurrentThread) we do not create,
  // probably are not bound, therefore they can not have an alternate
  // signal stack. Since our stack banging code is generated and
  // is shared across threads, all threads must be bound to allow
  // using alternate signal stacks.  The alternative is to interpose
  // on _lwp_create to associate an alt sig stack with each LWP,
  // and this could be a problem when the JVM is embedded.
  // We would prefer to use alternate signal stacks with T2
  // Since there is currently no accurate way to detect T2
  // we do not. Assuming T2 when running T1 causes sig 11s or assertions
  // on installing alternate signal stacks

  // 05/09/03: removed alternate signal stack support for Solaris
  // The alternate signal stack mechanism is no longer needed to
  // handle stack overflow. This is now handled by allocating
  // guard pages (red zone) and stackbanging.
  // Initially the alternate signal stack mechanism was removed because
  // it did not work with T1 llibthread. Alternate
  // signal stacks MUST have all threads bound to lwps. Applications
  // can create their own threads and attach them without their being
  // bound under T1. This is frequently the case for the primordial thread.
  // If we were ever to reenable this mechanism we would need to
  // use the dynamic check for T2 libthread.

  os::Solaris::init_thread_fpu_state();
  std::set_terminate(_handle_uncaught_cxx_exception);
}
// Free Solaris resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "os::free_thread but osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  // The main thread must take the VMThread down synchronously
  // before the main thread exits and frees up CodeHeap
  guarantee((Thread::current()->osthread() == osthread
     || (osthread == VMThread::vm_thread()->osthread())), "os::free_thread but not current thread");
  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask (saved in hotspot_sigmask).
    sigset_t sigmask = osthread->caller_sigmask();
    thr_sigsetmask(SIG_SETMASK, &sigmask, NULL);
  }
  delete osthread;
}
// Resume a thread created suspended by os::create_thread (thr_create was
// called with THR_SUSPENDED); the thread begins executing java_start.
void os::pd_start_thread(Thread* thread) {
  int status = thr_continue(thread->osthread()->thread_id());
  assert_status(status == 0, status, "thr_continue failed");
}
// Returns the Solaris thread id of the calling thread.
intx os::current_thread_id() {
  return (intx)thr_self();
}
1469 static pid_t _initial_pid = 0;
1471 int os::current_process_id() {
1472 return (int)(_initial_pid ? _initial_pid : getpid());
1473 }
// Allocates one thread-local-storage key via thr_keycreate and returns it
// as an int index. Calls fatal() (VM abort) if key creation fails.
int os::allocate_thread_local_storage() {
  // %%% in Win32 this allocates a memory segment pointed to by a
  // register.  Dan Stein can implement a similar feature in
  // Solaris.  Alternatively, the VM can do the same thing
  // explicitly: malloc some storage and keep the pointer in a
  // register (which is part of the thread's context) (or keep it
  // in TLS).
  // %%% In current versions of Solaris, thr_self and TSD can
  // be accessed via short sequences of displaced indirections.
  // The value of thr_self is available as %g7(36).
  // The value of thr_getspecific(k) is stored in %g7(12)(4)(k*4-4),
  // assuming that the current thread already has a value bound to k.
  // It may be worth experimenting with such access patterns,
  // and later having the parameters formally exported from a Solaris
  // interface.  I think, however, that it will be faster to
  // maintain the invariant that %g2 always contains the
  // JavaThread in Java code, and have stubs simply
  // treat %g2 as a caller-save register, preserving it in a %lN.
  thread_key_t tk;
  if (thr_keycreate( &tk, NULL ) )
    fatal(err_msg("os::allocate_thread_local_storage: thr_keycreate failed "
                  "(%s)", strerror(errno)));
  return int(tk);
}
// Intentionally a no-op: Solaris TSD keys are not individually deleted.
void os::free_thread_local_storage(int index) {
  // %%% don't think we need anything here
  // if ( pthread_key_delete((pthread_key_t) tk) )
  //   fatal("os::free_thread_local_storage: pthread_key_delete failed");
}
#define SMALLINT 32   // libthread allocate for tsd_common is a version specific
                      // small number - point is NO swap space available
// Stores 'value' under TLS key 'index' for the calling thread.
// thr_setspecific failure is fatal: ENOMEM is reported as out-of-swap
// (it allocates a small per-thread structure), anything else as a VM bug.
void os::thread_local_storage_at_put(int index, void* value) {
  // %%% this is used only in threadLocalStorage.cpp
  if (thr_setspecific((thread_key_t)index, value)) {
    if (errno == ENOMEM) {
      vm_exit_out_of_memory(SMALLINT, OOM_MALLOC_ERROR,
                            "thr_setspecific: out of swap space");
    } else {
      fatal(err_msg("os::thread_local_storage_at_put: thr_setspecific failed "
                    "(%s)", strerror(errno)));
    }
  } else {
    // Also mirror the value into the fast-access slot cache.
    ThreadLocalStorage::set_thread_in_slot ((Thread *) value) ;
  }
}
1523 // This function could be called before TLS is initialized, for example, when
1524 // VM receives an async signal or when VM causes a fatal error during
1525 // initialization. Return NULL if thr_getspecific() fails.
1526 void* os::thread_local_storage_at(int index) {
1527 // %%% this is used only in threadLocalStorage.cpp
1528 void* r = NULL;
1529 return thr_getspecific((thread_key_t)index, &r) != 0 ? NULL : r;
1530 }
// gethrtime() should be monotonic according to the documentation,
// but some virtualized platforms are known to break this guarantee.
// getTimeNanos() must be guaranteed not to move backwards, so we
// are forced to add a check here.
//
// Lock-free: maintains the global high-water mark max_hrtime with a single
// CAS and deliberately does not loop on CAS failure (see rationale below).
inline hrtime_t getTimeNanos() {
  const hrtime_t now = gethrtime();
  const hrtime_t prev = max_hrtime;
  if (now <= prev) {
    return prev;   // same or retrograde time;
  }
  const hrtime_t obsv = Atomic::cmpxchg(now, (volatile jlong*)&max_hrtime, prev);
  assert(obsv >= prev, "invariant");   // Monotonicity
  // If the CAS succeeded then we're done and return "now".
  // If the CAS failed and the observed value "obsv" is >= now then
  // we should return "obsv".  If the CAS failed and now > obsv > prv then
  // some other thread raced this thread and installed a new value, in which case
  // we could either (a) retry the entire operation, (b) retry trying to install now
  // or (c) just return obsv.  We use (c).   No loop is required although in some cases
  // we might discard a higher "now" value in deference to a slightly lower but freshly
  // installed obsv value.   That's entirely benign -- it admits no new orderings compared
  // to (a) or (b) -- and greatly reduces coherence traffic.
  // We might also condition (c) on the magnitude of the delta between obsv and now.
  // Avoiding excessive CAS operations to hot RW locations is critical.
  // See https://blogs.oracle.com/dave/entry/cas_and_cache_trivia_invalidate
  return (prev == obsv) ? now : obsv;
}
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(getTimeNanos() - first_hrtime) / (double)hrtime_hz;
}
// Raw elapsed ticks (nanoseconds) since VM start; pair with
// elapsed_frequency() to convert to seconds.
jlong os::elapsed_counter() {
  return (jlong)(getTimeNanos() - first_hrtime);
}
// Ticks per second for elapsed_counter().
jlong os::elapsed_frequency() {
  return hrtime_hz;
}
1574 // Return the real, user, and system times in seconds from an
1575 // arbitrary fixed point in the past.
1576 bool os::getTimesSecs(double* process_real_time,
1577 double* process_user_time,
1578 double* process_system_time) {
1579 struct tms ticks;
1580 clock_t real_ticks = times(&ticks);
1582 if (real_ticks == (clock_t) (-1)) {
1583 return false;
1584 } else {
1585 double ticks_per_second = (double) clock_tics_per_sec;
1586 *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1587 *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1588 // For consistency return the real time from getTimeNanos()
1589 // converted to seconds.
1590 *process_real_time = ((double) getTimeNanos()) / ((double) NANOUNITS);
1592 return true;
1593 }
1594 }
// Solaris supports per-thread virtual (CPU) time via gethrvtime().
bool os::supports_vtime() { return true; }
// Enables microstate accounting for this process by writing a PCSET/PR_MSACCT
// control message to /proc/self/ctl. Returns false if the control file can't
// be opened or the write is short; the fd is closed in all paths.
bool os::enable_vtime() {
  int fd = ::open("/proc/self/ctl", O_WRONLY);
  if (fd == -1)
    return false;

  long cmd[] = { PCSET, PR_MSACCT };
  int res = ::write(fd, cmd, sizeof(long) * 2);
  ::close(fd);
  if (res != sizeof(long) * 2)
    return false;

  return true;
}
// Reports whether microstate accounting (PR_MSACCT) is currently enabled for
// this process, by reading the pstatus_t record from /proc/self/status.
// Returns false on any open/read failure.
bool os::vtime_enabled() {
  int fd = ::open("/proc/self/status", O_RDONLY);
  if (fd == -1)
    return false;

  pstatus_t status;
  int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
  ::close(fd);
  if (res != sizeof(pstatus_t))
    return false;

  return status.pr_flags & PR_MSACCT;
}
// Per-thread virtual (CPU) time in seconds, from gethrvtime().
double os::elapsedVTime() {
  return (double)gethrvtime() / (double)hrtime_hz;
}
// Used internally for comparisons only
// getTimeMillis guaranteed to not move backwards on Solaris
// (it derives from the monotonic getTimeNanos above).
jlong getTimeMillis() {
  jlong nanotime = getTimeNanos();
  return (jlong)(nanotime / NANOSECS_PER_MILLISEC);
}
// Must return millis since Jan 1 1970 for JVM_CurrentTimeMillis
// Wall-clock time (NOT monotonic — subject to clock adjustments);
// a gettimeofday failure is fatal.
jlong os::javaTimeMillis() {
  timeval t;
  if (gettimeofday( &t, NULL) == -1)
    fatal(err_msg("os::javaTimeMillis: gettimeofday (%s)", strerror(errno)));
  return jlong(t.tv_sec) * 1000  +  jlong(t.tv_usec) / 1000;
}
// Monotonic nanosecond clock for System.nanoTime (via getTimeNanos above).
jlong os::javaTimeNanos() {
  return (jlong)getTimeNanos();
}
// Describes the properties of the javaTimeNanos clock for JVMTI clients.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // gethrtime() uses all 64 bits
  info_ptr->may_skip_backward = false;    // not subject to resetting or drifting
  info_ptr->may_skip_forward = false;     // not subject to resetting or drifting
  info_ptr->kind = JVMTI_TIMER_ELAPSED;   // elapsed not CPU time
}
// Formats the current local time as "YYYY-MM-DD HH:MM:SS" into buf
// (truncating to buflen) and returns buf. Uses the reentrant localtime_r.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Runs shutdown cleanup first; with dump_core, ::abort() produces a core
// file, otherwise the process exits with status 1.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core (for debugging)
  }

  ::exit(1);
}
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort(); // dump core (for debugging)
}
// DLL functions

// Shared-library file extension on Solaris.
const char* os::dll_file_extension() { return ".so"; }
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1721 static bool file_exists(const char* filename) {
1722 struct stat statbuf;
1723 if (filename == NULL || strlen(filename) == 0) {
1724 return false;
1725 }
1726 return os::stat(filename, &statbuf) == 0;
1727 }
// Builds the platform library filename "lib<fname>.so" into 'buffer'.
// pname may be empty (bare name), a single directory, or a path-separated
// list of directories — in the list case the first directory containing the
// library wins. Returns false on buffer overflow or when no candidate in a
// path list exists.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element until the file exists.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      // really shouldn't be NULL but what the heck, check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage allocated by split_path
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
// check if addr is inside libjvm.so
bool os::address_is_in_vm(address addr) {
  static address libjvm_base_addr;
  Dl_info dlinfo;

  // Lazily cache libjvm's load base by resolving a symbol known to live
  // in libjvm: this function itself.
  if (libjvm_base_addr == NULL) {
    if (dladdr(CAST_FROM_FN_PTR(void *, os::address_is_in_vm), &dlinfo) != 0) {
      libjvm_base_addr = (address)dlinfo.dli_fbase;
    }
    assert(libjvm_base_addr !=NULL, "Cannot obtain base address for libjvm");
  }

  // addr is inside libjvm iff it resolves back to the same load base.
  if (dladdr((void *)addr, &dlinfo) != 0) {
    if (libjvm_base_addr == (address)dlinfo.dli_fbase) return true;
  }

  return false;
}
// dladdr1(3C) gives access to the ELF symbol entry (and hence the symbol
// size), which plain dladdr does not.  Looked up dynamically because it
// may be absent on the build machine; set in os::init() — resolved
// outside this view, TODO confirm.
typedef int (*dladdr1_func_type) (void *, Dl_info *, void **, int);
static dladdr1_func_type dladdr1_func = NULL;
1797 bool os::dll_address_to_function_name(address addr, char *buf,
1798 int buflen, int * offset) {
1799 // buf is not optional, but offset is optional
1800 assert(buf != NULL, "sanity check");
1802 Dl_info dlinfo;
1804 // dladdr1_func was initialized in os::init()
1805 if (dladdr1_func != NULL) {
1806 // yes, we have dladdr1
1808 // Support for dladdr1 is checked at runtime; it may be
1809 // available even if the vm is built on a machine that does
1810 // not have dladdr1 support. Make sure there is a value for
1811 // RTLD_DL_SYMENT.
1812 #ifndef RTLD_DL_SYMENT
1813 #define RTLD_DL_SYMENT 1
1814 #endif
1815 #ifdef _LP64
1816 Elf64_Sym * info;
1817 #else
1818 Elf32_Sym * info;
1819 #endif
1820 if (dladdr1_func((void *)addr, &dlinfo, (void **)&info,
1821 RTLD_DL_SYMENT) != 0) {
1822 // see if we have a matching symbol that covers our address
1823 if (dlinfo.dli_saddr != NULL &&
1824 (char *)dlinfo.dli_saddr + info->st_size > (char *)addr) {
1825 if (dlinfo.dli_sname != NULL) {
1826 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1827 jio_snprintf(buf, buflen, "%s", dlinfo.dli_sname);
1828 }
1829 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1830 return true;
1831 }
1832 }
1833 // no matching symbol so try for just file info
1834 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1835 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1836 buf, buflen, offset, dlinfo.dli_fname)) {
1837 return true;
1838 }
1839 }
1840 }
1841 buf[0] = '\0';
1842 if (offset != NULL) *offset = -1;
1843 return false;
1844 }
1846 // no, only dladdr is available
1847 if (dladdr((void *)addr, &dlinfo) != 0) {
1848 // see if we have a matching symbol
1849 if (dlinfo.dli_saddr != NULL && dlinfo.dli_sname != NULL) {
1850 if (!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
1851 jio_snprintf(buf, buflen, dlinfo.dli_sname);
1852 }
1853 if (offset != NULL) *offset = addr - (address)dlinfo.dli_saddr;
1854 return true;
1855 }
1856 // no matching symbol so try for just file info
1857 if (dlinfo.dli_fname != NULL && dlinfo.dli_fbase != NULL) {
1858 if (Decoder::decode((address)(addr - (address)dlinfo.dli_fbase),
1859 buf, buflen, offset, dlinfo.dli_fname)) {
1860 return true;
1861 }
1862 }
1863 }
1864 buf[0] = '\0';
1865 if (offset != NULL) *offset = -1;
1866 return false;
1867 }
1869 bool os::dll_address_to_library_name(address addr, char* buf,
1870 int buflen, int* offset) {
1871 // buf is not optional, but offset is optional
1872 assert(buf != NULL, "sanity check");
1874 Dl_info dlinfo;
1876 if (dladdr((void*)addr, &dlinfo) != 0) {
1877 if (dlinfo.dli_fname != NULL) {
1878 jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
1879 }
1880 if (dlinfo.dli_fbase != NULL && offset != NULL) {
1881 *offset = addr - (address)dlinfo.dli_fbase;
1882 }
1883 return true;
1884 }
1886 buf[0] = '\0';
1887 if (offset) *offset = -1;
1888 return false;
1889 }
1891 // Prints the names and full paths of all opened dynamic libraries
1892 // for current process
1893 void os::print_dll_info(outputStream * st) {
1894 Dl_info dli;
1895 void *handle;
1896 Link_map *map;
1897 Link_map *p;
1899 st->print_cr("Dynamic libraries:"); st->flush();
1901 if (dladdr(CAST_FROM_FN_PTR(void *, os::print_dll_info), &dli) == 0 ||
1902 dli.dli_fname == NULL) {
1903 st->print_cr("Error: Cannot print dynamic libraries.");
1904 return;
1905 }
1906 handle = dlopen(dli.dli_fname, RTLD_LAZY);
1907 if (handle == NULL) {
1908 st->print_cr("Error: Cannot print dynamic libraries.");
1909 return;
1910 }
1911 dlinfo(handle, RTLD_DI_LINKMAP, &map);
1912 if (map == NULL) {
1913 st->print_cr("Error: Cannot print dynamic libraries.");
1914 return;
1915 }
1917 while (map->l_prev != NULL)
1918 map = map->l_prev;
1920 while (map != NULL) {
1921 st->print_cr(PTR_FORMAT " \t%s", map->l_addr, map->l_name);
1922 map = map->l_next;
1923 }
1925 dlclose(handle);
1926 }
1928 // Loads .dll/.so and
1929 // in case of error it checks if .dll/.so was built for the
1930 // same architecture as Hotspot is running on
1932 void * os::dll_load(const char *filename, char *ebuf, int ebuflen)
1933 {
1934 void * result= ::dlopen(filename, RTLD_LAZY);
1935 if (result != NULL) {
1936 // Successful loading
1937 return result;
1938 }
1940 Elf32_Ehdr elf_head;
1942 // Read system error message into ebuf
1943 // It may or may not be overwritten below
1944 ::strncpy(ebuf, ::dlerror(), ebuflen-1);
1945 ebuf[ebuflen-1]='\0';
1946 int diag_msg_max_length=ebuflen-strlen(ebuf);
1947 char* diag_msg_buf=ebuf+strlen(ebuf);
1949 if (diag_msg_max_length==0) {
1950 // No more space in ebuf for additional diagnostics message
1951 return NULL;
1952 }
1955 int file_descriptor= ::open(filename, O_RDONLY | O_NONBLOCK);
1957 if (file_descriptor < 0) {
1958 // Can't open library, report dlerror() message
1959 return NULL;
1960 }
1962 bool failed_to_read_elf_head=
1963 (sizeof(elf_head)!=
1964 (::read(file_descriptor, &elf_head,sizeof(elf_head)))) ;
1966 ::close(file_descriptor);
1967 if (failed_to_read_elf_head) {
1968 // file i/o error - report dlerror() msg
1969 return NULL;
1970 }
1972 typedef struct {
1973 Elf32_Half code; // Actual value as defined in elf.h
1974 Elf32_Half compat_class; // Compatibility of archs at VM's sense
1975 char elf_class; // 32 or 64 bit
1976 char endianess; // MSB or LSB
1977 char* name; // String representation
1978 } arch_t;
1980 static const arch_t arch_array[]={
1981 {EM_386, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1982 {EM_486, EM_386, ELFCLASS32, ELFDATA2LSB, (char*)"IA 32"},
1983 {EM_IA_64, EM_IA_64, ELFCLASS64, ELFDATA2LSB, (char*)"IA 64"},
1984 {EM_X86_64, EM_X86_64, ELFCLASS64, ELFDATA2LSB, (char*)"AMD 64"},
1985 {EM_SPARC, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1986 {EM_SPARC32PLUS, EM_SPARC, ELFCLASS32, ELFDATA2MSB, (char*)"Sparc 32"},
1987 {EM_SPARCV9, EM_SPARCV9, ELFCLASS64, ELFDATA2MSB, (char*)"Sparc v9 64"},
1988 {EM_PPC, EM_PPC, ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
1989 {EM_PPC64, EM_PPC64, ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
1990 {EM_ARM, EM_ARM, ELFCLASS32, ELFDATA2LSB, (char*)"ARM 32"}
1991 };
1993 #if (defined IA32)
1994 static Elf32_Half running_arch_code=EM_386;
1995 #elif (defined AMD64)
1996 static Elf32_Half running_arch_code=EM_X86_64;
1997 #elif (defined IA64)
1998 static Elf32_Half running_arch_code=EM_IA_64;
1999 #elif (defined __sparc) && (defined _LP64)
2000 static Elf32_Half running_arch_code=EM_SPARCV9;
2001 #elif (defined __sparc) && (!defined _LP64)
2002 static Elf32_Half running_arch_code=EM_SPARC;
2003 #elif (defined __powerpc64__)
2004 static Elf32_Half running_arch_code=EM_PPC64;
2005 #elif (defined __powerpc__)
2006 static Elf32_Half running_arch_code=EM_PPC;
2007 #elif (defined ARM)
2008 static Elf32_Half running_arch_code=EM_ARM;
2009 #else
2010 #error Method os::dll_load requires that one of following is defined:\
2011 IA32, AMD64, IA64, __sparc, __powerpc__, ARM, ARM
2012 #endif
2014 // Identify compatability class for VM's architecture and library's architecture
2015 // Obtain string descriptions for architectures
2017 arch_t lib_arch={elf_head.e_machine,0,elf_head.e_ident[EI_CLASS], elf_head.e_ident[EI_DATA], NULL};
2018 int running_arch_index=-1;
2020 for (unsigned int i=0 ; i < ARRAY_SIZE(arch_array) ; i++ ) {
2021 if (running_arch_code == arch_array[i].code) {
2022 running_arch_index = i;
2023 }
2024 if (lib_arch.code == arch_array[i].code) {
2025 lib_arch.compat_class = arch_array[i].compat_class;
2026 lib_arch.name = arch_array[i].name;
2027 }
2028 }
2030 assert(running_arch_index != -1,
2031 "Didn't find running architecture code (running_arch_code) in arch_array");
2032 if (running_arch_index == -1) {
2033 // Even though running architecture detection failed
2034 // we may still continue with reporting dlerror() message
2035 return NULL;
2036 }
2038 if (lib_arch.endianess != arch_array[running_arch_index].endianess) {
2039 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: endianness mismatch)");
2040 return NULL;
2041 }
2043 if (lib_arch.elf_class != arch_array[running_arch_index].elf_class) {
2044 ::snprintf(diag_msg_buf, diag_msg_max_length-1," (Possible cause: architecture word width mismatch)");
2045 return NULL;
2046 }
2048 if (lib_arch.compat_class != arch_array[running_arch_index].compat_class) {
2049 if ( lib_arch.name!=NULL ) {
2050 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2051 " (Possible cause: can't load %s-bit .so on a %s-bit platform)",
2052 lib_arch.name, arch_array[running_arch_index].name);
2053 } else {
2054 ::snprintf(diag_msg_buf, diag_msg_max_length-1,
2055 " (Possible cause: can't load this .so (machine code=0x%x) on a %s-bit platform)",
2056 lib_arch.code,
2057 arch_array[running_arch_index].name);
2058 }
2059 }
2061 return NULL;
2062 }
// Looks up symbol 'name' in shared object 'handle' via dlsym(3C).
void* os::dll_lookup(void* handle, const char* name) {
  return dlsym(handle, name);
}

// Returns a handle whose lookups search the whole process scope
// (dlopen with a NULL path).
void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
2072 int os::stat(const char *path, struct stat *sbuf) {
2073 char pathbuf[MAX_PATH];
2074 if (strlen(path) > MAX_PATH - 1) {
2075 errno = ENAMETOOLONG;
2076 return -1;
2077 }
2078 os::native_path(strcpy(pathbuf, path));
2079 return ::stat(pathbuf, sbuf);
2080 }
2082 static bool _print_ascii_file(const char* filename, outputStream* st) {
2083 int fd = ::open(filename, O_RDONLY);
2084 if (fd == -1) {
2085 return false;
2086 }
2088 char buf[32];
2089 int bytes;
2090 while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
2091 st->print_raw(buf, bytes);
2092 }
2094 ::close(fd);
2096 return true;
2097 }
// Abbreviated OS description: distro banner, uname, libthread flavor.
void os::print_os_info_brief(outputStream* st) {
  os::Solaris::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Solaris::print_libversion_info(st);
}

// Full OS section (hs_err style): brief info plus rlimits and load average.
void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::Solaris::print_distro_info(st);

  os::Posix::print_uname_info(st);

  os::Solaris::print_libversion_info(st);

  os::Posix::print_rlimit_info(st);

  os::Posix::print_load_average(st);
}
// Prints the Solaris distribution banner from /etc/release, falling back
// to the plain string "Solaris" if the file cannot be read.
void os::Solaris::print_distro_info(outputStream* st) {
  if (!_print_ascii_file("/etc/release", st)) {
    st->print("Solaris");
  }
  st->cr();
}
2128 void os::Solaris::print_libversion_info(outputStream* st) {
2129 if (os::Solaris::T2_libthread()) {
2130 st->print(" (T2 libthread)");
2131 }
2132 else {
2133 st->print(" (T1 libthread)");
2134 }
2135 st->cr();
2136 }
2138 static bool check_addr0(outputStream* st) {
2139 jboolean status = false;
2140 int fd = ::open("/proc/self/map",O_RDONLY);
2141 if (fd >= 0) {
2142 prmap_t p;
2143 while(::read(fd, &p, sizeof(p)) > 0) {
2144 if (p.pr_vaddr == 0x0) {
2145 st->print("Warning: Address: 0x%x, Size: %dK, ",p.pr_vaddr, p.pr_size/1024, p.pr_mapname);
2146 st->print("Mapped file: %s, ", p.pr_mapname[0] == '\0' ? "None" : p.pr_mapname);
2147 st->print("Access:");
2148 st->print("%s",(p.pr_mflags & MA_READ) ? "r" : "-");
2149 st->print("%s",(p.pr_mflags & MA_WRITE) ? "w" : "-");
2150 st->print("%s",(p.pr_mflags & MA_EXEC) ? "x" : "-");
2151 st->cr();
2152 status = true;
2153 }
2154 }
2155 ::close(fd);
2156 }
2157 return status;
2158 }
void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

// Prints the page size and physical/free memory, then warns if anything
// is mapped at address 0 (see check_addr0).
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);
  st->print(", physical " UINT64_FORMAT "k", os::physical_memory()>>10);
  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
  st->cr();
  (void) check_addr0(st);
}
// Prints a short description of 'siginfo'.  Additionally, for SIGBUS or
// SIGSEGV while class data sharing is active, checks whether the faulting
// address lies inside the shared archive mapping and prints a hint that
// the archive file may have become inaccessible.
void os::print_siginfo(outputStream* st, void* siginfo) {
  const siginfo_t* si = (const siginfo_t*)siginfo;

  os::Posix::print_siginfo_brief(st, si);

  if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
      UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space(si->si_addr)) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  }
  st->cr();
}
// Moved from whole group, because we need them here for diagnostic
// prints.
#define OLDMAXSIGNUM 32
static int Maxsignum = 0;        // highest real signal number; set in init_signal_mem()
static int *ourSigFlags = NULL;  // per-signal sa_flags the VM installed

// VM's INTR signal handler; defined elsewhere in this file.
extern "C" void sigINTRHandler(int, siginfo_t*, void*);
// Returns the sa_flags the VM recorded for 'sig' (for diagnostics and
// handler-tampering checks).
int os::Solaris::get_our_sigflags(int sig) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  return ourSigFlags[sig];
}

// Records the sa_flags the VM installed for 'sig'.
void os::Solaris::set_our_sigflags(int sig, int flags) {
  assert(ourSigFlags!=NULL, "signal data structure not initialized");
  assert(sig > 0 && sig < Maxsignum, "vm signal out of expected range");
  ourSigFlags[sig] = flags;
}
2211 static const char* get_signal_handler_name(address handler,
2212 char* buf, int buflen) {
2213 int offset;
2214 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
2215 if (found) {
2216 // skip directory names
2217 const char *p1, *p2;
2218 p1 = buf;
2219 size_t len = strlen(os::file_separator());
2220 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
2221 jio_snprintf(buf, buflen, "%s+0x%x", p1, offset);
2222 } else {
2223 jio_snprintf(buf, buflen, PTR_FORMAT, handler);
2224 }
2225 return buf;
2226 }
// Prints one line describing the handler currently installed for 'sig':
// its symbolic name (or SIG_DFL/SIG_IGN), sa_mask, sa_flags, and — when
// the handler is one of the VM's own — a warning if its flags were
// changed behind the VM's back.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;

  // Query-only sigaction: retrieves the current disposition.
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  // SA_SIGINFO selects which union member holds the handler address.
  address handler = (sa.sa_flags & SA_SIGINFO)
                  ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                  : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if(rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if(handler == CAST_FROM_FN_PTR(address, signalHandler) ||
     handler == CAST_FROM_FN_PTR(address, sigINTRHandler)) {
    // It is our signal handler
    // check for flags
    if(sa.sa_flags != os::Solaris::get_our_sigflags(sig)) {
      st->print(
        ", flags was changed from " PTR32_FORMAT ", consider using jsig library",
        os::Solaris::get_our_sigflags(sig));
    }
  }
  st->cr();
}
// Dumps the disposition of every signal the VM cares about (used when
// generating hs_err reports).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, ASYNC_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, os::Solaris::SIGinterrupt(), buf, buflen);
  print_signal_handler(st, os::Solaris::SIGasync(), buf, buflen);
}
// Cache for the resolved libjvm.so path; filled on first call to jvm_path().
static char saved_jvm_path[MAXPATHLEN] = { 0 };

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Resolve our own address; dli_fname then names the libjvm binary.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  if (ret != 0 && dlinfo.dli_fname != NULL) {
    realpath((char *)dlinfo.dli_fname, buf);
  } else {
    buf[0] = '\0';
    return;
  }

  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher.  Typical value for buf is
    // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so".  If "/jre/lib/" appears at
    // the right place in the string, then assume we are installed in a JDK and
    // we're done.  Otherwise, check for a JAVA_HOME environment variable and fix
    // up the path so it looks like libjvm.so is installed there (append a
    // fake suffix hotspot/libjvm.so).

    // Walk back over the last 5 path components ("/jre/lib/<arch>/<vmtype>/libjvm.so").
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 5; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char cpu_arch[12];
        char* jrelib_p;
        int len;
        sysinfo(SI_ARCHITECTURE, cpu_arch, sizeof(cpu_arch));
#ifdef _LP64
        // If we are on sparc running a 64-bit vm, look in jre/lib/sparcv9.
        if (strcmp(cpu_arch, "sparc") == 0) {
          strcat(cpu_arch, "v9");
        } else if (strcmp(cpu_arch, "i386") == 0) {
          strcpy(cpu_arch, "amd64");
        }
#endif
        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        realpath(java_home_var, buf);
        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer space");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib/%s", cpu_arch);
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib/%s", cpu_arch);
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          realpath((char *)dlinfo.dli_fname, buf);
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
}
// JNI symbol decoration hooks; Solaris native symbols carry no extra
// prefix or suffix, so both are intentionally empty.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
2388 // This method is a copy of JDK's sysGetLastErrorString
2389 // from src/solaris/hpi/src/system_md.c
2391 size_t os::lasterror(char *buf, size_t len) {
2393 if (errno == 0) return 0;
2395 const char *s = ::strerror(errno);
2396 size_t n = ::strlen(s);
2397 if (n >= len) {
2398 n = len - 1;
2399 }
2400 ::strncpy(buf, s, n);
2401 buf[n] = '\0';
2402 return n;
2403 }
// sun.misc.Signal

extern "C" {
  // Handler registered via os::signal() for signals forwarded to Java
  // (sun.misc.Signal): records the signal for the signal-dispatch thread.
  static void UserHandler(int sig, void *siginfo, void *context) {
    // Ctrl-C is pressed during error reporting, likely because the error
    // handler fails to abort. Let VM die immediately.
    if (sig == SIGINT && is_error_reported()) {
      os::die();
    }

    os::signal_notify(sig);
    // We do not need to reinstate the signal handler each time...
  }
}

// Returns the address of UserHandler for registration through os::signal().
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
// Counting semaphore built on the Solaris sema_t API.
class Semaphore : public StackObj {
  public:
    Semaphore();
    ~Semaphore();
    void signal();    // increment the count, waking a waiter
    void wait();      // block until the count is positive, then decrement
    bool trywait();   // non-blocking decrement; false if count was zero
    bool timedwait(unsigned int sec, int nsec);  // wait with a timeout
  private:
    sema_t _semaphore;
};
// Initializes a process-private semaphore with an initial count of zero.
Semaphore::Semaphore() {
  sema_init(&_semaphore, 0, NULL, NULL);
}

Semaphore::~Semaphore() {
  sema_destroy(&_semaphore);
}

// Increment the count, releasing one waiter if any.
void Semaphore::signal() {
  sema_post(&_semaphore);
}

// Block until the count is positive, then decrement it.
void Semaphore::wait() {
  sema_wait(&_semaphore);
}

// Attempt a decrement without blocking; true on success.
bool Semaphore::trywait() {
  return sema_trywait(&_semaphore) == 0;
}
2458 bool Semaphore::timedwait(unsigned int sec, int nsec) {
2459 struct timespec ts;
2460 unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
2462 while (1) {
2463 int result = sema_timedwait(&_semaphore, &ts);
2464 if (result == 0) {
2465 return true;
2466 } else if (errno == EINTR) {
2467 continue;
2468 } else if (errno == ETIME) {
2469 return false;
2470 } else {
2471 return false;
2472 }
2473 }
2474 }
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

// Installs 'handler' for 'signal_number' (sun.misc.Signal support).
// Returns the previous handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;
  sigfillset(&(sigAct.sa_mask));
  // NOTE(review): SA_RESTART & ~SA_RESETHAND evaluates to just SA_RESTART
  // when the two flags share no bits; presumably "SA_RESTART without
  // SA_RESETHAND" was meant — confirm intent before changing.
  sigAct.sa_flags = SA_RESTART & ~SA_RESETHAND;
  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct))
    // -1 means registration failed
    return (void *)-1;

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}

// Raises 'signal_number' in the current process.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2498 /*
2499 * The following code is moved from os.cpp for making this
2500 * code platform specific, which it is by its very nature.
2501 */
2503 // a counter for each possible signal value
2504 static int Sigexit = 0;
2505 static int Maxlibjsigsigs;
2506 static jint *pending_signals = NULL;
2507 static int *preinstalled_sigs = NULL;
2508 static struct sigaction *chainedsigactions = NULL;
2509 static sema_t sig_sem;
2510 typedef int (*version_getting_t)();
2511 version_getting_t os::Solaris::get_libjsig_version = NULL;
2512 static int libjsigversion = NULL;
// Returns the pseudo-signal number used to ask the signal-dispatch thread
// to exit (one past the largest real signal number; see init_signal_mem).
int os::sigexitnum_pd() {
  assert(Sigexit > 0, "signal memory not yet initialized");
  return Sigexit;
}
2519 void os::Solaris::init_signal_mem() {
2520 // Initialize signal structures
2521 Maxsignum = SIGRTMAX;
2522 Sigexit = Maxsignum+1;
2523 assert(Maxsignum >0, "Unable to obtain max signal number");
2525 Maxlibjsigsigs = Maxsignum;
2527 // pending_signals has one int per signal
2528 // The additional signal is for SIGEXIT - exit signal to signal_thread
2529 pending_signals = (jint *)os::malloc(sizeof(jint) * (Sigexit+1), mtInternal);
2530 memset(pending_signals, 0, (sizeof(jint) * (Sigexit+1)));
2532 if (UseSignalChaining) {
2533 chainedsigactions = (struct sigaction *)malloc(sizeof(struct sigaction)
2534 * (Maxsignum + 1), mtInternal);
2535 memset(chainedsigactions, 0, (sizeof(struct sigaction) * (Maxsignum + 1)));
2536 preinstalled_sigs = (int *)os::malloc(sizeof(int) * (Maxsignum + 1), mtInternal);
2537 memset(preinstalled_sigs, 0, (sizeof(int) * (Maxsignum + 1)));
2538 }
2539 ourSigFlags = (int*)malloc(sizeof(int) * (Maxsignum + 1 ), mtInternal);
2540 memset(ourSigFlags, 0, sizeof(int) * (Maxsignum + 1));
2541 }
// Creates the semaphore that wakes the signal-dispatch thread.
void os::signal_init_pd() {
  int ret;

  ret = ::sema_init(&sig_sem, 0, NULL, NULL);
  assert(ret == 0, "sema_init() failed");
}

// Marks 'signal_number' pending and wakes the signal-dispatch thread.
// Uses only atomics and sema_post, so it is callable from a signal handler.
void os::signal_notify(int signal_number) {
  int ret;

  Atomic::inc(&pending_signals[signal_number]);
  ret = ::sema_post(&sig_sem);
  assert(ret == 0, "sema_post() failed");
}
// Consumes and returns the number of one pending signal.  If none is
// pending: returns -1 when wait_for_signal is false, otherwise blocks on
// sig_sem until one arrives, cooperating with the external-suspend
// protocol while blocked.
static int check_pending_signals(bool wait_for_signal) {
  int ret;
  while (true) {
    // Atomically claim one occurrence of any pending signal.
    for (int i = 0; i < Sigexit + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      // Note: Solaris sema_wait() reports EINTR via its return value.
      while((ret = ::sema_wait(&sig_sem)) == EINTR)
          ;
      assert(ret == 0, "sema_wait() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ret = ::sema_post(&sig_sem);
        assert(ret == 0, "sema_post() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
// Non-blocking poll for a pending signal; returns -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Blocks until a signal is pending and returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// System page size in bytes; initialized before use — set in os::init(),
// which is outside this view (TODO confirm), hence the asserts below.
static int page_size = -1;

// The mmap MAP_ALIGN flag is supported on Solaris 9 and later. init_2() will
// clear this var if support is not available.
static bool has_map_align = true;

int os::vm_page_size() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}

// Solaris allocates memory by pages.
int os::vm_allocation_granularity() {
  assert(page_size != -1, "must call os::init");
  return page_size;
}
// Returns true iff 'err' — an errno value from a failed mmap — is one the
// caller can safely handle.  The recoverable list comes from the Solaris
// mmap(2) man page.  Any other failure may have cost us the reserved
// mapping, leaving different data structures (or the VM and a library)
// believing they own the same memory, so it is treated as unrecoverable.
static bool recoverable_mmap_error(int err) {
  return err == EBADF || err == EINVAL || err == ENOTSUP;
}
// Logs a failed os::commit_memory(addr, bytes, exec) call with its errno.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
          strerror(err), err);
}

// Same, for the overload that also carries an alignment hint.
static void warn_fail_commit_memory(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
          alignment_hint, exec, strerror(err), err);
}
// Commits [addr, addr+bytes) with a private MAP_FIXED mapping.  Returns 0
// on success or a recoverable errno; an unrecoverable mmap error (see
// recoverable_mmap_error) aborts the VM via vm_exit_out_of_memory.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
  int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  size_t size = bytes;
  char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
  if (res != NULL) {
    if (UseNUMAInterleaving) {
      numa_make_global(addr, bytes);
    }
    return 0;
  }

  int err = errno;  // save errno from mmap() call in mmap_chunk()

  if (!recoverable_mmap_error(err)) {
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
  }

  return err;
}
// Boolean-returning wrapper: true iff the commit succeeded.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
}

// Commit variant that treats any failure as fatal, exiting with 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}
// Commit plus, when appropriate, an MPSS large-page setup derived from
// the alignment hint.  Returns the plain commit's result code.
int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
                                    size_t alignment_hint, bool exec) {
  int err = Solaris::commit_memory_impl(addr, bytes, exec);
  if (err == 0) {
    if (UseLargePages && (alignment_hint > (size_t)vm_page_size())) {
      // If the large page size has been set and the VM
      // is using large pages, use the large page size
      // if it is smaller than the alignment hint. This is
      // a case where the VM wants to use a larger alignment size
      // for its own reasons but still want to use large pages
      // (which is what matters to setting the mpss range.
      // Note: this local shadows the file-static 'page_size'.
      size_t page_size = 0;
      if (large_page_size() < alignment_hint) {
        assert(UseLargePages, "Expected to be here for large page use only");
        page_size = large_page_size();
      } else {
        // If the alignment hint is less than the large page
        // size, the VM wants a particular alignment (thus the hint)
        // for internal reasons. Try to set the mpss range using
        // the alignment_hint.
        page_size = alignment_hint;
      }
      // Since this is a hint, ignore any failures.
      (void)Solaris::setup_large_pages(addr, bytes, page_size);
    }
  }
  return err;
}
// Boolean-returning wrapper for the alignment-hint commit.
bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
                          bool exec) {
  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
}

// Alignment-hint commit that treats any failure as fatal with 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  if (err != 0) {
    // the caller wants all commit errors to exit with the specified mesg:
    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  }
}
// Uncommit the pages in a specified region.
// Implemented with MADV_FREE: the kernel may reclaim the pages, but the
// mapping itself stays in place.
void os::pd_free_memory(char* addr, size_t bytes, size_t alignment_hint) {
  if (madvise(addr, bytes, MADV_FREE) < 0) {
    debug_only(warning("MADV_FREE failed."));
    return;
  }
}

// Stack guard pages are ordinary committed (non-executable) memory here;
// protection is applied elsewhere.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
2760 // Change the page size in a given range.
2761 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2762 assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
2763 assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
2764 if (UseLargePages) {
2765 Solaris::setup_large_pages(addr, bytes, alignment_hint);
2766 }
2767 }
2769 // Tell the OS to make the range local to the first-touching LWP
2770 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2771 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2772 if (madvise(addr, bytes, MADV_ACCESS_LWP) < 0) {
2773 debug_only(warning("MADV_ACCESS_LWP failed."));
2774 }
2775 }
2777 // Tell the OS that this range would be accessed from different LWPs.
2778 void os::numa_make_global(char *addr, size_t bytes) {
2779 assert((intptr_t)addr % os::vm_page_size() == 0, "Address should be page-aligned.");
2780 if (madvise(addr, bytes, MADV_ACCESS_MANY) < 0) {
2781 debug_only(warning("MADV_ACCESS_MANY failed."));
2782 }
2783 }
2785 // Get the number of the locality groups.
2786 size_t os::numa_get_groups_num() {
2787 size_t n = Solaris::lgrp_nlgrps(Solaris::lgrp_cookie());
2788 return n != -1 ? n : 1;
2789 }
2791 // Get a list of leaf locality groups. A leaf lgroup is group that
2792 // doesn't have any children. Typical leaf group is a CPU or a CPU/memory
2793 // board. An LWP is assigned to one of these groups upon creation.
2794 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2795 if ((ids[0] = Solaris::lgrp_root(Solaris::lgrp_cookie())) == -1) {
2796 ids[0] = 0;
2797 return 1;
2798 }
2799 int result_size = 0, top = 1, bottom = 0, cur = 0;
2800 for (int k = 0; k < size; k++) {
2801 int r = Solaris::lgrp_children(Solaris::lgrp_cookie(), ids[cur],
2802 (Solaris::lgrp_id_t*)&ids[top], size - top);
2803 if (r == -1) {
2804 ids[0] = 0;
2805 return 1;
2806 }
2807 if (!r) {
2808 // That's a leaf node.
2809 assert (bottom <= cur, "Sanity check");
2810 // Check if the node has memory
2811 if (Solaris::lgrp_resources(Solaris::lgrp_cookie(), ids[cur],
2812 NULL, 0, LGRP_RSRC_MEM) > 0) {
2813 ids[bottom++] = ids[cur];
2814 }
2815 }
2816 top += r;
2817 cur++;
2818 }
2819 if (bottom == 0) {
2820 // Handle a situation, when the OS reports no memory available.
2821 // Assume UMA architecture.
2822 ids[0] = 0;
2823 return 1;
2824 }
2825 return bottom;
2826 }
2828 // Detect the topology change. Typically happens during CPU plugging-unplugging.
2829 bool os::numa_topology_changed() {
2830 int is_stale = Solaris::lgrp_cookie_stale(Solaris::lgrp_cookie());
2831 if (is_stale != -1 && is_stale) {
2832 Solaris::lgrp_fini(Solaris::lgrp_cookie());
2833 Solaris::lgrp_cookie_t c = Solaris::lgrp_init(Solaris::LGRP_VIEW_CALLER);
2834 assert(c != 0, "Failure to initialize LGRP API");
2835 Solaris::set_lgrp_cookie(c);
2836 return true;
2837 }
2838 return false;
2839 }
// Get the group id of the current LWP.
// Returns the id of one of the memory-bearing lgroups associated with
// the LWP's home lgroup (chosen at random), or 0 on any failure.
int os::numa_get_group_id() {
  int lgrp_id = Solaris::lgrp_home(P_LWPID, P_MYID);
  if (lgrp_id == -1) {
    return 0;
  }
  const int size = os::numa_get_groups_num();
  // Stack allocation is fine: 'size' is the (small) number of lgroups.
  int *ids = (int*)alloca(size * sizeof(int));

  // Get the ids of all lgroups with memory; r is the count.
  int r = Solaris::lgrp_resources(Solaris::lgrp_cookie(), lgrp_id,
                                  (Solaris::lgrp_id_t*)ids, size, LGRP_RSRC_MEM);
  if (r <= 0) {
    return 0;
  }
  // Pick one of the memory-bearing lgroups at random.
  return ids[os::random() % r];
}
// Request information about the page.
// Queries meminfo(2) for the lgroup and page size backing 'start'.
// On success fills info (lgrp_id = -1 / size = 0 when the corresponding
// datum is unavailable) and returns true; returns false when the call
// fails or the address itself could not be examined.
bool os::get_page_info(char *start, page_info* info) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  uint64_t addr = (uintptr_t)start;
  uint64_t outdata[2];
  uint_t validity = 0;

  if (os::Solaris::meminfo(&addr, 1, info_types, 2, outdata, &validity) < 0) {
    return false;
  }

  // Defaults for "datum not available".
  info->size = 0;
  info->lgrp_id = -1;

  // validity bit 0: address was examined; bit 1: outdata[0]
  // (MEMINFO_VLGRP) is valid; bit 2: outdata[1] (MEMINFO_VPAGESIZE) is valid.
  if ((validity & 1) != 0) {
    if ((validity & 2) != 0) {
      info->lgrp_id = outdata[0];
    }
    if ((validity & 4) != 0) {
      info->size = outdata[1];
    }
    return true;
  }
  return false;
}
// Scan the pages from start to end until a page different than
// the one described in the info parameter is encountered.
// Returns the address of the first mismatching page (with its attributes
// in page_found), 'end' when the whole range matches, or NULL when
// meminfo() fails or reports an unexamined address.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
  const size_t types = sizeof(info_types) / sizeof(info_types[0]);
  uint64_t addrs[MAX_MEMINFO_CNT], outdata[types * MAX_MEMINFO_CNT + 1];
  uint_t validity[MAX_MEMINFO_CNT];

  // Step through the range one expected-page at a time, querying
  // meminfo(2) in batches of up to MAX_MEMINFO_CNT addresses.
  size_t page_size = MAX2((size_t)os::vm_page_size(), page_expected->size);
  uint64_t p = (uint64_t)start;
  while (p < (uint64_t)end) {
    addrs[0] = p;
    size_t addrs_count = 1;
    while (addrs_count < MAX_MEMINFO_CNT && addrs[addrs_count - 1] + page_size < (uint64_t)end) {
      addrs[addrs_count] = addrs[addrs_count - 1] + page_size;
      addrs_count++;
    }

    if (os::Solaris::meminfo(addrs, addrs_count, info_types, types, outdata, validity) < 0) {
      return NULL;
    }

    // Find the first page in this batch that differs from page_expected.
    // validity bits: bit 0 - address examined; bit 1 - lgrp id
    // (outdata[types*i]) valid; bit 2 - page size (outdata[types*i+1]) valid.
    size_t i = 0;
    for (; i < addrs_count; i++) {
      if ((validity[i] & 1) != 0) {
        if ((validity[i] & 4) != 0) {
          if (outdata[types * i + 1] != page_expected->size) {
            break;
          }
        } else
          // Page size unknown: only a mismatch if a specific size was expected.
          if (page_expected->size != 0) {
            break;
          }

        // Compare lgroup only when it is valid and the caller expected one.
        if ((validity[i] & 2) != 0 && page_expected->lgrp_id > 0) {
          if (outdata[types * i] != page_expected->lgrp_id) {
            break;
          }
        }
      } else {
        return NULL;
      }
    }

    if (i < addrs_count) {
      // Mismatch found: report what we know about the offending page.
      if ((validity[i] & 2) != 0) {
        page_found->lgrp_id = outdata[types * i];
      } else {
        page_found->lgrp_id = -1;
      }
      if ((validity[i] & 4) != 0) {
        page_found->size = outdata[types * i + 1];
      } else {
        page_found->size = 0;
      }
      return (char*)addrs[i];
    }

    // Whole batch matched; continue after the last examined page.
    p = addrs[addrs_count - 1] + page_size;
  }
  return end;
}
2948 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
2949 size_t size = bytes;
2950 // Map uncommitted pages PROT_NONE so we fail early if we touch an
2951 // uncommitted page. Otherwise, the read/write might succeed if we
2952 // have enough swap space to back the physical page.
2953 return
2954 NULL != Solaris::mmap_chunk(addr, size,
2955 MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE,
2956 PROT_NONE);
2957 }
2959 char* os::Solaris::mmap_chunk(char *addr, size_t size, int flags, int prot) {
2960 char *b = (char *)mmap(addr, size, prot, flags, os::Solaris::_dev_zero_fd, 0);
2962 if (b == MAP_FAILED) {
2963 return NULL;
2964 }
2965 return b;
2966 }
2968 char* os::Solaris::anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed) {
2969 char* addr = requested_addr;
2970 int flags = MAP_PRIVATE | MAP_NORESERVE;
2972 assert(!(fixed && (alignment_hint > 0)), "alignment hint meaningless with fixed mmap");
2974 if (fixed) {
2975 flags |= MAP_FIXED;
2976 } else if (has_map_align && (alignment_hint > (size_t) vm_page_size())) {
2977 flags |= MAP_ALIGN;
2978 addr = (char*) alignment_hint;
2979 }
2981 // Map uncommitted pages PROT_NONE so we fail early if we touch an
2982 // uncommitted page. Otherwise, the read/write might succeed if we
2983 // have enough swap space to back the physical page.
2984 return mmap_chunk(addr, bytes, flags, PROT_NONE);
2985 }
2987 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2988 char* addr = Solaris::anon_mmap(requested_addr, bytes, alignment_hint, (requested_addr != NULL));
2990 guarantee(requested_addr == NULL || requested_addr == addr,
2991 "OS failed to return requested mmap address.");
2992 return addr;
2993 }
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
// Strategy: first try the kernel's address hint; if that fails, probe
// repeatedly, trimming overlapping allocations while learning the mmap
// "gap" the kernel inserts between regions. Returns requested_addr on
// success, NULL otherwise.

char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  const int max_tries = 10;
  char* base[max_tries];
  size_t size[max_tries];

  // Solaris adds a gap between mmap'ed regions. The size of the gap
  // is dependent on the requested size and the MMU. Our initial gap
  // value here is just a guess and will be corrected later.
  bool had_top_overlap = false;
  bool have_adjusted_gap = false;
  size_t gap = 0x400000;

  // Assert only that the size is a multiple of the page size, since
  // that's all that mmap requires, and since that's all we really know
  // about at this low abstraction level. If we need higher alignment,
  // we can either pass an alignment to this method or verify alignment
  // in one of the methods further up the call chain. See bug 5044738.
  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");

  // Since snv_84, Solaris attempts to honor the address hint - see 5003415.
  // Give it a try, if the kernel honors the hint we can return immediately.
  char* addr = Solaris::anon_mmap(requested_addr, bytes, 0, false);

  volatile int err = errno;
  if (addr == requested_addr) {
    return addr;
  } else if (addr != NULL) {
    // Got memory, but not where we wanted it; give it back.
    pd_unmap_memory(addr, bytes);
  }

  if (PrintMiscellaneous && Verbose) {
    char buf[256];
    buf[0] = '\0';
    if (addr == NULL) {
      jio_snprintf(buf, sizeof(buf), ": %s", strerror(err));
    }
    warning("attempt_reserve_memory_at: couldn't reserve " SIZE_FORMAT " bytes at "
            PTR_FORMAT ": reserve_memory_helper returned " PTR_FORMAT
            "%s", bytes, requested_addr, addr, buf);
  }

  // Address hint method didn't work. Fall back to the old method.
  // In theory, once SNV becomes our oldest supported platform, this
  // code will no longer be needed.
  //
  // Repeatedly allocate blocks until the block is allocated at the
  // right spot. Give up after max_tries.
  int i;
  for (i = 0; i < max_tries; ++i) {
    base[i] = reserve_memory(bytes);

    if (base[i] != NULL) {
      // Is this the block we wanted?
      if (base[i] == requested_addr) {
        size[i] = bytes;
        break;
      }

      // check that the gap value is right
      if (had_top_overlap && !have_adjusted_gap) {
        size_t actual_gap = base[i-1] - base[i] - bytes;
        if (gap != actual_gap) {
          // adjust the gap value and retry the last 2 allocations
          assert(i > 0, "gap adjustment code problem");
          have_adjusted_gap = true;  // adjust the gap only once, just in case
          gap = actual_gap;
          if (PrintMiscellaneous && Verbose) {
            warning("attempt_reserve_memory_at: adjusted gap to 0x%lx", gap);
          }
          unmap_memory(base[i], bytes);
          unmap_memory(base[i-1], size[i-1]);
          i-=2;
          continue;
        }
      }

      // Does this overlap the block we wanted? Give back the overlapped
      // parts and try again.
      //
      // There is still a bug in this code: if top_overlap == bytes,
      // the overlap is offset from requested region by the value of gap.
      // In this case giving back the overlapped part will not work,
      // because we'll give back the entire block at base[i] and
      // therefore the subsequent allocation will not generate a new gap.
      // This could be fixed with a new algorithm that used larger
      // or variable size chunks to find the requested region -
      // but such a change would introduce additional complications.
      // It's rare enough that the planets align for this bug,
      // so we'll just wait for a fix for 6204603/5003415 which
      // will provide a mmap flag to allow us to avoid this business.

      // NOTE(review): top_overlap/bottom_overlap are size_t, so the
      // ">= 0" halves of the tests below are tautologically true;
      // negative overlaps show up as huge unsigned values and are
      // rejected by the "< bytes" halves. Consider simplifying.
      size_t top_overlap = requested_addr + (bytes + gap) - base[i];
      if (top_overlap >= 0 && top_overlap < bytes) {
        had_top_overlap = true;
        unmap_memory(base[i], top_overlap);
        base[i] += top_overlap;
        size[i] = bytes - top_overlap;
      } else {
        size_t bottom_overlap = base[i] + bytes - requested_addr;
        if (bottom_overlap >= 0 && bottom_overlap < bytes) {
          if (PrintMiscellaneous && Verbose && bottom_overlap == 0) {
            warning("attempt_reserve_memory_at: possible alignment bug");
          }
          unmap_memory(requested_addr, bottom_overlap);
          size[i] = bytes - bottom_overlap;
        } else {
          size[i] = bytes;
        }
      }
    }
  }

  // Give back the unused reserved pieces.

  for (int j = 0; j < i; ++j) {
    if (base[j] != NULL) {
      unmap_memory(base[j], size[j]);
    }
  }

  return (i < max_tries) ? requested_addr : NULL;
}
3121 bool os::pd_release_memory(char* addr, size_t bytes) {
3122 size_t size = bytes;
3123 return munmap(addr, size) == 0;
3124 }
3126 static bool solaris_mprotect(char* addr, size_t bytes, int prot) {
3127 assert(addr == (char*)align_size_down((uintptr_t)addr, os::vm_page_size()),
3128 "addr must be page aligned");
3129 int retVal = mprotect(addr, bytes, prot);
3130 return retVal == 0;
3131 }
3133 // Protect memory (Used to pass readonly pages through
3134 // JNI GetArray<type>Elements with empty arrays.)
3135 // Also, used for serialization page and for compressed oops null pointer
3136 // checking.
3137 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3138 bool is_committed) {
3139 unsigned int p = 0;
3140 switch (prot) {
3141 case MEM_PROT_NONE: p = PROT_NONE; break;
3142 case MEM_PROT_READ: p = PROT_READ; break;
3143 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
3144 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
3145 default:
3146 ShouldNotReachHere();
3147 }
3148 // is_committed is unused.
3149 return solaris_mprotect(addr, bytes, p);
3150 }
3152 // guard_memory and unguard_memory only happens within stack guard pages.
3153 // Since ISM pertains only to the heap, guard and unguard memory should not
3154 /// happen with an ISM region.
3155 bool os::guard_memory(char* addr, size_t bytes) {
3156 return solaris_mprotect(addr, bytes, PROT_NONE);
3157 }
3159 bool os::unguard_memory(char* addr, size_t bytes) {
3160 return solaris_mprotect(addr, bytes, PROT_READ|PROT_WRITE);
3161 }
// Large page support
// Cached large page size; set by os::large_page_init() (via
// mpss_sanity_check), 0 until then or when large pages are unavailable.
static size_t _large_page_size = 0;
// Insertion sort for small arrays (descending order).
static void insertion_sort_descending(size_t* array, int len) {
  for (int i = 1; i < len; i++) {
    const size_t val = array[i];
    // Shift smaller elements one slot to the right until val's
    // position is found, then drop val in place.
    size_t pos = i;
    while (pos > 0 && array[pos - 1] < val) {
      array[pos] = array[pos - 1];
      --pos;
    }
    array[pos] = val;
  }
}
// Probe the OS for MPSS (multiple page size support) and populate
// _page_sizes with up to VM_Version::page_size_count() usable sizes in
// descending order, 0-terminated, always ending with the default page
// size. On success sets *page_size to the largest usable size and
// returns true; 'warn' controls whether failures produce a warning.
bool os::Solaris::mpss_sanity_check(bool warn, size_t* page_size) {
  const unsigned int usable_count = VM_Version::page_size_count();
  if (usable_count == 1) {
    return false;
  }

  // Find the right getpagesizes interface. When solaris 11 is the minimum
  // build platform, getpagesizes() (without the '2') can be called directly.
  typedef int (*gps_t)(size_t[], int);
  gps_t gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes2"));
  if (gps_func == NULL) {
    gps_func = CAST_TO_FN_PTR(gps_t, dlsym(RTLD_DEFAULT, "getpagesizes"));
    if (gps_func == NULL) {
      if (warn) {
        warning("MPSS is not supported by the operating system.");
      }
      return false;
    }
  }

  // Fill the array of page sizes.
  int n = (*gps_func)(_page_sizes, page_sizes_max);
  assert(n > 0, "Solaris bug?");

  if (n == page_sizes_max) {
    // Add a sentinel value (necessary only if the array was completely filled
    // since it is static (zeroed at initialization)).
    _page_sizes[--n] = 0;
    DEBUG_ONLY(warning("increase the size of the os::_page_sizes array.");)
  }
  assert(_page_sizes[n] == 0, "missing sentinel");
  trace_page_sizes("available page sizes", _page_sizes, n);

  if (n == 1) return false;     // Only one page size available.

  // Skip sizes larger than 4M (or LargePageSizeInBytes if it was set) and
  // select up to usable_count elements. First sort the array, find the first
  // acceptable value, then copy the usable sizes to the top of the array and
  // trim the rest. Make sure to include the default page size :-).
  //
  // A better policy could get rid of the 4M limit by taking the sizes of the
  // important VM memory regions (java heap and possibly the code cache) into
  // account.
  insertion_sort_descending(_page_sizes, n);
  const size_t size_limit =
    FLAG_IS_DEFAULT(LargePageSizeInBytes) ? 4 * M : LargePageSizeInBytes;
  int beg;
  // beg: index of the first size <= size_limit (the array is descending).
  for (beg = 0; beg < n && _page_sizes[beg] > size_limit; ++beg) /* empty */ ;
  const int end = MIN2((int)usable_count, n) - 1;
  // Compact the acceptable sizes to the front of the array.
  for (int cur = 0; cur < end; ++cur, ++beg) {
    _page_sizes[cur] = _page_sizes[beg];
  }
  // Always keep the default (small) page size as the last usable entry.
  _page_sizes[end] = vm_page_size();
  _page_sizes[end + 1] = 0;

  if (_page_sizes[end] > _page_sizes[end - 1]) {
    // Default page size is not the smallest; sort again.
    insertion_sort_descending(_page_sizes, end + 1);
  }
  *page_size = _page_sizes[0];

  trace_page_sizes("usable page sizes", _page_sizes, end + 1);
  return true;
}
3243 void os::large_page_init() {
3244 if (UseLargePages) {
3245 // print a warning if any large page related flag is specified on command line
3246 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3247 !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3249 UseLargePages = Solaris::mpss_sanity_check(warn_on_failure, &_large_page_size);
3250 }
3251 }
3253 bool os::Solaris::setup_large_pages(caddr_t start, size_t bytes, size_t align) {
3254 // Signal to OS that we want large pages for addresses
3255 // from addr, addr + bytes
3256 struct memcntl_mha mpss_struct;
3257 mpss_struct.mha_cmd = MHA_MAPSIZE_VA;
3258 mpss_struct.mha_pagesize = align;
3259 mpss_struct.mha_flags = 0;
3260 // Upon successful completion, memcntl() returns 0
3261 if (memcntl(start, bytes, MC_HAT_ADVISE, (caddr_t) &mpss_struct, 0, 0)) {
3262 debug_only(warning("Attempt to use MPSS failed."));
3263 return false;
3264 }
3265 return true;
3266 }
// Large pages on Solaris are requested per-range via MPSS advice
// (see setup_large_pages); there is no special reservation path, so
// this entry point must never be reached.
char* os::reserve_memory_special(size_t size, size_t alignment, char* addr, bool exec) {
  fatal("os::reserve_memory_special should not be called on Solaris.");
  return NULL;
}
// Counterpart of reserve_memory_special; equally unreachable on Solaris.
bool os::release_memory_special(char* base, size_t bytes) {
  fatal("os::release_memory_special should not be called on Solaris.");
  return false;
}
// Returns the large page size selected by os::large_page_init(),
// or 0 if large pages are not in use.
size_t os::large_page_size() {
  return _large_page_size;
}
// MPSS allows application to commit large page memory on demand; with ISM
// the entire memory region must be allocated as shared memory.
bool os::can_commit_large_page_memory() {
  // This port uses MPSS (see setup_large_pages), hence on-demand commit works.
  return true;
}
// Large page memory may also be executable on this platform.
bool os::can_execute_large_page_memory() {
  return true;
}
// Sleep for 'millis' milliseconds via poll(2). When 'interruptible',
// the poll is wrapped so that a Java-level interrupt aborts the sleep
// (res == OS_INTRPT); otherwise a plain poll is used. Returns OS_OK
// when the full delay elapsed, or the error/interrupt status.
static int os_sleep(jlong millis, bool interruptible) {
  const jlong limit = INT_MAX;
  jlong prevtime;
  int res;

  // poll(2) takes an int timeout, so sleep in INT_MAX-sized slices
  // (recursively) until the remainder fits.
  while (millis > limit) {
    if ((res = os_sleep(limit, interruptible)) != OS_OK)
      return res;
    millis -= limit;
  }

  // Restart interrupted polls with new parameters until the proper delay
  // has been completed.

  prevtime = getTimeMillis();

  while (millis > 0) {
    jlong newtime;

    if (!interruptible) {
      // Following assert fails for os::yield_all:
      // assert(!thread->is_Java_thread(), "must not be java thread");
      res = poll(NULL, 0, millis);
    } else {
      JavaThread *jt = JavaThread::current();

      INTERRUPTIBLE_NORESTART_VM_ALWAYS(poll(NULL, 0, millis), res, jt,
        os::Solaris::clear_interrupted);
    }

    // INTERRUPTIBLE_NORESTART_VM_ALWAYS returns res == OS_INTRPT for
    // thread.Interrupt.

    // See c/r 6751923. Poll can return 0 before time
    // has elapsed if time is set via clock_settime (as NTP does).
    // res == 0 if poll timed out (see man poll RETURN VALUES)
    // using the logic below checks that we really did
    // sleep at least "millis" if not we'll sleep again.
    if( ( res == 0 ) || ((res == OS_ERR) && (errno == EINTR))) {
      // Timed out or EINTR: recompute the remaining delay from the
      // wall clock and, if any remains, go around again.
      newtime = getTimeMillis();
      assert(newtime >= prevtime, "time moving backwards");
      /* Doing prevtime and newtime in microseconds doesn't help precision,
         and trying to round up to avoid lost milliseconds can result in a
         too-short delay. */
      millis -= newtime - prevtime;
      if(millis <= 0)
        return OS_OK;
      prevtime = newtime;
    } else
      return res;
  }

  return OS_OK;
}
// Read calls from inside the vm need to perform state transitions
// NOTE(review): the INTERRUPTIBLE_RETURN_INT_VM macro is expected to
// expand to the function's return statement — confirm in interfaceSupport.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT_VM(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
// Like os::read, but without the VM thread-state transition.
// NOTE(review): INTERRUPTIBLE_RETURN_INT presumably supplies the return
// value — confirm in interfaceSupport.
size_t os::restartable_read(int fd, void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::read(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
// Sleep 'millis' ms on behalf of 'thread' (must be the current thread).
// JavaThreads honor the _thread_blocked and suspend-equivalent protocols;
// millis <= 0 degenerates to a thr_yield(). Returns the os_sleep status
// (0 for the yield case).
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // TODO-FIXME: this should be removed.
  // On Solaris machines (especially 2.5.1) we found that sometimes the VM gets into a live lock
  // situation with a JavaThread being starved out of a lwp. The kernel doesn't seem to generate
  // a SIGWAITING signal which would enable the threads library to create a new lwp for the starving
  // thread. We suspect that because the Watcher thread keeps waking up at periodic intervals the kernel
  // is fooled into believing that the system is making progress. In the code below we block the
  // the watcher thread while safepoint is in progress so that it would not appear as though the
  // system is making progress.
  if (!Solaris::T2_libthread() &&
      thread->is_Watcher_thread() && SafepointSynchronize::is_synchronizing() && !Arguments::has_profile()) {
    // We now try to acquire the threads lock. Since this lock is held by the VM thread during
    // the entire safepoint, the watcher thread will line up here during the safepoint.
    Threads_lock->lock_without_safepoint_check();
    Threads_lock->unlock();
  }

  if (thread->is_Java_thread()) {
    // This is a JavaThread so we honor the _thread_blocked protocol
    // even for sleeps of 0 milliseconds. This was originally done
    // as a workaround for bug 4338139. However, now we also do it
    // to honor the suspend-equivalent protocol.

    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    int ret_code;
    if (millis <= 0) {
      thr_yield();
      ret_code = 0;
    } else {
      // The original sleep() implementation did not create an
      // OSThreadWaitState helper for sleeps of 0 milliseconds.
      // I'm preserving that decision for now.
      OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

      ret_code = os_sleep(millis, interruptible);
    }

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();

    return ret_code;
  }

  // non-JavaThread from this point on:

  if (millis <= 0) {
    thr_yield();
    return 0;
  }

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

  return os_sleep(millis, interruptible);
}
3419 void os::naked_short_sleep(jlong ms) {
3420 assert(ms < 1000, "Un-interruptable sleep, short time use only");
3422 // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
3423 // Solaris requires -lrt for this.
3424 usleep((ms * 1000));
3426 return;
3427 }
3429 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3430 void os::infinite_sleep() {
3431 while (true) { // sleep forever ...
3432 ::sleep(100); // ... 100 seconds at a time
3433 }
3434 }
3436 // Used to convert frequent JVM_Yield() to nops
3437 bool os::dont_yield() {
3438 if (DontYieldALot) {
3439 static hrtime_t last_time = 0;
3440 hrtime_t diff = getTimeNanos() - last_time;
3442 if (diff < DontYieldALotInterval * 1000000)
3443 return true;
3445 last_time += diff;
3447 return false;
3448 }
3449 else {
3450 return false;
3451 }
3452 }
// Caveat: Solaris os::yield() causes a thread-state transition whereas
// the linux and win32 implementations do not. This should be checked.

void os::yield() {
  // Yields to all threads with same or greater priority
  // (a 0 ms sleep degenerates to thr_yield() inside os::sleep).
  os::sleep(Thread::current(), 0, false);
}
// Note that yield semantics are defined by the scheduling class to which
// the thread currently belongs. Typically, yield will not yield to
// other equal or higher priority threads that reside on the dispatch queues
// of other CPUs.

// Raw thr_yield(); whether anything actually ran is unknown to the VM.
os::YieldResult os::NakedYield() { thr_yield(); return os::YIELD_UNKNOWN; }
// On Solaris we found that yield_all doesn't always yield to all other threads.
// There have been cases where there is a thread ready to execute but it doesn't
// get an lwp as the VM thread continues to spin with sleeps of 1 millisecond.
// The 1 millisecond wait doesn't seem long enough for the kernel to issue a
// SIGWAITING signal which will cause a new lwp to be created. So we count the
// number of times yield_all is called in the one loop and increase the sleep
// time after 8 attempts. If this fails too we increase the concurrency level
// so that the starving thread would get an lwp

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  if (attempts == 0) {
    os::sleep(Thread::current(), 1, false);
  } else {
    int iterations = attempts % 30;
    if (iterations == 0 && !os::Solaris::T2_libthread()) {
      // thr_setconcurrency and _getconcurrency make sense only under T1.
      // Periodically ask libthread for one more lwp when we appear
      // short of lwps relative to the thread count.
      int noofLWPS = thr_getconcurrency();
      if (noofLWPS < (Threads::number_of_threads() + 2)) {
        thr_setconcurrency(thr_getconcurrency() + 1);
      }
    } else if (iterations < 25) {
      os::sleep(Thread::current(), 1, false);
    } else {
      // Escalate the sleep once enough attempts have accumulated.
      os::sleep(Thread::current(), 10, false);
    }
  }
}
// Called from the tight loops to possibly influence time-sharing heuristics
// (delegates to yield_all, which escalates as 'attempts' grows).
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}
3505 // Interface for setting lwp priorities. If we are using T2 libthread,
3506 // which forces the use of BoundThreads or we manually set UseBoundThreads,
3507 // all of our threads will be assigned to real lwp's. Using the thr_setprio
3508 // function is meaningless in this mode so we must adjust the real lwp's priority
3509 // The routines below implement the getting and setting of lwp priorities.
3510 //
// Note: There are three priority scales used on Solaris. Java priorities
3512 // which range from 1 to 10, libthread "thr_setprio" scale which range
3513 // from 0 to 127, and the current scheduling class of the process we
3514 // are running in. This is typically from -60 to +60.
// The setting of the lwp priorities is done after a call to thr_setprio
3516 // so Java priorities are mapped to libthread priorities and we map from
3517 // the latter to lwp priorities. We don't keep priorities stored in
3518 // Java priorities since some of our worker threads want to set priorities
3519 // higher than all Java threads.
3520 //
3521 // For related information:
3522 // (1) man -s 2 priocntl
3523 // (2) man -s 4 priocntl
3524 // (3) man dispadmin
3525 // = librt.so
3526 // = libthread/common/rtsched.c - thrp_setlwpprio().
3527 // = ps -cL <pid> ... to validate priority.
3528 // = sched_get_priority_min and _max
3529 // pthread_create
3530 // sched_setparam
3531 // pthread_setschedparam
3532 //
3533 // Assumptions:
3534 // + We assume that all threads in the process belong to the same
3535 // scheduling class. IE. an homogenous process.
// + Must be root or in IA group to change the "interactive" attribute.
3537 // Priocntl() will fail silently. The only indication of failure is when
3538 // we read-back the value and notice that it hasn't changed.
3539 // + Interactive threads enter the runq at the head, non-interactive at the tail.
3540 // + For RT, change timeslice as well. Invariant:
3541 // constant "priority integral"
3542 // Konst == TimeSlice * (60-Priority)
3543 // Given a priority, compute appropriate timeslice.
3544 // + Higher numerical values have higher priority.
// sched class attributes
typedef struct {
  int schedPolicy;              // classID
  int maxPrio;                  // highest priority in this class
  int minPrio;                  // lowest priority in this class
} SchedInfo;
// Per-class priority limits for TS/IA/RT/FX, filled in by lwp_priocntl_init().
static SchedInfo tsLimits, iaLimits, rtLimits, fxLimits;

#ifdef ASSERT
static int ReadBackValidate = 1;
#endif
// Scheduling class id and priority range of this process as queried via
// priocntl(PC_GETPARMS) in lwp_priocntl_init(); the range is clamped by
// the class's uprilim.
static int myClass = 0;
static int myMin = 0;
static int myMax = 0;
static int myCur = 0;      // current priority — updated outside this chunk; TODO confirm
static bool priocntl_enable = false;  // set elsewhere; not assigned in this chunk

static const int criticalPrio = 60; // FX/60 is critical thread class/priority on T4
static int java_MaxPriority_to_os_priority = 0; // Saved mapping
3569 // lwp_priocntl_init
3570 //
3571 // Try to determine the priority scale for our process.
3572 //
3573 // Return errno or 0 if OK.
3574 //
3575 static int lwp_priocntl_init () {
3576 int rslt;
3577 pcinfo_t ClassInfo;
3578 pcparms_t ParmInfo;
3579 int i;
3581 if (!UseThreadPriorities) return 0;
3583 // We are using Bound threads, we need to determine our priority ranges
3584 if (os::Solaris::T2_libthread() || UseBoundThreads) {
3585 // If ThreadPriorityPolicy is 1, switch tables
3586 if (ThreadPriorityPolicy == 1) {
3587 for (i = 0 ; i < CriticalPriority+1; i++)
3588 os::java_to_os_priority[i] = prio_policy1[i];
3589 }
3590 if (UseCriticalJavaThreadPriority) {
3591 // MaxPriority always maps to the FX scheduling class and criticalPrio.
3592 // See set_native_priority() and set_lwp_class_and_priority().
3593 // Save original MaxPriority mapping in case attempt to
3594 // use critical priority fails.
3595 java_MaxPriority_to_os_priority = os::java_to_os_priority[MaxPriority];
3596 // Set negative to distinguish from other priorities
3597 os::java_to_os_priority[MaxPriority] = -criticalPrio;
3598 }
3599 }
3600 // Not using Bound Threads, set to ThreadPolicy 1
3601 else {
3602 for ( i = 0 ; i < CriticalPriority+1; i++ ) {
3603 os::java_to_os_priority[i] = prio_policy1[i];
3604 }
3605 return 0;
3606 }
3608 // Get IDs for a set of well-known scheduling classes.
3609 // TODO-FIXME: GETCLINFO returns the current # of classes in the
3610 // the system. We should have a loop that iterates over the
3611 // classID values, which are known to be "small" integers.
3613 strcpy(ClassInfo.pc_clname, "TS");
3614 ClassInfo.pc_cid = -1;
3615 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3616 if (rslt < 0) return errno;
3617 assert(ClassInfo.pc_cid != -1, "cid for TS class is -1");
3618 tsLimits.schedPolicy = ClassInfo.pc_cid;
3619 tsLimits.maxPrio = ((tsinfo_t*)ClassInfo.pc_clinfo)->ts_maxupri;
3620 tsLimits.minPrio = -tsLimits.maxPrio;
3622 strcpy(ClassInfo.pc_clname, "IA");
3623 ClassInfo.pc_cid = -1;
3624 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3625 if (rslt < 0) return errno;
3626 assert(ClassInfo.pc_cid != -1, "cid for IA class is -1");
3627 iaLimits.schedPolicy = ClassInfo.pc_cid;
3628 iaLimits.maxPrio = ((iainfo_t*)ClassInfo.pc_clinfo)->ia_maxupri;
3629 iaLimits.minPrio = -iaLimits.maxPrio;
3631 strcpy(ClassInfo.pc_clname, "RT");
3632 ClassInfo.pc_cid = -1;
3633 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3634 if (rslt < 0) return errno;
3635 assert(ClassInfo.pc_cid != -1, "cid for RT class is -1");
3636 rtLimits.schedPolicy = ClassInfo.pc_cid;
3637 rtLimits.maxPrio = ((rtinfo_t*)ClassInfo.pc_clinfo)->rt_maxpri;
3638 rtLimits.minPrio = 0;
3640 strcpy(ClassInfo.pc_clname, "FX");
3641 ClassInfo.pc_cid = -1;
3642 rslt = priocntl(P_ALL, 0, PC_GETCID, (caddr_t)&ClassInfo);
3643 if (rslt < 0) return errno;
3644 assert(ClassInfo.pc_cid != -1, "cid for FX class is -1");
3645 fxLimits.schedPolicy = ClassInfo.pc_cid;
3646 fxLimits.maxPrio = ((fxinfo_t*)ClassInfo.pc_clinfo)->fx_maxupri;
3647 fxLimits.minPrio = 0;
3649 // Query our "current" scheduling class.
3650 // This will normally be IA, TS or, rarely, FX or RT.
3651 memset(&ParmInfo, 0, sizeof(ParmInfo));
3652 ParmInfo.pc_cid = PC_CLNULL;
3653 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3654 if (rslt < 0) return errno;
3655 myClass = ParmInfo.pc_cid;
3657 // We now know our scheduling classId, get specific information
3658 // about the class.
3659 ClassInfo.pc_cid = myClass;
3660 ClassInfo.pc_clname[0] = 0;
3661 rslt = priocntl((idtype)0, 0, PC_GETCLINFO, (caddr_t)&ClassInfo);
3662 if (rslt < 0) return errno;
3664 if (ThreadPriorityVerbose) {
3665 tty->print_cr("lwp_priocntl_init: Class=%d(%s)...", myClass, ClassInfo.pc_clname);
3666 }
3668 memset(&ParmInfo, 0, sizeof(pcparms_t));
3669 ParmInfo.pc_cid = PC_CLNULL;
3670 rslt = priocntl(P_PID, P_MYID, PC_GETPARMS, (caddr_t)&ParmInfo);
3671 if (rslt < 0) return errno;
3673 if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
3674 myMin = rtLimits.minPrio;
3675 myMax = rtLimits.maxPrio;
3676 } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
3677 iaparms_t *iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
3678 myMin = iaLimits.minPrio;
3679 myMax = iaLimits.maxPrio;
3680 myMax = MIN2(myMax, (int)iaInfo->ia_uprilim); // clamp - restrict
3681 } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
3682 tsparms_t *tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
3683 myMin = tsLimits.minPrio;
3684 myMax = tsLimits.maxPrio;
3685 myMax = MIN2(myMax, (int)tsInfo->ts_uprilim); // clamp - restrict
3686 } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
3687 fxparms_t *fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
3688 myMin = fxLimits.minPrio;
3689 myMax = fxLimits.maxPrio;
3690 myMax = MIN2(myMax, (int)fxInfo->fx_uprilim); // clamp - restrict
3691 } else {
3692 // No clue - punt
3693 if (ThreadPriorityVerbose)
3694 tty->print_cr ("Unknown scheduling class: %s ... \n", ClassInfo.pc_clname);
3695 return EINVAL; // no clue, punt
3696 }
3698 if (ThreadPriorityVerbose) {
3699 tty->print_cr ("Thread priority Range: [%d..%d]\n", myMin, myMax);
3700 }
3702 priocntl_enable = true; // Enable changing priorities
3703 return 0;
3704 }
// Convenience accessors for the class-specific parameter area (pc_clparms)
// embedded in a pcparms_t, viewed as the appropriate per-class struct.
#define IAPRI(x) ((iaparms_t *)((x).pc_clparms))
#define RTPRI(x) ((rtparms_t *)((x).pc_clparms))
#define TSPRI(x) ((tsparms_t *)((x).pc_clparms))
#define FXPRI(x) ((fxparms_t *)((x).pc_clparms))
// scale_to_lwp_priority
//
// Convert from the libthread "thr_setprio" scale to our current
// lwp scheduling class scale.
//
// The thr_setprio scale is [0..127]; a value is mapped linearly onto
// [rMin..rMax].  The top value 127 is returned as rMax directly so the
// integer division cannot round the maximum down.
static int scale_to_lwp_priority (int rMin, int rMax, int x)
{
  if (x == 127) {
    return rMax;   // avoid round-down on the top of the scale
  }
  return rMin + (x * (rMax - rMin)) / 128;
}
// set_lwp_class_and_priority
//
// Set the class and priority of the lwp.  This call should only
// be made when using bound threads (T2 threads are bound by default).
//
// ThreadID is used for verbose tracing only; lwpid identifies the lwp to
// adjust.  When 'scale' is set, newPrio is converted from the thr_setprio
// [0..127] scale onto the target class' range via scale_to_lwp_priority().
// Returns 0 on success, otherwise an errno-style error code.
int set_lwp_class_and_priority(int ThreadID, int lwpid,
                               int newPrio, int new_class, bool scale) {
  int rslt;
  int Actual, Expected, prv;   // NOTE(review): 'prv' appears unused in this function
  pcparms_t ParmInfo;          // for GET-SET
#ifdef ASSERT
  pcparms_t ReadBack;          // for readback
#endif

  // Set priority via PC_GETPARMS, update, PC_SETPARMS
  // Query current values.
  // TODO: accelerate this by eliminating the PC_GETPARMS call.
  //       Cache "pcparms_t" in global ParmCache.
  // TODO: elide set-to-same-value

  // If something went wrong on init, don't change priorities.
  if ( !priocntl_enable ) {
    if (ThreadPriorityVerbose)
      tty->print_cr("Trying to set priority but init failed, ignoring");
    return EINVAL;
  }

  // If lwp hasn't started yet, just return
  // the _start routine will call us again.
  if ( lwpid <= 0 ) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("deferring the set_lwp_class_and_priority of thread "
                     INTPTR_FORMAT " to %d, lwpid not set",
                     ThreadID, newPrio);
    }
    return 0;
  }

  if (ThreadPriorityVerbose) {
    tty->print_cr ("set_lwp_class_and_priority("
                   INTPTR_FORMAT "@" INTPTR_FORMAT " %d) ",
                   ThreadID, lwpid, newPrio);
  }

  // Fetch the lwp's current scheduling parameters so unrelated fields can
  // be preserved on the subsequent PC_SETPARMS.
  memset(&ParmInfo, 0, sizeof(pcparms_t));
  ParmInfo.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ParmInfo);
  if (rslt < 0) return errno;

  int cur_class = ParmInfo.pc_cid;
  ParmInfo.pc_cid = (id_t)new_class;

  // Fill in the class-specific parameter block.  For IA/TS/FX the new
  // priority is clamped to the lwp's user priority limit when the class
  // is unchanged; on a class switch the limit is raised to the class max.
  if (new_class == rtLimits.schedPolicy) {
    rtparms_t *rtInfo = (rtparms_t*)ParmInfo.pc_clparms;
    rtInfo->rt_pri = scale ? scale_to_lwp_priority(rtLimits.minPrio,
                                                   rtLimits.maxPrio, newPrio)
                           : newPrio;
    rtInfo->rt_tqsecs = RT_NOCHANGE;
    rtInfo->rt_tqnsecs = RT_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("RT: %d->%d\n", newPrio, rtInfo->rt_pri);
    }
  } else if (new_class == iaLimits.schedPolicy) {
    iaparms_t* iaInfo = (iaparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(iaLimits.maxPrio,
                          cur_class == new_class
                            ? (int)iaInfo->ia_uprilim : iaLimits.maxPrio);
    iaInfo->ia_upri = scale ? scale_to_lwp_priority(iaLimits.minPrio,
                                                    maxClamped, newPrio)
                            : newPrio;
    iaInfo->ia_uprilim = cur_class == new_class
                           ? IA_NOCHANGE : (pri_t)iaLimits.maxPrio;
    iaInfo->ia_mode = IA_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("IA: [%d...%d] %d->%d\n",
                    iaLimits.minPrio, maxClamped, newPrio, iaInfo->ia_upri);
    }
  } else if (new_class == tsLimits.schedPolicy) {
    tsparms_t* tsInfo = (tsparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(tsLimits.maxPrio,
                          cur_class == new_class
                            ? (int)tsInfo->ts_uprilim : tsLimits.maxPrio);
    tsInfo->ts_upri = scale ? scale_to_lwp_priority(tsLimits.minPrio,
                                                    maxClamped, newPrio)
                            : newPrio;
    tsInfo->ts_uprilim = cur_class == new_class
                           ? TS_NOCHANGE : (pri_t)tsLimits.maxPrio;
    if (ThreadPriorityVerbose) {
      tty->print_cr("TS: [%d...%d] %d->%d\n",
                    tsLimits.minPrio, maxClamped, newPrio, tsInfo->ts_upri);
    }
  } else if (new_class == fxLimits.schedPolicy) {
    fxparms_t* fxInfo = (fxparms_t*)ParmInfo.pc_clparms;
    int maxClamped = MIN2(fxLimits.maxPrio,
                          cur_class == new_class
                            ? (int)fxInfo->fx_uprilim : fxLimits.maxPrio);
    fxInfo->fx_upri = scale ? scale_to_lwp_priority(fxLimits.minPrio,
                                                    maxClamped, newPrio)
                            : newPrio;
    fxInfo->fx_uprilim = cur_class == new_class
                           ? FX_NOCHANGE : (pri_t)fxLimits.maxPrio;
    fxInfo->fx_tqsecs = FX_NOCHANGE;
    fxInfo->fx_tqnsecs = FX_NOCHANGE;
    if (ThreadPriorityVerbose) {
      tty->print_cr("FX: [%d...%d] %d->%d\n",
                    fxLimits.minPrio, maxClamped, newPrio, fxInfo->fx_upri);
    }
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("Unknown new scheduling class %d\n", new_class);
    }
    return EINVAL;    // no clue, punt
  }

  rslt = priocntl(P_LWPID, lwpid, PC_SETPARMS, (caddr_t)&ParmInfo);
  if (ThreadPriorityVerbose && rslt) {
    tty->print_cr ("PC_SETPARMS ->%d %d\n", rslt, errno);
  }
  if (rslt < 0) return errno;

#ifdef ASSERT
  // Sanity check: read back what we just attempted to set.
  // In theory it could have changed in the interim ...
  //
  // The priocntl system call is tricky.
  // Sometimes it'll validate the priority value argument and
  // return EINVAL if unhappy.  At other times it fails silently.
  // Readbacks are prudent.

  if (!ReadBackValidate) return 0;

  memset(&ReadBack, 0, sizeof(pcparms_t));
  ReadBack.pc_cid = PC_CLNULL;
  rslt = priocntl(P_LWPID, lwpid, PC_GETPARMS, (caddr_t)&ReadBack);
  assert(rslt >= 0, "priocntl failed");
  Actual = Expected = 0xBAD;
  assert(ParmInfo.pc_cid == ReadBack.pc_cid, "cid's don't match");
  if (ParmInfo.pc_cid == rtLimits.schedPolicy) {
    Actual = RTPRI(ReadBack)->rt_pri;
    Expected = RTPRI(ParmInfo)->rt_pri;
  } else if (ParmInfo.pc_cid == iaLimits.schedPolicy) {
    Actual = IAPRI(ReadBack)->ia_upri;
    Expected = IAPRI(ParmInfo)->ia_upri;
  } else if (ParmInfo.pc_cid == tsLimits.schedPolicy) {
    Actual = TSPRI(ReadBack)->ts_upri;
    Expected = TSPRI(ParmInfo)->ts_upri;
  } else if (ParmInfo.pc_cid == fxLimits.schedPolicy) {
    Actual = FXPRI(ReadBack)->fx_upri;
    Expected = FXPRI(ParmInfo)->fx_upri;
  } else {
    if (ThreadPriorityVerbose) {
      tty->print_cr("set_lwp_class_and_priority: unexpected class in readback: %d\n",
                    ParmInfo.pc_cid);
    }
  }

  // Readback mismatch is reported but not treated as an error.
  if (Actual != Expected) {
    if (ThreadPriorityVerbose) {
      tty->print_cr ("set_lwp_class_and_priority(%d %d) Class=%d: actual=%d vs expected=%d\n",
                     lwpid, newPrio, ReadBack.pc_cid, Actual, Expected);
    }
  }
#endif

  return 0;
}
3895 // Solaris only gives access to 128 real priorities at a time,
3896 // so we expand Java's ten to fill this range. This would be better
3897 // if we dynamically adjusted relative priorities.
3898 //
3899 // The ThreadPriorityPolicy option allows us to select 2 different
3900 // priority scales.
3901 //
3902 // ThreadPriorityPolicy=0
3903 // Since the Solaris' default priority is MaximumPriority, we do not
3904 // set a priority lower than Max unless a priority lower than
3905 // NormPriority is requested.
3906 //
3907 // ThreadPriorityPolicy=1
3908 // This mode causes the priority table to get filled with
// linear values.  NormPriority gets mapped to 50% of the
// Maximum priority and so on. This will cause VM threads
3911 // to get unfair treatment against other Solaris processes
3912 // which do not explicitly alter their thread priorities.
3913 //
// Map from Java priority (array index) to a Solaris priority on the
// 128-step thr_setprio scale described above.  The CriticalPriority entry
// is stored negated so os::set_native_priority() can recognize it as a
// request for the FX critical priority.
int os::java_to_os_priority[CriticalPriority + 1] = {
  -99999,         // 0 Entry should never be used

  0,              // 1 MinPriority
  32,             // 2
  64,             // 3

  96,             // 4
  127,            // 5 NormPriority
  127,            // 6

  127,            // 7
  127,            // 8
  127,            // 9 NearMaxPriority

  127,            // 10 MaxPriority

  -criticalPrio   // 11 CriticalPriority
};
// Apply 'newpri' (a value from java_to_os_priority) to the thread, using
// thr_setprio and, for bound/T2 threads, priocntl via
// set_lwp_class_and_priority().  A negated value requests the FX critical
// priority; if that fails, fall back to the saved MaxPriority mapping.
OSReturn os::set_native_priority(Thread* thread, int newpri) {
  OSThread* osthread = thread->osthread();

  // Save requested priority in case the thread hasn't been started
  osthread->set_native_priority(newpri);

  // Check for critical priority request
  bool fxcritical = false;
  if (newpri == -criticalPrio) {
    fxcritical = true;
    newpri = criticalPrio;
  }

  assert(newpri >= MinimumPriority && newpri <= MaximumPriority, "bad priority mapping");
  if (!UseThreadPriorities) return OS_OK;

  int status = 0;

  if (!fxcritical) {
    // Use thr_setprio only if we have a priority that thr_setprio understands
    status = thr_setprio(thread->osthread()->thread_id(), newpri);
  }

  if (os::Solaris::T2_libthread() ||
      (UseBoundThreads && osthread->is_vm_created())) {
    int lwp_status =
      set_lwp_class_and_priority(osthread->thread_id(),
                                 osthread->lwp_id(),
                                 newpri,
                                 fxcritical ? fxLimits.schedPolicy : myClass,
                                 !fxcritical);  // critical priorities are passed unscaled
    if (lwp_status != 0 && fxcritical) {
      // Try again, this time without changing the scheduling class
      newpri = java_MaxPriority_to_os_priority;
      lwp_status = set_lwp_class_and_priority(osthread->thread_id(),
                                              osthread->lwp_id(),
                                              newpri, myClass, false);
    }
    status |= lwp_status;
  }
  return (status == 0) ? OS_OK : OS_ERR;
}
3979 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
3980 int p;
3981 if ( !UseThreadPriorities ) {
3982 *priority_ptr = NormalPriority;
3983 return OS_OK;
3984 }
3985 int status = thr_getprio(thread->osthread()->thread_id(), &p);
3986 if (status != 0) {
3987 return OS_ERR;
3988 }
3989 *priority_ptr = p;
3990 return OS_OK;
3991 }
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Implemented with the Solaris schedctl facility: schedctl_init() obtains
// the lwp's scheduler-control block, schedctl_start() requests that
// preemption be deferred (see schedctl_init(3C)).
void os::hint_no_preempt() {
  schedctl_start(schedctl_init());
}
// Discard the ucontext saved while the thread was suspended (see
// suspend_save_context); called once the suspend/resume cycle completes.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
}
// Remember the suspended thread's ucontext so it can be examined while the
// thread is stopped (e.g. by PcFetcher::do_task below).
static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
  osthread->set_ucontext(context);
}
// Rendezvous semaphore for the suspend/resume handshake between
// do_suspend()/do_resume() and SR_handler().
static Semaphore sr_semaphore;
// Signal handler side of the suspend/resume protocol.  Runs in the target
// thread when it receives SIGasync: if a suspend was requested, park the
// thread in sigsuspend() until a wakeup request switches it back to
// RUNNING; other states mean the request was cancelled or is a wakeup.
void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, uc);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, os::Solaris::SIGasync());

      // tell do_suspend() we are parked
      sr_semaphore.signal();
      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          // acknowledge the wakeup to do_resume()
          sr_semaphore.signal();
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    // ignore
  }

  errno = old_errno;
}
// Interrupt 'thread': set its interrupted flag and unpark any events it
// may be blocked on, then (only on the first interrupt) deliver
// SIGinterrupt to unwedge poll()-based sleeps.
void os::interrupt(Thread* thread) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  int isInterrupted = osthread->interrupted();
  if (!isInterrupted) {
    osthread->set_interrupted(true);
    OrderAccess::fence();   // publish the flag before waking the sleeper
    // os::sleep() is implemented with either poll (NULL,0,timeout) or
    // by parking on _SleepEvent.  If the former, thr_kill will unwedge
    // the sleeper by SIGINTR, otherwise the unpark() will wake the sleeper.
    ParkEvent * const slp = thread->_SleepEvent ;
    if (slp != NULL) slp->unpark() ;
  }

  // For JSR166:  unpark after setting status but before thr_kill -dl
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  // Handle interruptible wait() ...
  ParkEvent * const ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

  // When events are used everywhere for os::sleep, then this thr_kill
  // will only be needed if UseVMInterruptibleIO is true.

  if (!isInterrupted) {
    int status = thr_kill(osthread->thread_id(), os::Solaris::SIGinterrupt());
    assert_status(status == 0, status, "thr_kill");

    // Bump thread interruption counter
    RuntimeService::record_thread_interrupt_signaled_count();
  }
}
// Query the thread's interrupted flag, optionally clearing it when it is
// set (Thread.interrupted() semantics vs. Thread.isInterrupted()).
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  bool res = osthread->interrupted();

  // NOTE that since there is no "lock" around these two operations,
  // there is the possibility that the interrupted flag will be
  // "false" but that the interrupt event will be set. This is
  // intentional. The effect of this is that Object.wait() will appear
  // to have a spurious wakeup, which is not harmful, and the
  // possibility is so rare that it is not worth the added complexity
  // to add yet another lock. It has also been recommended not to put
  // the interrupted flag into the os::Solaris::Event structure,
  // because it hides the issue.
  if (res && clear_interrupted) {
    osthread->set_interrupted(false);
  }
  return res;
}
// No OS-specific statistics to report on Solaris.
void os::print_statistics() {
}
4126 int os::message_box(const char* title, const char* message) {
4127 int i;
4128 fdStream err(defaultStream::error_fd());
4129 for (i = 0; i < 78; i++) err.print_raw("=");
4130 err.cr();
4131 err.print_raw_cr(title);
4132 for (i = 0; i < 78; i++) err.print_raw("-");
4133 err.cr();
4134 err.print_raw_cr(message);
4135 for (i = 0; i < 78; i++) err.print_raw("=");
4136 err.cr();
4138 char buf[16];
4139 // Prevent process from exiting upon "read error" without consuming all CPU
4140 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4142 return buf[0] == 'y' || buf[0] == 'Y';
4143 }
// Deliver the suspend/resume signal (SIGasync) to the target thread.
// Returns the thr_kill status; 0 on success.
static int sr_notify(OSThread* osthread) {
  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  assert_status(status == 0, status, "thr_kill");
  return status;
}
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// NOTE(review): neither constant is referenced by the suspend/resume code
// below (which uses explicit semaphore timeouts) — confirm they are still
// used elsewhere in this file.
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
// Suspend the target thread: request the state transition, signal the
// thread, then wait (with timeout) for SR_handler to confirm via
// sr_semaphore.  Returns true iff the thread is now suspended.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  assert(!sr_semaphore.trywait(), "semaphore has invalid state");

  // mark as suspended and send signal
  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    ShouldNotReachHere();
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  while (true) {
    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
      break;
    } else {
      // timeout
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        // the thread never saw the request; give up
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        // make sure that we consume the signal on the semaphore as well
        sr_semaphore.wait();
        break;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
// Resume a thread stopped by do_suspend(): request the wakeup, then keep
// signalling the thread until it acknowledges (via sr_semaphore) and
// reports RUNNING again.
static void do_resume(OSThread* osthread) {
  assert(osthread->sr.is_suspended(), "thread should be suspended");
  assert(!sr_semaphore.trywait(), "invalid semaphore state");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (true) {
    if (sr_notify(osthread) == 0) {
      // short timeout: re-signal if the thread did not wake in time
      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
        if (osthread->sr.is_running()) {
          return;
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  // NOTE(review): unreachable — the loop above only exits via return or
  // ShouldNotReachHere(); kept for symmetry with do_suspend().
  guarantee(osthread->sr.is_running(), "Must be running!");
}
// Suspend the target thread, run the subclass' do_task() against the
// thread's saved ucontext, then resume it.  If the suspend fails, the
// task is silently not run.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
// A SuspendedThreadTask that captures the program counter of the target
// thread while it is stopped.  Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
 public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  ExtendedPC result();   // valid only after the task has run
 protected:
  void do_task(const os::SuspendedThreadTaskContext& context);
 private:
  ExtendedPC _epc;       // the captured pc
};
// Return the captured pc; may only be called once the task has completed.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
// Runs while the target thread is stopped: read the pc out of the
// ucontext recorded by suspend_save_context().
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
// A lightweight implementation that does not suspend the target thread and
// thus returns only a hint. Used for profiling only!
// NOTE(review): PcFetcher runs via SuspendedThreadTask, whose
// internal_do_task() above *does* suspend the target with do_suspend() —
// the comment above may be stale; confirm intended semantics.
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher and the Threads lock is owned.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  // For now, is only used to profile the VM Thread
  assert(thread->is_VM_thread(), "Can only be called for VMThread");
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
// This does not do anything on Solaris. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
// Simply forwards to the java call function with the given arguments.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}
4274 // This routine may be used by user applications as a "hook" to catch signals.
4275 // The user-defined signal handler must pass unrecognized signals to this
4276 // routine, and if it returns true (non-zero), then the signal handler must
4277 // return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
4280 //
4281 // If this routine returns false, it is OK to call it again. This allows
4282 // the user-defined signal handler to perform checks either before or after
4283 // the VM performs its own checks. Naturally, the user code would be making
4284 // a serious error if it tried to handle an exception (such as a null check
4285 // or breakpoint) that the VM was generating for its own correct operation.
4286 //
4287 // This routine may recognize any of the following kinds of signals:
4288 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, BREAK_SIGNAL, SIGPIPE, SIGXFSZ,
4289 // os::Solaris::SIGasync
4290 // It should be consulted by handlers for any of those signals.
4291 // It explicitly does not recognize os::Solaris::SIGinterrupt
4292 //
4293 // The caller of this routine must pass in the three arguments supplied
4294 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
4295 // field of the structure passed to sigaction(). This routine assumes that
4296 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
4297 //
4298 // Note that the VM will print warnings if it detects conflicting signal
4299 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
4300 //
4301 extern "C" JNIEXPORT int
4302 JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext,
4303 int abort_if_unrecognized);
// The VM's primary signal handler entry point (installed with SA_SIGINFO by
// set_signal_handler below).  Delegates to JVM_handle_solaris_signal with
// abort_if_unrecognized = true.
void signalHandler(int sig, siginfo_t* info, void* ucVoid) {
  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_solaris_signal(sig, info, ucVoid, true);
  errno = orig_errno;
}
/* Do not delete - if guarantee is ever removed, a signal handler (even empty)
   is needed to provoke threads blocked on IO to return an EINTR
   Note: this explicitly does NOT call JVM_handle_solaris_signal and
   does NOT participate in signal chaining due to requirement for
   NOT setting SA_RESTART to make EINTR work. */
extern "C" void sigINTRHandler(int sig, siginfo_t* info, void* ucVoid) {
  // Abort initialization if a user handler was chained on the VM's
  // interrupt signal: the VM must own this signal outright.
  if (UseSignalChaining) {
    struct sigaction *actp = os::Solaris::get_chained_signal_action(sig);
    if (actp && actp->sa_handler) {
      vm_exit_during_initialization("Signal chaining detected for VM interrupt signal, try -XX:+UseAltSigs");
    }
  }
}
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_solaris_signal, harmlessly.
bool os::Solaris::signal_handlers_are_installed = false;

// For signal-chaining
bool os::Solaris::libjsig_is_loaded = false;
// Type of libjsig's JVM_get_signal_action entry point; resolved with dlsym
// in install_signal_handlers() below.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Solaris::get_signal_action = NULL;
// Return the handler that was installed for 'sig' before the VM's own:
// first consult libjsig (when loaded and the signal is in its range), then
// the VM's saved table.  Returns NULL when no chained handler exists.
struct sigaction* os::Solaris::get_chained_signal_action(int sig) {
  struct sigaction *actp = NULL;

  if ((libjsig_is_loaded) && (sig <= Maxlibjsigsigs)) {
    // Retrieve the old signal handler from libjsig
    actp = (*get_signal_action)(sig);
  }
  if (actp == NULL) {
    // Retrieve the preinstalled signal handler from jvm
    actp = get_preinstalled_handler(sig);
  }

  return actp;
}
// Invoke the previously-installed ("chained") handler described by 'actp'
// for 'sig', emulating sigaction() semantics (sa_mask, SA_NODEFER,
// SA_RESETHAND, SA_SIGINFO).  Returns false for SIG_DFL so the VM treats
// the signal itself; true otherwise (including SIG_IGN).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand;
    sa_sigaction_t sa;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // one-shot handler: reset to default before calling, as the kernel would
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    thr_sigsetmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    thr_sigsetmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
4395 bool os::Solaris::chained_handler(int sig, siginfo_t* siginfo, void* context) {
4396 bool chained = false;
4397 // signal-chaining
4398 if (UseSignalChaining) {
4399 struct sigaction *actp = get_chained_signal_action(sig);
4400 if (actp != NULL) {
4401 chained = call_chained_handler(actp, sig, siginfo, context);
4402 }
4403 }
4404 return chained;
4405 }
4407 struct sigaction* os::Solaris::get_preinstalled_handler(int sig) {
4408 assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
4409 if (preinstalled_sigs[sig] != 0) {
4410 return &chainedsigactions[sig];
4411 }
4412 return NULL;
4413 }
// Record the handler that was installed for 'sig' before the VM's own, so
// get_preinstalled_handler()/chained_handler() can forward to it later.
void os::Solaris::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig <= Maxsignum, "vm signal out of expected range");
  assert((chainedsigactions != (struct sigaction *)NULL) && (preinstalled_sigs != (int *)NULL) , "signals not yet initialized");
  chainedsigactions[sig] = oldAct;
  preinstalled_sigs[sig] = 1;   // mark slot as valid
}
// Install the VM's handler for 'sig'.  A pre-existing non-default handler
// is either left alone (AllowUserSignalHandlers / !set_installed), saved
// for chaining (UseSignalChaining, when 'oktochain'), or is fatal.
void os::Solaris::set_signal_handler(int sig, bool set_installed, bool oktochain) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);
  void* oldhand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                      : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, signalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      if (oktochain) {
        // save the old handler in jvm
        save_preinstalled_handler(sig, oldAct);
      } else {
        vm_exit_during_initialization("Signal chaining not allowed for VM interrupt signal, try -XX:+UseAltSigs.");
      }
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  sigAct.sa_handler = SIG_DFL;

  sigAct.sa_sigaction = signalHandler;
  // Handle SIGSEGV on alternate signal stack if
  // not using stack banging
  if (!UseStackBanging && sig == SIGSEGV) {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
  // Interruptible i/o requires SA_RESTART cleared so EINTR
  // is returned instead of restarting system calls
  } else if (sig == os::Solaris::SIGinterrupt()) {
    sigemptyset(&sigAct.sa_mask);
    sigAct.sa_handler = NULL;
    sigAct.sa_flags = SA_SIGINFO;
    sigAct.sa_sigaction = sigINTRHandler;
  } else {
    sigAct.sa_flags = SA_SIGINFO | SA_RESTART;
  }
  // Remember the chosen flags so check_signal_handler() can verify them.
  os::Solaris::set_our_sigflags(sig, sigAct.sa_flags);

  sigaction(sig, &sigAct, &oldAct);

  void* oldhand2 = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                       : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
// Verify the installed handler for 'sig', skipping signals already flagged
// in check_signal_done by a previous warning.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Solaris::check_signal_handler(sig)
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
void os::run_periodic_checks() {
  // A big source of grief is hijacking virt. addr 0x0 on Solaris,
  // thereby preventing NULL checks from working.
  if(!check_addr0_done) check_addr0_done = check_addr0(tty);

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check for the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // See comments above for using JVM1/JVM2 and UseAltSigs
  DO_SIGNAL_CHECK(os::Solaris::SIGinterrupt());
  DO_SIGNAL_CHECK(os::Solaris::SIGasync());
}
// Pointer to the C library's real sigaction(), resolved via dlsym in
// check_signal_handler() so interposed versions are bypassed.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;
// Compare the currently installed handler (and flags) for 'sig' against
// what the VM installed; warn once per signal if they differ and stop
// checking that signal thereafter.
void os::Solaris::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler) ;

  // Determine which handler the VM expects to find for this signal.
  switch(sig) {
    case SIGSEGV:
    case SIGBUS:
    case SIGFPE:
    case SIGPIPE:
    case SIGXFSZ:
    case SIGILL:
      jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      break;

    case SHUTDOWN1_SIGNAL:
    case SHUTDOWN2_SIGNAL:
    case SHUTDOWN3_SIGNAL:
    case BREAK_SIGNAL:
      jvmHandler = (address)user_handler();
      break;

    default:
      int intrsig = os::Solaris::SIGinterrupt();
      int asynsig = os::Solaris::SIGasync();

      if (sig == intrsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, sigINTRHandler);
      } else if (sig == asynsig) {
        jvmHandler = CAST_FROM_FN_PTR(address, signalHandler);
      } else {
        // not a signal the VM manages
        return;
      }
      break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if(os::Solaris::get_our_sigflags(sig) != 0 && act.sa_flags != os::Solaris::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Solaris::get_our_sigflags(sig));
    tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Print all the signal handler state
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
// Install the VM's handlers for the error and shutdown signals.
// Cooperates with libjsig (signal chaining) when preloaded: our sigaction
// calls are bracketed with JVM_begin/end_signal_setting so libjsig knows
// these installations come from the VM itself.
void os::Solaris::install_signal_handlers() {
  bool libjsigdone = false;
  signal_handlers_are_installed = true;

  // signal-chaining
  typedef void (*signal_setting_t)();
  signal_setting_t begin_signal_setting = NULL;
  signal_setting_t end_signal_setting = NULL;
  begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  if (begin_signal_setting != NULL) {
    // Finding the first JVM_* symbol tells us libjsig is preloaded; resolve
    // the rest of its interface.
    end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                                        dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
    get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                                       dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
    get_libjsig_version = CAST_TO_FN_PTR(version_getting_t,
                                         dlsym(RTLD_DEFAULT, "JVM_get_libjsig_version"));
    libjsig_is_loaded = true;
    if (os::Solaris::get_libjsig_version != NULL) {
      libjsigversion = (*os::Solaris::get_libjsig_version)();
    }
    assert(UseSignalChaining, "should enable signal-chaining");
  }
  if (libjsig_is_loaded) {
    // Tell libjsig jvm is setting signal handlers
    (*begin_signal_setting)();
  }

  set_signal_handler(SIGSEGV, true, true);
  set_signal_handler(SIGPIPE, true, true);
  set_signal_handler(SIGXFSZ, true, true);
  set_signal_handler(SIGBUS, true, true);
  set_signal_handler(SIGILL, true, true);
  set_signal_handler(SIGFPE, true, true);

  if (os::Solaris::SIGinterrupt() > OLDMAXSIGNUM || os::Solaris::SIGasync() > OLDMAXSIGNUM) {

    // Pre-1.4.1 Libjsig limited to signal chaining signals <= 32 so
    // can not register overridable signals which might be > 32
    if (libjsig_is_loaded && libjsigversion <= JSIG_VERSION_1_4_1) {
      // Tell libjsig jvm has finished setting signal handlers
      (*end_signal_setting)();
      libjsigdone = true;
    }
  }

  // Never ok to chain our SIGinterrupt
  set_signal_handler(os::Solaris::SIGinterrupt(), true, false);
  set_signal_handler(os::Solaris::SIGasync(), true, true);

  if (libjsig_is_loaded && !libjsigdone) {
    // Tell libjsig jvm finishes setting signal handlers
    (*end_signal_setting)();
  }

  // We don't activate signal checker if libjsig is in place, we trust ourselves
  // and if UserSignalHandler is installed all bets are off.
  // Log that signal checking is off only if -verbose:jni is specified.
  if (CheckJNICalls) {
    if (libjsig_is_loaded) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
    if (AllowUserSignalHandlers) {
      if (PrintJNIResolving) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
      }
      check_signals = false;
    }
  }
}
void report_error(const char* file_name, int line_no, const char* title, const char* format, ...);

// Printable names for Solaris signal numbers, indexed by signal number
// (entry 0 is a placeholder). Used by os::exception_name(); signals beyond
// the end of this table are rendered as "SIG<n>". Order is semantic - do
// not rearrange.
const char * signames[] = {
  "SIG0",
  "SIGHUP", "SIGINT", "SIGQUIT", "SIGILL", "SIGTRAP",
  "SIGABRT", "SIGEMT", "SIGFPE", "SIGKILL", "SIGBUS",
  "SIGSEGV", "SIGSYS", "SIGPIPE", "SIGALRM", "SIGTERM",
  "SIGUSR1", "SIGUSR2", "SIGCLD", "SIGPWR", "SIGWINCH",
  "SIGURG", "SIGPOLL", "SIGSTOP", "SIGTSTP", "SIGCONT",
  "SIGTTIN", "SIGTTOU", "SIGVTALRM", "SIGPROF", "SIGXCPU",
  "SIGXFSZ", "SIGWAITING", "SIGLWP", "SIGFREEZE", "SIGTHAW",
  "SIGCANCEL", "SIGLOST"
};
4684 const char* os::exception_name(int exception_code, char* buf, size_t size) {
4685 if (0 < exception_code && exception_code <= SIGRTMAX) {
4686 // signal
4687 if (exception_code < sizeof(signames)/sizeof(const char*)) {
4688 jio_snprintf(buf, size, "%s", signames[exception_code]);
4689 } else {
4690 jio_snprintf(buf, size, "SIG%d", exception_code);
4691 }
4692 return buf;
4693 } else {
4694 return NULL;
4695 }
4696 }
// (Static) wrappers for the new libthread API
// Function pointers bound in os::Solaris::libthread_init().
int_fnP_thread_t_iP_uP_stack_tP_gregset_t os::Solaris::_thr_getstate;
int_fnP_thread_t_i_gregset_t os::Solaris::_thr_setstate;
int_fnP_thread_t_i os::Solaris::_thr_setmutator;
int_fnP_thread_t os::Solaris::_thr_suspend_mutator;
int_fnP_thread_t os::Solaris::_thr_continue_mutator;

// (Static) wrapper for getisax(2) call.
// Resolved lazily in misc_sym_init(); stays 0 on systems without getisax.
os::Solaris::getisax_func_t os::Solaris::_getisax = 0;

// (Static) wrappers for the liblgrp API
// Bound in liblgrp_init() when liblgrp.so.1 can be dlopen'ed.
os::Solaris::lgrp_home_func_t os::Solaris::_lgrp_home;
os::Solaris::lgrp_init_func_t os::Solaris::_lgrp_init;
os::Solaris::lgrp_fini_func_t os::Solaris::_lgrp_fini;
os::Solaris::lgrp_root_func_t os::Solaris::_lgrp_root;
os::Solaris::lgrp_children_func_t os::Solaris::_lgrp_children;
os::Solaris::lgrp_resources_func_t os::Solaris::_lgrp_resources;
os::Solaris::lgrp_nlgrps_func_t os::Solaris::_lgrp_nlgrps;
os::Solaris::lgrp_cookie_stale_func_t os::Solaris::_lgrp_cookie_stale;
os::Solaris::lgrp_cookie_t os::Solaris::_lgrp_cookie = 0;

// (Static) wrapper for meminfo() call.
// Resolved lazily in misc_sym_init().
os::Solaris::meminfo_func_t os::Solaris::_meminfo = 0;
4722 static address resolve_symbol_lazy(const char* name) {
4723 address addr = (address) dlsym(RTLD_DEFAULT, name);
4724 if(addr == NULL) {
4725 // RTLD_DEFAULT was not defined on some early versions of 2.5.1
4726 addr = (address) dlsym(RTLD_NEXT, name);
4727 }
4728 return addr;
4729 }
4731 static address resolve_symbol(const char* name) {
4732 address addr = resolve_symbol_lazy(name);
4733 if(addr == NULL) {
4734 fatal(dlerror());
4735 }
4736 return addr;
4737 }
// isT2_libthread()
//
// Routine to determine if we are currently using the new T2 libthread.
//
// We determine if we are using T2 by reading /proc/self/lstatus and
// looking for a thread with the ASLWP bit set. If we find this status
// bit set, we must assume that we are NOT using T2. The T2 team
// has approved this algorithm.
//
// We need to determine if we are running with the new T2 libthread
// since setting native thread priorities is handled differently
// when using this library. All threads created using T2 are bound
// threads. Calling thr_setprio is meaningless in this case.
//
bool isT2_libthread() {
  static prheader_t * lwpArray = NULL;
  static int lwpSize = 0;
  static int lwpFile = -1;
  lwpstatus_t * that;
  char lwpName [128];
  bool isT2 = false;

#define ADR(x)  ((uintptr_t)(x))
#define LWPINDEX(ary,ix)   ((lwpstatus_t *)(((ary)->pr_entsize * (ix)) + (ADR((ary) + 1))))

  lwpFile = ::open("/proc/self/lstatus", O_RDONLY, 0);
  if (lwpFile < 0) {
    if (ThreadPriorityVerbose) warning ("Couldn't open /proc/self/lstatus\n");
    return false;
  }
  // Start with a 16K buffer; the loop below grows it until a consistent
  // snapshot of all LWP status entries fits.
  lwpSize = 16*1024;
  for (;;) {
    ::lseek64 (lwpFile, 0, SEEK_SET);
    lwpArray = (prheader_t *)NEW_C_HEAP_ARRAY(char, lwpSize, mtInternal);
    if (::read(lwpFile, lwpArray, lwpSize) < 0) {
      if (ThreadPriorityVerbose) warning("Error reading /proc/self/lstatus\n");
      break;
    }
    if ((lwpArray->pr_nent * lwpArray->pr_entsize) <= lwpSize) {
      // We got a good snapshot - now iterate over the list.
      int aslwpcount = 0;
      for (int i = 0; i < lwpArray->pr_nent; i++ ) {
        that = LWPINDEX(lwpArray,i);
        if (that->pr_flags & PR_ASLWP) {
          aslwpcount++;
        }
      }
      // No ASLWP found => the T2 libthread is in use (see header comment).
      if (aslwpcount == 0) isT2 = true;
      break;
    }
    // Buffer too small for all entries; resize to the reported total and retry.
    lwpSize = lwpArray->pr_nent * lwpArray->pr_entsize;
    FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);  // retry.
  }

  // The final allocation (success or failure path) is released here.
  FREE_C_HEAP_ARRAY(char, lwpArray, mtInternal);
  ::close (lwpFile);
  if (ThreadPriorityVerbose) {
    if (isT2) tty->print_cr("We are running with a T2 libthread\n");
    else tty->print_cr("We are not running with a T2 libthread\n");
  }
  return isT2;
}
// Bind the libthread entry points the VM needs and record which libthread
// implementation (T1 vs T2) is in use.
void os::Solaris::libthread_init() {
  address func = (address)dlsym(RTLD_DEFAULT, "_thr_suspend_allmutators");

  // Determine if we are running with the new T2 libthread
  os::Solaris::set_T2_libthread(isT2_libthread());

  lwp_priocntl_init();

  // RTLD_DEFAULT was not defined on some early versions of 5.5.1
  if(func == NULL) {
    func = (address) dlsym(RTLD_NEXT, "_thr_suspend_allmutators");
    // Guarantee that this VM is running on an new enough OS (5.6 or
    // later) that it will have a new enough libthread.so.
    guarantee(func != NULL, "libthread.so is too old.");
  }

  // Initialize the new libthread getstate API wrappers
  func = resolve_symbol("thr_getstate");
  os::Solaris::set_thr_getstate(CAST_TO_FN_PTR(int_fnP_thread_t_iP_uP_stack_tP_gregset_t, func));

  func = resolve_symbol("thr_setstate");
  os::Solaris::set_thr_setstate(CAST_TO_FN_PTR(int_fnP_thread_t_i_gregset_t, func));

  func = resolve_symbol("thr_setmutator");
  os::Solaris::set_thr_setmutator(CAST_TO_FN_PTR(int_fnP_thread_t_i, func));

  func = resolve_symbol("thr_suspend_mutator");
  os::Solaris::set_thr_suspend_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  func = resolve_symbol("thr_continue_mutator");
  os::Solaris::set_thr_continue_mutator(CAST_TO_FN_PTR(int_fnP_thread_t, func));

  // Record the address range of libthread's signal handler wrapper.
  // NOTE(review): handler_start/handler_end presumably let signal code
  // recognize frames inside thr_sighndlrinfo's wrapper - confirm with callers.
  int size;
  void (*handler_info_func)(address *, int *);
  handler_info_func = CAST_TO_FN_PTR(void (*)(address *, int *), resolve_symbol("thr_sighndlrinfo"));
  handler_info_func(&handler_start, &size);
  handler_end = handler_start + size;
}
// Mutex/condvar function pointers for the whole VM, selected once at startup
// in synchronization_init(): LWP (_lwp_*), pthread, or default libthread
// (mutex_*/cond_*) implementations.
int_fnP_mutex_tP os::Solaris::_mutex_lock;
int_fnP_mutex_tP os::Solaris::_mutex_trylock;
int_fnP_mutex_tP os::Solaris::_mutex_unlock;
int_fnP_mutex_tP_i_vP os::Solaris::_mutex_init;
int_fnP_mutex_tP os::Solaris::_mutex_destroy;
// USYNC_THREAD = process-private synchronization scope.
int os::Solaris::_mutex_scope = USYNC_THREAD;

int_fnP_cond_tP_mutex_tP_timestruc_tP os::Solaris::_cond_timedwait;
int_fnP_cond_tP_mutex_tP os::Solaris::_cond_wait;
int_fnP_cond_tP os::Solaris::_cond_signal;
int_fnP_cond_tP os::Solaris::_cond_broadcast;
int_fnP_cond_tP_i_vP os::Solaris::_cond_init;
int_fnP_cond_tP os::Solaris::_cond_destroy;
int os::Solaris::_cond_scope = USYNC_THREAD;
// Select the mutex/condvar implementation used by the VM, based on the
// UseLWPSynchronization and UsePthreads flags:
//   - LWP primitives (_lwp_mutex_*/_lwp_cond_*), or
//   - pthread primitives, or
//   - default libthread primitives (mutex_*/cond_*).
// All variants are resolved via dlsym through resolve_symbol(), which is
// fatal on a missing symbol.
void os::Solaris::synchronization_init() {
  if(UseLWPSynchronization) {
    os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_lock")));
    os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_trylock")));
    os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("_lwp_mutex_unlock")));
    os::Solaris::set_mutex_init(lwp_mutex_init);
    os::Solaris::set_mutex_destroy(lwp_mutex_destroy);
    os::Solaris::set_mutex_scope(USYNC_THREAD);

    os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("_lwp_cond_timedwait")));
    os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("_lwp_cond_wait")));
    os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_signal")));
    os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("_lwp_cond_broadcast")));
    os::Solaris::set_cond_init(lwp_cond_init);
    os::Solaris::set_cond_destroy(lwp_cond_destroy);
    os::Solaris::set_cond_scope(USYNC_THREAD);
  }
  else {
    // Both non-LWP variants use process-private scope.
    os::Solaris::set_mutex_scope(USYNC_THREAD);
    os::Solaris::set_cond_scope(USYNC_THREAD);

    if(UsePthreads) {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_unlock")));
      os::Solaris::set_mutex_init(pthread_mutex_default_init);
      os::Solaris::set_mutex_destroy(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("pthread_mutex_destroy")));

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("pthread_cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("pthread_cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_broadcast")));
      os::Solaris::set_cond_init(pthread_cond_default_init);
      os::Solaris::set_cond_destroy(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("pthread_cond_destroy")));
    }
    else {
      os::Solaris::set_mutex_lock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_lock")));
      os::Solaris::set_mutex_trylock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_trylock")));
      os::Solaris::set_mutex_unlock(CAST_TO_FN_PTR(int_fnP_mutex_tP, resolve_symbol("mutex_unlock")));
      os::Solaris::set_mutex_init(::mutex_init);
      os::Solaris::set_mutex_destroy(::mutex_destroy);

      os::Solaris::set_cond_timedwait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP_timestruc_tP, resolve_symbol("cond_timedwait")));
      os::Solaris::set_cond_wait(CAST_TO_FN_PTR(int_fnP_cond_tP_mutex_tP, resolve_symbol("cond_wait")));
      os::Solaris::set_cond_signal(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_signal")));
      os::Solaris::set_cond_broadcast(CAST_TO_FN_PTR(int_fnP_cond_tP, resolve_symbol("cond_broadcast")));
      os::Solaris::set_cond_init(::cond_init);
      os::Solaris::set_cond_destroy(::cond_destroy);
    }
  }
}
// Load liblgrp.so.1 and bind the locality-group (NUMA) API entry points.
// On success, initializes an lgrp cookie from the caller's view and returns
// true; returns false if the library is unavailable (caller disables NUMA).
bool os::Solaris::liblgrp_init() {
  void *handle = dlopen("liblgrp.so.1", RTLD_LAZY);
  if (handle != NULL) {
    // NOTE(review): individual dlsym results are not checked; presumably all
    // of these symbols exist whenever the library itself loads - confirm.
    os::Solaris::set_lgrp_home(CAST_TO_FN_PTR(lgrp_home_func_t, dlsym(handle, "lgrp_home")));
    os::Solaris::set_lgrp_init(CAST_TO_FN_PTR(lgrp_init_func_t, dlsym(handle, "lgrp_init")));
    os::Solaris::set_lgrp_fini(CAST_TO_FN_PTR(lgrp_fini_func_t, dlsym(handle, "lgrp_fini")));
    os::Solaris::set_lgrp_root(CAST_TO_FN_PTR(lgrp_root_func_t, dlsym(handle, "lgrp_root")));
    os::Solaris::set_lgrp_children(CAST_TO_FN_PTR(lgrp_children_func_t, dlsym(handle, "lgrp_children")));
    os::Solaris::set_lgrp_resources(CAST_TO_FN_PTR(lgrp_resources_func_t, dlsym(handle, "lgrp_resources")));
    os::Solaris::set_lgrp_nlgrps(CAST_TO_FN_PTR(lgrp_nlgrps_func_t, dlsym(handle, "lgrp_nlgrps")));
    os::Solaris::set_lgrp_cookie_stale(CAST_TO_FN_PTR(lgrp_cookie_stale_func_t,
                                       dlsym(handle, "lgrp_cookie_stale")));

    lgrp_cookie_t c = lgrp_init(LGRP_VIEW_CALLER);
    set_lgrp_cookie(c);
    return true;
  }
  return false;
}
4932 void os::Solaris::misc_sym_init() {
4933 address func;
4935 // getisax
4936 func = resolve_symbol_lazy("getisax");
4937 if (func != NULL) {
4938 os::Solaris::_getisax = CAST_TO_FN_PTR(getisax_func_t, func);
4939 }
4941 // meminfo
4942 func = resolve_symbol_lazy("meminfo");
4943 if (func != NULL) {
4944 os::Solaris::set_meminfo(CAST_TO_FN_PTR(meminfo_func_t, func));
4945 }
4946 }
// Forward to the libc getisax(2) resolved in misc_sym_init(); callers must
// first verify availability (supports_getisax / _getisax != NULL).
uint_t os::Solaris::getisax(uint32_t* array, uint_t n) {
  assert(_getisax != NULL, "_getisax not set");
  return _getisax(array, n);
}
// int pset_getloadavg(psetid_t pset, double loadavg[], int nelem);
typedef long (*pset_getloadavg_type)(psetid_t pset, double loadavg[], int nelem);
// Resolved in init_pset_getloadavg_ptr(); NULL when libc lacks the entry point.
static pset_getloadavg_type pset_getloadavg_ptr = NULL;
4957 void init_pset_getloadavg_ptr(void) {
4958 pset_getloadavg_ptr =
4959 (pset_getloadavg_type)dlsym(RTLD_DEFAULT, "pset_getloadavg");
4960 if (PrintMiscellaneous && Verbose && pset_getloadavg_ptr == NULL) {
4961 warning("pset_getloadavg function not found");
4962 }
4963 }
4965 int os::Solaris::_dev_zero_fd = -1;
// this is called _before_ the global arguments have been parsed
// First-phase VM/OS initialization: pid, clocks, page sizes, /dev/zero,
// libdl lookups, and stack-guard page counts. Ordering here matters;
// later phases (init_2) depend on these being set.
void os::init(void) {
  _initial_pid = getpid();

  // Seed the monotonic clock bookkeeping with the current hrtime.
  max_hrtime = first_hrtime = gethrtime();

  init_random(1234567);

  page_size = sysconf(_SC_PAGESIZE);
  if (page_size == -1)
    fatal(err_msg("os_solaris.cpp: os::init: sysconf failed (%s)",
                  strerror(errno)));
  init_page_sizes((size_t) page_size);

  Solaris::initialize_system_info();

  // Initialize misc. symbols as soon as possible, so we can use them
  // if we need them.
  Solaris::misc_sym_init();

  int fd = ::open("/dev/zero", O_RDWR);
  if (fd < 0) {
    fatal(err_msg("os::init: cannot open /dev/zero (%s)", strerror(errno)));
  } else {
    Solaris::set_dev_zero_fd(fd);

    // Close on exec, child won't inherit.
    fcntl(fd, F_SETFD, FD_CLOEXEC);
  }

  clock_tics_per_sec = CLK_TCK;

  // check if dladdr1() exists; dladdr1 can provide more information than
  // dladdr for os::dll_address_to_function_name. It comes with SunOS 5.9
  // and is available on linker patches for 5.7 and 5.8.
  // libdl.so must have been loaded, this call is just an entry lookup
  void * hdl = dlopen("libdl.so", RTLD_NOW);
  if (hdl)
    dladdr1_func = CAST_TO_FN_PTR(dladdr1_func_type, dlsym(hdl, "dladdr1"));

  // (Solaris only) this switches to calls that actually do locking.
  ThreadCritical::initialize();

  main_thread = thr_self();

  // Constant minimum stack size allowed. It must be at least
  // the minimum of what the OS supports (thr_min_stack()), and
  // enough to allow the thread to get to user bytecode execution.
  Solaris::min_stack_allowed = MAX2(thr_min_stack(), Solaris::min_stack_allowed);
  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > 8*K) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*8*K), vm_page_size()) / vm_page_size();
  }
}
5026 // To install functions for atexit system call
extern "C" {
  // atexit hook: releases the perf-memory region at process exit.
  // Registered in os::init_2() only when PerfAllowAtExitRegistration is set.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
5033 // this is called _after_ the global arguments have been parsed
// Second-phase initialization, run after argument parsing: safepoint polling
// page, memory-serialize page, stack-size validation, libthread/NUMA/signal
// setup, synchronization primitives, fd limits, and atexit hooks.
// Returns JNI_OK, or JNI_ERR if the configured stack size is unusable.
jint os::init_2(void) {
  // try to enable extended file IO ASAP, see 6431278
  os::Solaris::try_enable_extended_io();

  // Allocate a single page and mark it as readable for safepoint polling. Also
  // use this first mmap call to check support for MAP_ALIGN.
  address polling_page = (address)Solaris::mmap_chunk((char*)page_size,
                                                      page_size,
                                                      MAP_PRIVATE | MAP_ALIGN,
                                                      PROT_READ);
  if (polling_page == NULL) {
    // MAP_ALIGN unsupported on this kernel - remember that and retry plain.
    has_map_align = false;
    polling_page = (address)Solaris::mmap_chunk(NULL, page_size, MAP_PRIVATE,
                                                PROT_READ);
  }

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Page used for cross-thread store serialization instead of membar.
    address mem_serialize_page = (address)Solaris::mmap_chunk( NULL, page_size, MAP_PRIVATE, PROT_READ | PROT_WRITE );
    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  os::Solaris::min_stack_allowed = MAX2(os::Solaris::min_stack_allowed,
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                    2*BytesPerWord COMPILER2_PRESENT(+1)) * page_size);

  size_t threadStackSizeInBytes = ThreadStackSize * K;
  if (threadStackSizeInBytes != 0 &&
      threadStackSizeInBytes < os::Solaris::min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, Specify at least %dk",
                  os::Solaris::min_stack_allowed/K);
    return JNI_ERR;
  }

  // For 64kbps there will be a 64kb page size, which makes
  // the usable default stack size quite a bit less.  Increase the
  // stack for 64kb (or any > than 8kb) pages, this increases
  // virtual memory fragmentation (since we're not creating the
  // stack on a power of 2 boundary.  The real fix for this
  // should be to fix the guard page mechanism.

  if (vm_page_size() > 8*K) {
    threadStackSizeInBytes = (threadStackSizeInBytes != 0)
       ? threadStackSizeInBytes +
         ((StackYellowPages + StackRedPages) * vm_page_size())
       : 0;
    ThreadStackSize = threadStackSizeInBytes/K;
  }

  // Make the stack size a multiple of the page size so that
  // the yellow/red zones can be guarded.
  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
                                                vm_page_size()));

  Solaris::libthread_init();

  if (UseNUMA) {
    if (!Solaris::liblgrp_init()) {
      UseNUMA = false;
    } else {
      // liblgrp loaded; NUMA is only worthwhile with 2+ locality groups.
      size_t lgrp_limit = os::numa_get_groups_num();
      int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtInternal);
      size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
      FREE_C_HEAP_ARRAY(int, lgrp_ids, mtInternal);
      if (lgrp_num < 2) {
        // There's only one locality group, disable NUMA.
        UseNUMA = false;
      }
    }
    if (!UseNUMA && ForceNUMA) {
      UseNUMA = true;
    }
  }

  Solaris::signal_sets_init();
  Solaris::init_signal_mem();
  Solaris::install_signal_handlers();

  if (libjsigversion < JSIG_VERSION_1_4_1) {
    Maxlibjsigsigs = OLDMAXSIGNUM;
  }

  // initialize synchronization primitives to use either thread or
  // lwp synchronization (controlled by UseLWPSynchronization)
  Solaris::synchronization_init();

  if (MaxFDLimit) {
    // set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      if (PrintMiscellaneous && (Verbose || WizardMode))
        perror("os::init_2 getrlimit failed");
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        if (PrintMiscellaneous && (Verbose || WizardMode))
          perror("os::init_2 setrlimit failed");
      }
    }
  }

  // Calculate theoretical max. size of Threads to guard gainst
  // artifical out-of-memory situations, where all available address-
  // space has been reserved by thread stacks. Default stack size is 1Mb.
  size_t pre_thread_stack_size = (JavaThread::stack_size_at_create()) ?
    JavaThread::stack_size_at_create() : (1*K*K);
  assert(pre_thread_stack_size != 0, "Must have a stack");
  // Solaris has a maximum of 4Gb of user programs. Calculate the thread limit when
  // we should start doing Virtual Memory banging. Currently when the threads will
  // have used all but 200Mb of space.
  size_t max_address_space = ((unsigned int)4 * K * K * K) - (200 * K * K);
  Solaris::_os_thread_limit = max_address_space / pre_thread_stack_size;

  // at-exit methods are called in the reverse order of their registration.
  // In Solaris 7 and earlier, atexit functions are called on return from
  // main or as a result of a call to exit(3C). There can be only 32 of
  // these functions registered and atexit() does not set errno. In Solaris
  // 8 and later, there is no limit to the number of functions registered
  // and atexit() sets errno. In addition, in Solaris 8 and later, atexit
  // functions are called upon dlclose(3DL) in addition to return from main
  // and exit(3C).

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init2 atexit(perfMemory_exit_helper) failed");
    }
  }

  // Init pset_loadavg function pointer
  init_pset_getloadavg_ptr();

  return JNI_OK;
}
5196 void os::init_3(void) {
5197 return;
5198 }
5200 // Mark the polling page as unreadable
5201 void os::make_polling_page_unreadable(void) {
5202 if( mprotect((char *)_polling_page, page_size, PROT_NONE) != 0 )
5203 fatal("Could not disable polling page");
5204 };
5206 // Mark the polling page as readable
5207 void os::make_polling_page_readable(void) {
5208 if( mprotect((char *)_polling_page, page_size, PROT_READ) != 0 )
5209 fatal("Could not enable polling page");
5210 };
5212 // OS interface.
5214 bool os::check_heap(bool force) { return true; }
// Signature of libc's vsnprintf; looked up dynamically in local_vsnprintf().
typedef int (*vsnprintf_t)(char* buf, size_t count, const char* fmt, va_list argptr);
// Cached lookup result; NULL until local_vsnprintf() is first called.
static vsnprintf_t sol_vsnprintf = NULL;
5219 int local_vsnprintf(char* buf, size_t count, const char* fmt, va_list argptr) {
5220 if (!sol_vsnprintf) {
5221 //search for the named symbol in the objects that were loaded after libjvm
5222 void* where = RTLD_NEXT;
5223 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5224 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5225 if (!sol_vsnprintf){
5226 //search for the named symbol in the objects that were loaded before libjvm
5227 where = RTLD_DEFAULT;
5228 if ((sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "__vsnprintf"))) == NULL)
5229 sol_vsnprintf = CAST_TO_FN_PTR(vsnprintf_t, dlsym(where, "vsnprintf"));
5230 assert(sol_vsnprintf != NULL, "vsnprintf not found");
5231 }
5232 }
5233 return (*sol_vsnprintf)(buf, count, fmt, argptr);
5234 }
5237 // Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  DIR *dir = NULL;
  struct dirent *ptr;

  dir = opendir(path);
  // An unopenable directory is reported as empty.
  if (dir == NULL) return true;

  /* Scan the directory */
  bool result = true;
  // Caller-supplied dirent buffer for the two-argument (reentrant, Solaris
  // readdir_r-style) readdir used here; sized for the longest entry name.
  char buf[sizeof(struct dirent) + MAX_PATH];
  struct dirent *dbuf = (struct dirent *) buf;
  while (result && (ptr = readdir(dir, dbuf)) != NULL) {
    // Any entry other than "." and ".." makes the directory non-empty.
    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
      result = false;
    }
  }
  closedir(dir);
  return result;
}
5258 // This code originates from JDK's sysOpen and open64_w
5259 // from src/solaris/hpi/src/system_md.c
5261 #ifndef O_DELETE
5262 #define O_DELETE 0x10000
5263 #endif
5265 // Open a file. Unlink the file immediately after open returns
5266 // if the specified oflag has the O_DELETE flag set.
5267 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
// Open 'path' with 64-bit file offsets. Rejects over-long paths and
// directories, applies the 32-bit Solaris fd>=256 workaround, sets
// close-on-exec, and honors the O_DELETE flag (unlink immediately after
// open, see header comment above). Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;
  // O_DELETE is our private flag - strip it before calling the real open.
  int o_delete = (oflag & O_DELETE);
  oflag = oflag & ~O_DELETE;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  //If the open succeeded, the file might still be a directory
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }
  /*
   * 32-bit Solaris systems suffer from:
   *
   * - an historical default soft limit of 256 per-process file
   *   descriptors that is too low for many Java programs.
   *
   * - a design flaw where file descriptors created using stdio
   *   fopen must be less than 256, _even_ when the first limit above
   *   has been raised. This can cause calls to fopen (but not calls to
   *   open, for example) to fail mysteriously, perhaps in 3rd party
   *   native code (although the JDK itself uses fopen). One can hardly
   *   criticize them for using this most standard of all functions.
   *
   * We attempt to make everything work anyways by:
   *
   * - raising the soft limit on per-process file descriptors beyond
   *   256
   *
   * - As of Solaris 10u4, we can request that Solaris raise the 256
   *   stdio fopen limit by calling function enable_extended_FILE_stdio.
   *   This is done in init_2 and recorded in enabled_extended_FILE_stdio
   *
   * - If we are stuck on an old (pre 10u4) Solaris system, we can
   *   workaround the bug by remapping non-stdio file descriptors below
   *   256 to ones beyond 256, which is done below.
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 6533291: Work around 32-bit Solaris stdio limit of 256 open files
   * 6431278: Netbeans crash on 32 bit Solaris: need to call
   *          enable_extended_FILE_stdio() in VM initialisation
   * Giri Mandalika's blog
   * http://technopark02.blogspot.com/2005_05_01_archive.html
   */
#ifndef _LP64
  if ((!enabled_extended_FILE_stdio) && fd < 256) {
    // Remap the fd above 255 so stdio fds stay available for fopen.
    int newfd = ::fcntl(fd, F_DUPFD, 256);
    if (newfd != -1) {
      ::close(fd);
      fd = newfd;
    }
  }
#endif // 32-bit Solaris
  /*
   * All file descriptors that are opened in the JVM and not
   * specifically destined for a subprocess should have the
   * close-on-exec flag set. If we don't set it, then careless 3rd
   * party native code might fork and exec without closing all
   * appropriate file descriptors (e.g. as we do in closeDescriptors in
   * UNIXProcess.c), and this in turn might:
   *
   * - cause end-of-file to fail to be detected on some file
   *   descriptors, resulting in mysterious hangs, or
   *
   * - might cause an fopen in the subprocess to fail on a system
   *   suffering from bug 1085341.
   *
   * (Yes, the default setting of the close-on-exec flag is a Unix
   * design flaw)
   *
   * See:
   * 1085341: 32-bit stdio routines should support file descriptors >255
   * 4843136: (process) pipe file descriptor from Runtime.exec not being closed
   * 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
   */
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  if (o_delete != 0) {
    // File stays usable through the open fd but vanishes from the namespace.
    ::unlink(path);
  }
  return fd;
}
5377 // create binary file, rewriting existing file if required
5378 int os::create_binary_file(const char* path, bool rewrite_existing) {
5379 int oflags = O_WRONLY | O_CREAT;
5380 if (!rewrite_existing) {
5381 oflags |= O_EXCL;
5382 }
5383 return ::open64(path, oflags, S_IREAD | S_IWRITE);
5384 }
5386 // return current position of file pointer
5387 jlong os::current_file_offset(int fd) {
5388 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
5389 }
5391 // move file pointer to the specified offset
5392 jlong os::seek_to_file_offset(int fd, jlong offset) {
5393 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
5394 }
// General seek: thin 64-bit-clean wrapper over lseek64 so offsets past
// 2GB work even in a 32-bit VM.  whence is SEEK_SET/SEEK_CUR/SEEK_END.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::lseek64(fd, offset, whence);
}
// Convert a pathname to the platform's native form.  Solaris already
// uses '/'-separated POSIX paths, so this is the identity function
// (contrast with the Windows port, which rewrites separators).
char * os::native_path(char *path) {
  return path;
}
5404 int os::ftruncate(int fd, jlong length) {
5405 return ::ftruncate64(fd, length);
5406 }
// Flush fd's data and metadata to stable storage.  The RESTARTABLE
// wrapper retries the syscall while it fails with EINTR, then returns
// the final fsync() result.
int os::fsync(int fd) {
  RESTARTABLE_RETURN_INT(::fsync(fd));
}
// Report how many bytes can be read from fd without blocking.
// Returns 1 with *bytes set on success, 0 on failure.
// For character devices, FIFOs and sockets the FIONREAD ioctl is used;
// for regular files the answer is (end-of-file minus current offset),
// computed with a seek-to-end/seek-back round trip.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      /*
      * XXX: is the following call interruptible? If so, this might
      * need to go through the INTERRUPT_IO() wrapper as for other
      * blocking, interruptible calls in this file.
      */
      int n,ioctl_return;

      // INTERRUPTIBLE retries/records thread interruption around the
      // ioctl; n is only read when ioctl_return signals success.
      INTERRUPTIBLE(::ioctl(fd, FIONREAD, &n),ioctl_return,os::Solaris::clear_interrupted);
      if (ioctl_return>= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular-file path: measure distance to EOF, restoring the original
  // offset afterwards.  Any failed seek aborts with 0.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
5445 // Map a block of memory.
5446 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5447 char *addr, size_t bytes, bool read_only,
5448 bool allow_exec) {
5449 int prot;
5450 int flags;
5452 if (read_only) {
5453 prot = PROT_READ;
5454 flags = MAP_SHARED;
5455 } else {
5456 prot = PROT_READ | PROT_WRITE;
5457 flags = MAP_PRIVATE;
5458 }
5460 if (allow_exec) {
5461 prot |= PROT_EXEC;
5462 }
5464 if (addr != NULL) {
5465 flags |= MAP_FIXED;
5466 }
5468 char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
5469 fd, file_offset);
5470 if (mapped_address == MAP_FAILED) {
5471 return NULL;
5472 }
5473 return mapped_address;
5474 }
5477 // Remap a block of memory.
5478 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5479 char *addr, size_t bytes, bool read_only,
5480 bool allow_exec) {
5481 // same as map_memory() on this OS
5482 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
5483 allow_exec);
5484 }
5487 // Unmap a block of memory.
5488 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5489 return munmap(addr, bytes) == 0;
5490 }
5492 void os::pause() {
5493 char filename[MAX_PATH];
5494 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5495 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
5496 } else {
5497 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5498 }
5500 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5501 if (fd != -1) {
5502 struct stat buf;
5503 ::close(fd);
5504 while (::stat(filename, &buf) == 0) {
5505 (void)::poll(NULL, 0, 100);
5506 }
5507 } else {
5508 jio_fprintf(stderr,
5509 "Could not open pause file '%s', continuing immediately.\n", filename);
5510 }
5511 }
#ifndef PRODUCT
#ifdef INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
// Turn this on if you need to trace synch operations.
// Set RECORD_SYNCH_LIMIT to a large-enough value,
// and call record_synch_enable and record_synch_disable
// around the computation of interest.
//
// The machinery below interposes on libc/libthread synchronization
// entry points via dlsym(RTLD_NEXT, ...): each generated wrapper
// records entry/exit through RecordSynch, optionally sanity-checks its
// arguments, then tail-calls the real implementation.

void record_synch(char* name, bool returning);  // defined below

// RAII helper: records "entering <name>" on construction and
// "returning from <name>" on destruction.
class RecordSynch {
  char* _name;
 public:
  RecordSynch(char* name) :_name(name)
                 { record_synch(_name, false); }
  ~RecordSynch() { record_synch(_name, true); }
};

// Generates an extern "C" interposer for `name`: lazily resolves the
// next definition of the symbol, bumps a per-function call counter,
// runs `inner` (argument checks), and forwards to the real function.
#define CHECK_SYNCH_OP(ret, name, params, args, inner) \
extern "C" ret name params { \
  typedef ret name##_t params; \
  static name##_t* implem = NULL; \
  static int callcount = 0; \
  if (implem == NULL) { \
    implem = (name##_t*) dlsym(RTLD_NEXT, #name); \
    if (implem == NULL) fatal(dlerror()); \
  } \
  ++callcount; \
  RecordSynch _rs(#name); \
  inner; \
  return implem args; \
}
// in dbx, examine callcounts this way:
// for n in $(eval whereis callcount | awk '{print $2}'); do print $n; done

// A pointer is "OK" if the heap is not yet initialized or the pointer
// lies outside the reserved Java heap: synch objects must be C-heap.
#define CHECK_POINTER_OK(p) \
  (!Universe::is_fully_initialized() || !Universe::is_reserved_heap((oop)(p)))
#define CHECK_MU \
  if (!CHECK_POINTER_OK(mu)) fatal("Mutex must be in C heap only.");
#define CHECK_CV \
  if (!CHECK_POINTER_OK(cv)) fatal("Condvar must be in C heap only.");
#define CHECK_P(p) \
  if (!CHECK_POINTER_OK(p)) fatal(false, "Pointer must be in C heap only.");

#define CHECK_MUTEX(mutex_op) \
  CHECK_SYNCH_OP(int, mutex_op, (mutex_t *mu), (mu), CHECK_MU);

// Interposers for the public and leading-underscore mutex entry points.
CHECK_MUTEX( mutex_lock)
CHECK_MUTEX( _mutex_lock)
CHECK_MUTEX( mutex_unlock)
CHECK_MUTEX(_mutex_unlock)
CHECK_MUTEX( mutex_trylock)
CHECK_MUTEX(_mutex_trylock)

#define CHECK_COND(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu), (cv, mu), CHECK_MU;CHECK_CV);

CHECK_COND( cond_wait);
CHECK_COND(_cond_wait);
CHECK_COND(_cond_wait_cancel);

#define CHECK_COND2(cond_op) \
  CHECK_SYNCH_OP(int, cond_op, (cond_t *cv, mutex_t *mu, timestruc_t* ts), (cv, mu, ts), CHECK_MU;CHECK_CV);

CHECK_COND2( cond_timedwait);
CHECK_COND2(_cond_timedwait);
CHECK_COND2(_cond_timedwait_cancel);

// do the _lwp_* versions too
// (temporarily retarget the macros at the lwp_* types)
#define mutex_t lwp_mutex_t
#define cond_t lwp_cond_t
CHECK_MUTEX( _lwp_mutex_lock)
CHECK_MUTEX( _lwp_mutex_unlock)
CHECK_MUTEX( _lwp_mutex_trylock)
CHECK_MUTEX( __lwp_mutex_lock)
CHECK_MUTEX( __lwp_mutex_unlock)
CHECK_MUTEX( __lwp_mutex_trylock)
CHECK_MUTEX(___lwp_mutex_lock)
CHECK_MUTEX(___lwp_mutex_unlock)

CHECK_COND( _lwp_cond_wait);
CHECK_COND( __lwp_cond_wait);
CHECK_COND(___lwp_cond_wait);

CHECK_COND2( _lwp_cond_timedwait);
CHECK_COND2( __lwp_cond_timedwait);
#undef mutex_t
#undef cond_t

// Miscellaneous LWP primitives; `0` means no extra argument checking.
CHECK_SYNCH_OP(int, _lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_suspend2, (int lwp, int *n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_kill, (int lwp, int n), (lwp, n), 0);
CHECK_SYNCH_OP(int,__lwp_kill, (int lwp, int n), (lwp, n), 0);
CHECK_SYNCH_OP(int, _lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
CHECK_SYNCH_OP(int,__lwp_sema_wait, (lwp_sema_t* p), (p), CHECK_P(p));
CHECK_SYNCH_OP(int, _lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);
CHECK_SYNCH_OP(int,__lwp_cond_broadcast, (lwp_cond_t* cv), (cv), CHECK_CV);


// recording machinery:
// A fixed-size ring-less trace buffer; recording stops silently once
// RECORD_SYNCH_LIMIT entries have been captured.

enum { RECORD_SYNCH_LIMIT = 200 };
char* record_synch_name[RECORD_SYNCH_LIMIT];
void* record_synch_arg0ptr[RECORD_SYNCH_LIMIT];
bool record_synch_returning[RECORD_SYNCH_LIMIT];
thread_t record_synch_thread[RECORD_SYNCH_LIMIT];
int record_synch_count = 0;
bool record_synch_enabled = false;

// in dbx, examine recorded data this way:
// for n in name arg0ptr returning thread; do print record_synch_$n[0..record_synch_count-1]; done

// Append one trace record (operation name, direction, calling thread)
// if tracing is enabled and the buffer is not yet full.
void record_synch(char* name, bool returning) {
  if (record_synch_enabled) {
    if (record_synch_count < RECORD_SYNCH_LIMIT) {
      record_synch_name[record_synch_count] = name;
      record_synch_returning[record_synch_count] = returning;
      record_synch_thread[record_synch_count] = thr_self();
      record_synch_arg0ptr[record_synch_count] = &name;
      record_synch_count++;
    }
    // put more checking code here:
    // ...
  }
}

void record_synch_enable() {
  // start collecting trace data, if not already doing so
  if (!record_synch_enabled) record_synch_count = 0;
  record_synch_enabled = true;
}

void record_synch_disable() {
  // stop collecting trace data
  record_synch_enabled = false;
}

#endif // INTERPOSE_ON_SYSTEM_SYNCH_FUNCTIONS
#endif // PRODUCT
// Byte offset of the pr_utime field within a prusage_t, and the size of
// the span from pr_utime up to (but not including) pr_ttime — this span
// includes pr_stime.  Computed with the classic null-pointer offsetof
// idiom; used by os::thread_cpu_time() to pread() just the CPU-time
// fields out of /proc/<pid>/lwp/<lwpid>/lwpusage.
const intptr_t thr_time_off = (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
const intptr_t thr_time_size = (intptr_t)(&((prusage_t *)(NULL))->pr_ttime) -
                               (intptr_t)(&((prusage_t *)(NULL))->pr_utime);
5657 // JVMTI & JVM monitoring and management support
5658 // The thread_cpu_time() and current_thread_cpu_time() are only
5659 // supported if is_thread_cpu_time_supported() returns true.
5660 // They are not supported on Solaris T1.
5662 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
5663 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
5664 // of a thread.
5665 //
5666 // current_thread_cpu_time() and thread_cpu_time(Thread *)
5667 // returns the fast estimate available on the platform.
// hrtime_t gethrvtime() return value includes
// user time but does not include system time
// Fast estimate of the calling thread's user-mode CPU time, in
// nanoseconds (jlong).
jlong os::current_thread_cpu_time() {
  return (jlong) gethrvtime();
}
5675 jlong os::thread_cpu_time(Thread *thread) {
5676 // return user level CPU time only to be consistent with
5677 // what current_thread_cpu_time returns.
5678 // thread_cpu_time_info() must be changed if this changes
5679 return os::thread_cpu_time(thread, false /* user time only */);
5680 }
5682 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
5683 if (user_sys_cpu_time) {
5684 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
5685 } else {
5686 return os::current_thread_cpu_time();
5687 }
5688 }
5690 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
5691 char proc_name[64];
5692 int count;
5693 prusage_t prusage;
5694 jlong lwp_time;
5695 int fd;
5697 sprintf(proc_name, "/proc/%d/lwp/%d/lwpusage",
5698 getpid(),
5699 thread->osthread()->lwp_id());
5700 fd = ::open(proc_name, O_RDONLY);
5701 if ( fd == -1 ) return -1;
5703 do {
5704 count = ::pread(fd,
5705 (void *)&prusage.pr_utime,
5706 thr_time_size,
5707 thr_time_off);
5708 } while (count < 0 && errno == EINTR);
5709 ::close(fd);
5710 if ( count < 0 ) return -1;
5712 if (user_sys_cpu_time) {
5713 // user + system CPU time
5714 lwp_time = (((jlong)prusage.pr_stime.tv_sec +
5715 (jlong)prusage.pr_utime.tv_sec) * (jlong)1000000000) +
5716 (jlong)prusage.pr_stime.tv_nsec +
5717 (jlong)prusage.pr_utime.tv_nsec;
5718 } else {
5719 // user level CPU time only
5720 lwp_time = ((jlong)prusage.pr_utime.tv_sec * (jlong)1000000000) +
5721 (jlong)prusage.pr_utime.tv_nsec;
5722 }
5724 return(lwp_time);
5725 }
// JVMTI metadata for the timer behind current_thread_cpu_time():
// a 64-bit, monotonic, user-CPU-only timer.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}
// JVMTI metadata for the timer behind thread_cpu_time(Thread*): same
// characteristics as the current-thread variant (user CPU time only).
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;      // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;    // elapsed time not wall time
  info_ptr->may_skip_forward = false;     // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_USER_CPU;  // only user time is returned
}
5741 bool os::is_thread_cpu_time_supported() {
5742 if ( os::Solaris::T2_libthread() || UseBoundThreads ) {
5743 return true;
5744 } else {
5745 return false;
5746 }
5747 }
5749 // System loadavg support. Returns -1 if load average cannot be obtained.
5750 // Return the load average for our processor set if the primitive exists
5751 // (Solaris 9 and later). Otherwise just return system wide loadavg.
5752 int os::loadavg(double loadavg[], int nelem) {
5753 if (pset_getloadavg_ptr != NULL) {
5754 return (*pset_getloadavg_ptr)(PS_MYID, loadavg, nelem);
5755 } else {
5756 return ::getloadavg(loadavg, nelem);
5757 }
5758 }
5760 //---------------------------------------------------------------------------------
// Best-effort symbolic description of `addr`, printed to `st`.
// Uses dladdr() to resolve the containing symbol and shared object;
// returns true if dladdr recognized the address, false otherwise.
bool os::find(address addr, outputStream* st) {
  Dl_info dlinfo;
  memset(&dlinfo, 0, sizeof(dlinfo));
  if (dladdr(addr, &dlinfo) != 0) {
    st->print(PTR_FORMAT ": ", addr);
    // Prefer "symbol+offset"; fall back to an offset within the
    // object's base, then to a bare "<absolute address>".
    if (dlinfo.dli_sname != NULL && dlinfo.dli_saddr != NULL) {
      st->print("%s+%#lx", dlinfo.dli_sname, addr-(intptr_t)dlinfo.dli_saddr);
    } else if (dlinfo.dli_fbase != NULL)
      st->print("<offset %#lx>", addr-(intptr_t)dlinfo.dli_fbase);
    else
      st->print("<absolute address>");
    if (dlinfo.dli_fname != NULL) {
      st->print(" in %s", dlinfo.dli_fname);
    }
    if (dlinfo.dli_fbase != NULL) {
      st->print(" at " PTR_FORMAT, dlinfo.dli_fbase);
    }
    st->cr();

    if (Verbose) {
      // decode some bytes around the PC
      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
      // Don't disassemble below the start of the symbol (or object).
      address lowest = (address) dlinfo.dli_sname;
      if (!lowest) lowest = (address) dlinfo.dli_fbase;
      if (begin < lowest) begin = lowest;
      // Don't run past the start of the next symbol either.
      Dl_info dlinfo2;
      if (dladdr(end, &dlinfo2) != 0 && dlinfo2.dli_saddr != dlinfo.dli_saddr
          && end > dlinfo2.dli_saddr && dlinfo2.dli_saddr > begin)
        end = (address) dlinfo2.dli_saddr;
      Disassembler::decode(begin, end, st);
    }
    return true;
  }
  return false;
}
5799 // Following function has been added to support HotSparc's libjvm.so running
5800 // under Solaris production JDK 1.2.2 / 1.3.0. These came from
5801 // src/solaris/hpi/native_threads in the EVM codebase.
5802 //
5803 // NOTE: This is no longer needed in the 1.3.1 and 1.4 production release
5804 // libraries and should thus be removed. We will leave it behind for a while
5805 // until we no longer want to able to run on top of 1.3.0 Solaris production
5806 // JDK. See 4341971.
// Safety margin (bytes) subtracted from the computed free stack space.
#define STACK_SLACK 0x800

extern "C" {
  // Exported with C linkage for the legacy 1.2.2/1.3.0 Solaris JDK HPI
  // (see the note above).  Returns the number of bytes of stack still
  // available to the calling thread, minus STACK_SLACK: distance from
  // a local variable's address down to the stack segment's low bound.
  intptr_t sysThreadAvailableStackWithSlack() {
    stack_t st;
    intptr_t retval, stack_top;
    retval = thr_stksegment(&st);
    assert(retval == 0, "incorrect return value from thr_stksegment");
    assert((address)&st < (address)st.ss_sp, "Invalid stack base returned");
    assert((address)&st > (address)st.ss_sp-st.ss_size, "Invalid stack size returned");
    // ss_sp is the HIGH end of the segment; the low bound is ss_sp - ss_size.
    stack_top=(intptr_t)st.ss_sp-st.ss_size;
    return ((intptr_t)&stack_top - stack_top - STACK_SLACK);
  }
}
5823 // ObjectMonitor park-unpark infrastructure ...
5824 //
5825 // We implement Solaris and Linux PlatformEvents with the
5826 // obvious condvar-mutex-flag triple.
5827 // Another alternative that works quite well is pipes:
5828 // Each PlatformEvent consists of a pipe-pair.
5829 // The thread associated with the PlatformEvent
5830 // calls park(), which reads from the input end of the pipe.
5831 // Unpark() writes into the other end of the pipe.
5832 // The write-side of the pipe must be set NDELAY.
5833 // Unfortunately pipes consume a large # of handles.
5834 // Native solaris lwp_park() and lwp_unpark() work nicely, too.
5835 // Using pipes for the 1st few threads might be workable, however.
5836 //
5837 // park() is permitted to return spuriously.
5838 // Callers of park() should wrap the call to park() in
5839 // an appropriate loop. A litmus test for the correct
5840 // usage of park is the following: if park() were modified
5841 // to immediately return 0 your code should still work,
5842 // albeit degenerating to a spin loop.
5843 //
5844 // An interesting optimization for park() is to use a trylock()
5845 // to attempt to acquire the mutex. If the trylock() fails
5846 // then we know that a concurrent unpark() operation is in-progress.
5847 // in that case the park() code could simply set _count to 0
5848 // and return immediately. The subsequent park() operation *might*
5849 // return immediately. That's harmless as the caller of park() is
// expected to loop. By using trylock() we will have avoided a
// context switch caused by contention on the per-thread mutex.
5852 //
5853 // TODO-FIXME:
5854 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the
5855 // objectmonitor implementation.
5856 // 2. Collapse the JSR166 parker event, and the
5857 // objectmonitor ParkEvent into a single "Event" construct.
5858 // 3. In park() and unpark() add:
5859 // assert (Thread::current() == AssociatedWith).
5860 // 4. add spurious wakeup injection on a -XX:EarlyParkReturn=N switch.
5861 // 1-out-of-N park() operations will return immediately.
5862 //
5863 // _Event transitions in park()
5864 // -1 => -1 : illegal
5865 // 1 => 0 : pass - return immediately
5866 // 0 => -1 : block
5867 //
5868 // _Event serves as a restricted-range semaphore.
5869 //
5870 // Another possible encoding of _Event would be with
5871 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5872 //
5873 // TODO-FIXME: add DTRACE probes for:
5874 // 1. Tx parks
5875 // 2. Ty unparks Tx
5876 // 3. Tx resumes from park
5879 // value determined through experimentation
5880 #define ROUNDINGFIX 11
5882 // utility to compute the abstime argument to timedwait.
5883 // TODO-FIXME: switch from compute_abstime() to unpackTime().
// Convert a relative timeout in milliseconds into the absolute deadline
// (now + millis) that cond_timedwait() expects.  Negative timeouts are
// clamped to zero, and the seconds component is capped at the maximum
// wait the underlying library accepts.  Returns its first argument.
static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
  // millis is the relative timeout time
  // abstime will be the absolute timeout time
  if (millis < 0) millis = 0;
  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");
  jlong seconds = millis / 1000;
  jlong max_wait_period;

  if (UseLWPSynchronization) {
    // forward port of fix for 4275818 (not sleeping long enough)
    // There was a bug in Solaris 6, 7 and pre-patch 5 of 8 where
    // _lwp_cond_timedwait() used a round_down algorithm rather
    // than a round_up. For millis less than our roundfactor
    // it rounded down to 0 which doesn't meet the spec.
    // For millis > roundfactor we may return a bit sooner, but
    // since we can not accurately identify the patch level and
    // this has already been fixed in Solaris 9 and 8 we will
    // leave it alone rather than always rounding down.

    if (millis > 0 && millis < ROUNDINGFIX) millis = ROUNDINGFIX;
    // It appears that when we go directly through Solaris _lwp_cond_timedwait()
    // the acceptable max time threshold is smaller than for libthread on 2.5.1 and 2.6
    max_wait_period = 21000000;
  } else {
    max_wait_period = 50000000;
  }
  millis %= 1000;
  if (seconds > max_wait_period) { // see man cond_timedwait(3T)
    seconds = max_wait_period;
  }
  // Assemble seconds + microseconds, carrying any usec overflow into
  // the seconds field, then convert usec to the nsec the API wants.
  abstime->tv_sec = now.tv_sec + seconds;
  long usec = now.tv_usec + millis * 1000;
  if (usec >= 1000000) {
    abstime->tv_sec += 1;
    usec -= 1000000;
  }
  abstime->tv_nsec = usec * 1000;
  return abstime;
}
5927 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
5928 // Conceptually TryPark() should be equivalent to park(0).
5930 int os::PlatformEvent::TryPark() {
5931 for (;;) {
5932 const int v = _Event ;
5933 guarantee ((v == 0) || (v == 1), "invariant") ;
5934 if (Atomic::cmpxchg (0, &_Event, v) == v) return v ;
5935 }
5936 }
void os::PlatformEvent::park() { // AKA: down()
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Lock-free decrement of _Event: 1 -> 0 consumes a pending unpark and
  // returns immediately; 0 -> -1 marks us as parked and we must block.
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v == 0) {
    // Do this the hard way by blocking ...
    // See http://monaco.sfbay/detail.jsf?cr=5094058.
    // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
    // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
    if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
    int status = os::Solaris::mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant") ;
    ++ _nParked ;
    // Wait (re-checking _Event against spurious wakeups) until unpark()
    // publishes a value >= 0.
    while (_Event < 0) {
      // for some reason, under 2.7 lwp_cond_wait() may return ETIME ...
      // Treat this the same as if the wait was interrupted
      // With usr/lib/lwp going to kernel, always handle ETIME
      status = os::Solaris::cond_wait(_cond, _mutex);
      if (status == ETIME) status = EINTR ;
      assert_status(status == 0 || status == EINTR, status, "cond_wait");
    }
    -- _nParked ;
    _Event = 0 ;
    status = os::Solaris::mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other.
    OrderAccess::fence();
  }
}
// Timed park: block for at most `millis` milliseconds.  Returns OS_OK
// if a permit was consumed (before or during the wait) and OS_TIMEOUT
// if the deadline expired first.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant") ;
  // Same lock-free decrement as the untimed park(); a positive prior
  // value means a permit was pending and we return without blocking.
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee (v >= 0, "invariant") ;
  if (v != 0) return OS_OK ;

  int ret = OS_TIMEOUT;
  timestruc_t abst;
  compute_abstime (&abst, millis);

  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // For Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant") ;
  ++ _nParked ;
  while (_Event < 0) {
    int status = os::Solaris::cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == EINTR ||
                  status == ETIME || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break ; // previous semantics
    if (status == ETIME || status == ETIMEDOUT) break ;
    // We consume and ignore EINTR and spurious wakeups.
  }
  -- _nParked ;
  // A non-negative _Event here means unpark() ran before the timeout.
  if (_Event >= 0) ret = OS_OK ;
  _Event = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other.
  OrderAccess::fence();
  return ret;
}
void os::PlatformEvent::unpark() {
  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Publish the permit; a prior value >= 0 means no one is parked, so
  // there is nothing to signal.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  // If the thread associated with the event was parked, wake it.
  // Wait for the thread assoc with the PlatformEvent to vacate.
  int status = os::Solaris::mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  int AnyWaiters = _nParked;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  guarantee(AnyWaiters == 0 || AnyWaiters == 1, "invariant");
  if (AnyWaiters != 0) {
    // We intentionally signal *after* dropping the lock
    // to avoid a common class of futile wakeups.
    status = os::Solaris::cond_signal(_cond);
    assert_status(status == 0, status, "cond_signal");
  }
}
6055 // JSR166
6056 // -------------------------------------------------------
6058 /*
6059 * The solaris and linux implementations of park/unpark are fairly
6060 * conservative for now, but can be improved. They currently use a
6061 * mutex/condvar pair, plus _counter.
6062 * Park decrements _counter if > 0, else does a condvar wait. Unpark
6063 * sets count to 1 and signals condvar. Only one thread ever waits
6064 * on the condvar. Contention seen when trying to park implies that someone
6065 * is unparking you, so don't wait. And spurious returns are fine, so there
6066 * is no need to track notifications.
6067 */
6069 #define MAX_SECS 100000000
6070 /*
6071 * This code is common to linux and solaris and will be moved to a
6072 * common place in dolphin.
6073 *
6074 * The passed in time value is either a relative time in nanoseconds
6075 * or an absolute time in milliseconds. Either way it has to be unpacked
6076 * into suitable seconds and nanoseconds components and stored in the
6077 * given timespec structure.
6078 * Given time is a 64-bit value and the time_t used in the timespec is only
6079 * a signed-32-bit value (except on 64-bit Linux) we have to watch for
6080 * overflow if times way in the future are given. Further on Solaris versions
6081 * prior to 10 there is a restriction (see cond_timedwait) that the specified
6082 * number of seconds, in abstime, is less than current_time + 100,000,000.
6083 * As it will be 28 years before "now + 100000000" will overflow we can
6084 * ignore overflow and just impose a hard-limit on seconds using the value
6085 * of "now + 100,000,000". This places a limit on the timeout of about 3.17
6086 * years from "now".
6087 */
// Unpack a caller-supplied time value into the absolute timespec that
// cond_timedwait() needs.  `time` is either an absolute deadline in
// milliseconds (isAbsolute) or a relative wait in nanoseconds; either
// way the result is clamped so tv_sec never exceeds now + MAX_SECS
// (see the overflow discussion in the comment block above).
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // Absolute deadline: split milliseconds into whole seconds + nsec.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // Relative wait: add the nanosecond delta to "now", normalizing any
    // nsec overflow into tv_sec.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
// JSR166 Parker::park: block the calling JavaThread until unpark(),
// interrupt, or (when time != 0) the decoded deadline.  `time` is an
// absolute deadline in millis when isAbsolute, else a relative wait in
// nanos; time == 0 with isAbsolute false means wait indefinitely.
void Parker::park(bool isAbsolute, jlong time) {
  // Ideally we'd do something useful while spinning, such
  // as calling unpackTime().

  // Optional fast-path check:
  // Return immediately if a permit is available.
  // We depend on Atomic::xchg() having full barrier semantics
  // since we are doing a lock-free update to _counter.
  if (Atomic::xchg(0, &_counter) > 0) return;

  // Optional fast-exit: Check interrupt before trying to wait
  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // First, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0) ) { // don't wait at all
    return;
  }
  if (time > 0) {
    // Warning: this code might be exposed to the old Solaris time
    // round-down bugs. Grep "roundingFix" for details.
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: _mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) ||
      os::Solaris::mutex_trylock(_mutex) != 0) {
    return;
  }

  int status ;

  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = os::Solaris::mutex_unlock(_mutex);
    assert (status == 0, "invariant") ;
    // Paranoia to ensure our locked and lock-free paths interact
    // correctly with each other and Java-level accesses.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Solaris::allowdebug_blocked_signals();
  thr_sigsetmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Do this the hard way by blocking ...
  // See http://monaco.sfbay/detail.jsf?cr=5094058.
  // TODO-FIXME: for Solaris SPARC set fprs.FEF=0 prior to parking.
  // Only for SPARC >= V8PlusA
#if defined(__sparc) && defined(COMPILER2)
  if (ClearFPUAtPark) { _mark_fpu_nosave() ; }
#endif

  if (time == 0) {
    status = os::Solaris::cond_wait (_cond, _mutex) ;
  } else {
    status = os::Solaris::cond_timedwait (_cond, _mutex, &absTime);
  }
  // Note that an untimed cond_wait() can sometimes return ETIME on older
  // versions of Solaris.
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  thr_sigsetmask(SIG_SETMASK, &oldsigs, NULL);
#endif
  _counter = 0 ;
  status = os::Solaris::mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock") ;
  // Paranoia to ensure our locked and lock-free paths interact
  // correctly with each other and Java-level accesses.
  OrderAccess::fence();

  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }
}
6231 void Parker::unpark() {
6232 int s, status ;
6233 status = os::Solaris::mutex_lock (_mutex) ;
6234 assert (status == 0, "invariant") ;
6235 s = _counter;
6236 _counter = 1;
6237 status = os::Solaris::mutex_unlock (_mutex) ;
6238 assert (status == 0, "invariant") ;
6240 if (s < 1) {
6241 status = os::Solaris::cond_signal (_cond) ;
6242 assert (status == 0, "invariant") ;
6243 }
6244 }
6246 extern char** environ;
6248 // Run the specified command in a separate process. Return its exit value,
6249 // or -1 on failure (e.g. can't fork a new process).
6250 // Unlike system(), this function can be called from signal handler. It
6251 // doesn't block SIGINT et al.
// Run `cmd` via "/usr/bin/sh -c" in a separate process and wait for it.
// Returns the child's exit code, 0x80 + signal number if it died on a
// signal, or -1 if the fork fails.  Unlike system(), this is intended
// to be callable from a signal handler and does not block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char * argv[4];
  argv[0] = (char *)"sh";
  argv[1] = (char *)"-c";
  argv[2] = cmd;
  argv[3] = NULL;

  // fork is async-safe, fork1 is not so can't use in signal handler
  pid_t pid;
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t != NULL && t->is_inside_signal_handler()) {
    pid = fork();
  } else {
    pid = fork1();
  }

  if (pid < 0) {
    // fork failed
    warning("fork failed: %s", strerror(errno));
    return -1;

  } else if (pid == 0) {
    // child process

    // try to be consistent with system(), which uses "/usr/bin/sh" on Solaris
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;      // interrupted wait: retry
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through
      return status;
    }
  }
}
6315 // is_headless_jre()
6316 //
6317 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
6318 // in order to report if we are running in a headless jre
6319 //
6320 // Since JDK8 xawt/libmawt.so was moved into the same directory
6321 // as libawt.so, and renamed libawt_xawt.so
6322 //
6323 bool os::is_headless_jre() {
6324 struct stat statbuf;
6325 char buf[MAXPATHLEN];
6326 char libmawtpath[MAXPATHLEN];
6327 const char *xawtstr = "/xawt/libmawt.so";
6328 const char *new_xawtstr = "/libawt_xawt.so";
6329 char *p;
6331 // Get path to libjvm.so
6332 os::jvm_path(buf, sizeof(buf));
6334 // Get rid of libjvm.so
6335 p = strrchr(buf, '/');
6336 if (p == NULL) return false;
6337 else *p = '\0';
6339 // Get rid of client or server
6340 p = strrchr(buf, '/');
6341 if (p == NULL) return false;
6342 else *p = '\0';
6344 // check xawt/libmawt.so
6345 strcpy(libmawtpath, buf);
6346 strcat(libmawtpath, xawtstr);
6347 if (::stat(libmawtpath, &statbuf) == 0) return false;
6349 // check libawt_xawt.so
6350 strcpy(libmawtpath, buf);
6351 strcat(libmawtpath, new_xawtstr);
6352 if (::stat(libmawtpath, &statbuf) == 0) return false;
6354 return true;
6355 }
// Interruption-aware write(2) wrapper: the INTERRUPTIBLE_RETURN_INT macro
// (defined in the Solaris platform headers) issues the call and supplies the
// return; os::Solaris::clear_interrupted is passed as its interrupt handler.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  INTERRUPTIBLE_RETURN_INT(::write(fd, buf, nBytes), os::Solaris::clear_interrupted);
}
6361 int os::close(int fd) {
6362 return ::close(fd);
6363 }
6365 int os::socket_close(int fd) {
6366 return ::close(fd);
6367 }
// Interruption-aware recv(2) wrapper; return value comes from the
// INTERRUPTIBLE_RETURN_INT macro with os::Solaris::clear_interrupted
// as the interrupt handler.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::recv(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
// Interruption-aware send(2) wrapper; mirrors os::recv() above.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  INTERRUPTIBLE_RETURN_INT((int)::send(fd, buf, nBytes, flags), os::Solaris::clear_interrupted);
}
// send(2) wrapper that only restarts on EINTR (RESTARTABLE_RETURN_INT),
// without the thread-interruption protocol used by os::send().
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  RESTARTABLE_RETURN_INT((int)::send(fd, buf, nBytes, flags));
}
// As both poll and select can be interrupted by signals, we have to be
// prepared to restart the system call after updating the timeout, unless
// a poll() is done with timeout == -1, in which case we repeat with this
// "wait forever" value.
//
// Wait up to 'timeout' milliseconds for fd to become readable (POLLIN).
// Returns OS_OK once the deadline has been consumed by EINTR restarts;
// otherwise returns poll()'s result directly.
int os::timeout(int fd, long timeout) {
  int res;
  struct timeval t;
  julong prevtime, newtime;
  static const char* aNull = 0;   // dummy (null) second argument for gettimeofday()
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLIN;

  // Snapshot the start time in milliseconds so the remaining timeout can
  // be recomputed after each EINTR.
  gettimeofday(&t, &aNull);
  prevtime = ((julong)t.tv_sec * 1000) + t.tv_usec / 1000;

  for(;;) {
    INTERRUPTIBLE_NORESTART(::poll(&pfd, 1, timeout), res, os::Solaris::clear_interrupted);
    if(res == OS_ERR && errno == EINTR) {
      // Interrupted by a signal: shrink the timeout by the elapsed time
      // and retry -- unless timeout == -1 (wait forever), which is reused
      // unchanged.
      if(timeout != -1) {
        gettimeofday(&t, &aNull);
        newtime = ((julong)t.tv_sec * 1000) + t.tv_usec /1000;
        timeout -= newtime - prevtime;
        if(timeout <= 0)
          return OS_OK;
        prevtime = newtime;
      }
    } else return res;
  }
}
// Interruption-aware connect(2). If the first attempt is interrupted
// (EINTR), the connect is reissued; because restarting a connect changes
// its errno semantics, the second attempt's EALREADY/EISCONN results are
// translated back to what a single uninterrupted connect would report.
int os::connect(int fd, struct sockaddr *him, socklen_t len) {
  int _result;
  INTERRUPTIBLE_NORESTART(::connect(fd, him, len), _result,\
          os::Solaris::clear_interrupted);

  // Depending on when thread interruption is reset, _result could be
  // one of two values when errno == EINTR

  if (((_result == OS_INTRPT) || (_result == OS_ERR))
      && (errno == EINTR)) {
    /* restarting a connect() changes its errno semantics */
    INTERRUPTIBLE(::connect(fd, him, len), _result,\
                  os::Solaris::clear_interrupted);
    /* undo these changes */
    if (_result == OS_ERR) {
      if (errno == EALREADY) {
        errno = EINPROGRESS; /* fall through */
      } else if (errno == EISCONN) {
        /* the restarted connect succeeded on the already-established
           connection: report success, not an error */
        errno = 0;
        return OS_OK;
      }
    }
  }
  return _result;
}
// Interruption-aware accept(2); rejects an invalid (negative) descriptor
// up front with OS_ERR, otherwise defers to the macro's return protocol.
int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  if (fd < 0) {
    return OS_ERR;
  }
  INTERRUPTIBLE_RETURN_INT((int)::accept(fd, him, len),\
                           os::Solaris::clear_interrupted);
}
// Interruption-aware recvfrom(2) wrapper.
int os::recvfrom(int fd, char* buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {
  INTERRUPTIBLE_RETURN_INT((int)::recvfrom(fd, buf, nBytes, flags, from, fromlen),\
                           os::Solaris::clear_interrupted);
}
// Interruption-aware sendto(2) wrapper.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {
  INTERRUPTIBLE_RETURN_INT((int)::sendto(fd, buf, len, flags, to, tolen),\
                           os::Solaris::clear_interrupted);
}
// Store the number of bytes readable on fd into *pbytes via FIONREAD.
// Returns 1 on success, 0 on failure (the JDK-facing convention), with
// the ioctl restarted on EINTR by the RESTARTABLE macro.
int os::socket_available(int fd, jint *pbytes) {
  if (fd < 0) {
    return OS_OK;
  }
  int ret;
  RESTARTABLE(::ioctl(fd, FIONREAD, pbytes), ret);
  // note: ioctl can return 0 when successful, JVM_SocketAvailable
  // is expected to return 0 on failure and 1 on success to the jdk.
  return (ret == OS_ERR) ? 0 : 1;
}
// Interruption-aware bind(2) wrapper; NORESTART variant, so the call is
// not reissued after an interrupt.
int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  INTERRUPTIBLE_RETURN_INT_NORESTART(::bind(fd, him, len),\
                                     os::Solaris::clear_interrupted);
}
6475 // Get the default path to the core file
6476 // Returns the length of the string
6477 int os::get_core_path(char* buffer, size_t bufferSize) {
6478 const char* p = get_current_directory(buffer, bufferSize);
6480 if (p == NULL) {
6481 assert(p != NULL, "failed to get current directory");
6482 return 0;
6483 }
6485 return strlen(buffer);
6486 }
#ifndef PRODUCT
// Entry point for the VM-internal ReserveMemorySpecial test (non-product
// builds only). Intentionally empty on Solaris.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif