src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp

author       twisti
date         Fri, 27 Feb 2009 13:27:09 -0800
changeset    1040:98cb887364d3
parent       1020:22e09c0f4b47
child        1063:7bb995fbd3c0
permissions  -rw-r--r--

6810672: Comment typos
Summary: I have collected some typos I have found while looking at the code.
Reviewed-by: kvn, never

duke@435 1 /*
twisti@1020 2 * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
duke@435 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
duke@435 20 * CA 95054 USA or visit www.sun.com if you need additional information or
duke@435 21 * have any questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 // do not include precompiled header file
duke@435 26 # include "incls/_os_solaris_x86.cpp.incl"
duke@435 27
duke@435 28 // put OS-includes here
duke@435 29 # include <sys/types.h>
duke@435 30 # include <sys/mman.h>
duke@435 31 # include <pthread.h>
duke@435 32 # include <signal.h>
duke@435 33 # include <setjmp.h>
duke@435 34 # include <errno.h>
duke@435 35 # include <dlfcn.h>
duke@435 36 # include <stdio.h>
duke@435 37 # include <unistd.h>
duke@435 38 # include <sys/resource.h>
duke@435 39 # include <thread.h>
duke@435 40 # include <sys/stat.h>
duke@435 41 # include <sys/time.h>
duke@435 42 # include <sys/filio.h>
duke@435 43 # include <sys/utsname.h>
duke@435 44 # include <sys/systeminfo.h>
duke@435 45 # include <sys/socket.h>
duke@435 46 # include <sys/trap.h>
duke@435 47 # include <sys/lwp.h>
duke@435 48 # include <pwd.h>
duke@435 49 # include <poll.h>
duke@435 50 # include <sys/lwp.h>
duke@435 51 # include <procfs.h> // see comment in <sys/procfs.h>
duke@435 52
duke@435 53 #ifndef AMD64
duke@435 54 // QQQ seems useless at this point
duke@435 55 # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later
duke@435 56 #endif // AMD64
duke@435 57 # include <sys/procfs.h> // see comment in <sys/procfs.h>
duke@435 58
duke@435 59
duke@435 60 #define MAX_PATH (2 * K)
duke@435 61
duke@435 62 // Minimum stack size for the VM. It's easier to document a constant value
duke@435 63 // but it's different for x86 and sparc because the page sizes are different.
duke@435 64 #ifdef AMD64
duke@435 65 size_t os::Solaris::min_stack_allowed = 224*K;
duke@435 66 #define REG_SP REG_RSP
duke@435 67 #define REG_PC REG_RIP
duke@435 68 #define REG_FP REG_RBP
duke@435 69 #else
duke@435 70 size_t os::Solaris::min_stack_allowed = 64*K;
duke@435 71 #define REG_SP UESP
duke@435 72 #define REG_PC EIP
duke@435 73 #define REG_FP EBP
duke@435 74 // 4900493: counter to prevent runaway LDTR refresh attempts
duke@435 75
duke@435 76 static volatile int ldtr_refresh = 0;
duke@435 77 // the libthread instruction that faults because of the stale LDTR
duke@435 78
duke@435 79 static const unsigned char movlfs[] = { 0x8e, 0xe0 // movl %eax,%fs
duke@435 80 };
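// (The workaround that uses movlfs and ldtr_refresh lives near the end of
// JVM_handle_solaris_signal, under the T_GPFLT check.)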
duke@435 81 #endif // AMD64
duke@435 82
duke@435 83 char* os::non_memory_address_word() {
duke@435 84 // Must never look like an address returned by reserve_memory,
duke@435 85 // even in its subfields (as defined by the CPU immediate fields,
duke@435 86 // if the CPU splits constants across multiple instructions).
duke@435 87 return (char*) -1;
duke@435 88 }
duke@435 89
duke@435 90 //
duke@435 91 // Validate a ucontext retrieved from walking a uc_link of a ucontext.
duke@435 92 // There are issues with libthread handing out uc_links for different threads
duke@435 93 // on the same uc_link chain, as well as bad or circular links.
duke@435 94 //
duke@435 95 bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) {
duke@435 96 if (valid >= suspect ||
duke@435 97 valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags ||
duke@435 98 valid->uc_stack.ss_sp != suspect->uc_stack.ss_sp ||
duke@435 99 valid->uc_stack.ss_size != suspect->uc_stack.ss_size) {
duke@435 100 DEBUG_ONLY(tty->print_cr("valid_ucontext: failed test 1");)
duke@435 101 return false;
duke@435 102 }
duke@435 103
duke@435 104 if (thread->is_Java_thread()) {
duke@435 105 if (!valid_stack_address(thread, (address)suspect)) {
duke@435 106 DEBUG_ONLY(tty->print_cr("valid_ucontext: uc_link not in thread stack");)
duke@435 107 return false;
duke@435 108 }
duke@435 109 if (!valid_stack_address(thread, (address) suspect->uc_mcontext.gregs[REG_SP])) {
duke@435 110 DEBUG_ONLY(tty->print_cr("valid_ucontext: stack pointer not in thread stack");)
duke@435 111 return false;
duke@435 112 }
duke@435 113 }
duke@435 114 return true;
duke@435 115 }
duke@435 116
duke@435 117 // We will only follow one level of uc_link since there are libthread
duke@435 118 // issues with ucontext linking and it is better to be safe and just
duke@435 119 // let caller retry later.
duke@435 120 ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
duke@435 121 ucontext_t *uc) {
duke@435 122
duke@435 123 ucontext_t *retuc = NULL;
duke@435 124
duke@435 125 if (uc != NULL) {
duke@435 126 if (uc->uc_link == NULL) {
duke@435 127 // cannot validate without uc_link so accept current ucontext
duke@435 128 retuc = uc;
duke@435 129 } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
duke@435 130 // first ucontext is valid so try the next one
duke@435 131 uc = uc->uc_link;
duke@435 132 if (uc->uc_link == NULL) {
duke@435 133 // cannot validate without uc_link so accept current ucontext
duke@435 134 retuc = uc;
duke@435 135 } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
duke@435 136 // the ucontext one level down is also valid so return it
duke@435 137 retuc = uc;
duke@435 138 }
duke@435 139 }
duke@435 140 }
duke@435 141 return retuc;
duke@435 142 }
duke@435 143
duke@435 144 // Assumes ucontext is valid
duke@435 145 ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
duke@435 146 return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]);
duke@435 147 }
duke@435 148
duke@435 149 // Assumes ucontext is valid
duke@435 150 intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
duke@435 151 return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
duke@435 152 }
duke@435 153
duke@435 154 // Assumes ucontext is valid
duke@435 155 intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
duke@435 156 return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
duke@435 157 }
duke@435 158
duke@435 159 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
duke@435 160 // is currently interrupted by SIGPROF.
duke@435 161 //
duke@435 162 // The difference between this and os::fetch_frame_from_context() is that
duke@435 163 // here we try to skip nested signal frames.
duke@435 164 ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
duke@435 165 ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
duke@435 166
duke@435 167 assert(thread != NULL, "just checking");
duke@435 168 assert(ret_sp != NULL, "just checking");
duke@435 169 assert(ret_fp != NULL, "just checking");
duke@435 170
duke@435 171 ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
duke@435 172 return os::fetch_frame_from_context(luc, ret_sp, ret_fp);
duke@435 173 }
duke@435 174
duke@435 175 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
duke@435 176 intptr_t** ret_sp, intptr_t** ret_fp) {
duke@435 177
duke@435 178 ExtendedPC epc;
duke@435 179 ucontext_t *uc = (ucontext_t*)ucVoid;
duke@435 180
duke@435 181 if (uc != NULL) {
duke@435 182 epc = os::Solaris::ucontext_get_ExtendedPC(uc);
duke@435 183 if (ret_sp) *ret_sp = os::Solaris::ucontext_get_sp(uc);
duke@435 184 if (ret_fp) *ret_fp = os::Solaris::ucontext_get_fp(uc);
duke@435 185 } else {
duke@435 186 // construct empty ExtendedPC for return value checking
duke@435 187 epc = ExtendedPC(NULL);
duke@435 188 if (ret_sp) *ret_sp = (intptr_t *)NULL;
duke@435 189 if (ret_fp) *ret_fp = (intptr_t *)NULL;
duke@435 190 }
duke@435 191
duke@435 192 return epc;
duke@435 193 }
duke@435 194
duke@435 195 frame os::fetch_frame_from_context(void* ucVoid) {
duke@435 196 intptr_t* sp;
duke@435 197 intptr_t* fp;
duke@435 198 ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
duke@435 199 return frame(sp, fp, epc.pc());
duke@435 200 }
duke@435 201
duke@435 202 frame os::get_sender_for_C_frame(frame* fr) {
duke@435 203 return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
duke@435 204 }
duke@435 205
coleenp@907 206 extern "C" intptr_t *_get_current_fp(); // in .il file
duke@435 207
duke@435 208 frame os::current_frame() {
coleenp@907 209 intptr_t* fp = _get_current_fp(); // it's inlined so we want the current fp
duke@435 210 frame myframe((intptr_t*)os::current_stack_pointer(),
duke@435 211 (intptr_t*)fp,
duke@435 212 CAST_FROM_FN_PTR(address, os::current_frame));
duke@435 213 if (os::is_first_C_frame(&myframe)) {
duke@435 214 // stack is not walkable
sgoldman@542 215 frame ret; // This will be a null useless frame
sgoldman@542 216 return ret;
duke@435 217 } else {
duke@435 218 return os::get_sender_for_C_frame(&myframe);
duke@435 219 }
duke@435 220 }
duke@435 221
duke@435 222 // This is a simple callback that just fetches a PC for an interrupted thread.
duke@435 223 // The thread need not be suspended and the fetched PC is just a hint.
duke@435 224 // This one is currently used for profiling the VMThread ONLY!
duke@435 225
duke@435 226 // Must be synchronous
duke@435 227 void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
duke@435 228 Thread* thread = args->thread();
duke@435 229 ucontext_t* uc = args->ucontext();
duke@435 230 intptr_t* sp;
duke@435 231
duke@435 232 assert(ProfileVM && thread->is_VM_thread(), "just checking");
duke@435 233
duke@435 234 ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
duke@435 235 _addr = new_addr;
duke@435 236 }
duke@435 237
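// Fetch the libthread state of the given thread via thr_getstate(); if the
// thread is currently bound to an LWP, also read that LWP's status from
// /proc so the caller can inspect its registers. Returns 0 on success, or
// an errno-style value on failure.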
duke@435 238 static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
duke@435 239 char lwpstatusfile[PROCFILE_LENGTH];
duke@435 240 int lwpfd, err;
duke@435 241
duke@435 242 if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
duke@435 243 return (err);
duke@435 244 if (*flags == TRS_LWPID) {
duke@435 245 sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
duke@435 246 *lwp);
duke@435 247 if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
duke@435 248 perror("thr_mutator_status: open lwpstatus");
duke@435 249 return (EINVAL);
duke@435 250 }
duke@435 251 if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
duke@435 252 sizeof (lwpstatus_t)) {
duke@435 253 perror("thr_mutator_status: read lwpstatus");
duke@435 254 (void) close(lwpfd);
duke@435 255 return (EINVAL);
duke@435 256 }
duke@435 257 (void) close(lwpfd);
duke@435 258 }
duke@435 259 return (0);
duke@435 260 }
duke@435 261
duke@435 262 #ifndef AMD64
duke@435 263
duke@435 264 // Detect whether the OS supports SSE
duke@435 265 // From solaris_i486.s
duke@435 266 extern "C" bool sse_check();
duke@435 267 extern "C" bool sse_unavailable();
duke@435 268
duke@435 269 enum { SSE_UNKNOWN, SSE_NOT_SUPPORTED, SSE_SUPPORTED};
duke@435 270 static int sse_status = SSE_UNKNOWN;
duke@435 271
duke@435 272
duke@435 273 static void check_for_sse_support() {
duke@435 274 if (!VM_Version::supports_sse()) {
duke@435 275 sse_status = SSE_NOT_SUPPORTED;
duke@435 276 return;
duke@435 277 }
duke@435 278 // Look for _sse_hw in libc.so; if it does not exist or
duke@435 279 // its (int) value is 0, the OS has no support for SSE.
duke@435 280 int *sse_hwp;
duke@435 281 void *h;
duke@435 282
duke@435 283 if ((h=dlopen("/usr/lib/libc.so", RTLD_LAZY)) == NULL) {
duke@435 284 // open failed, presume no support for SSE
duke@435 285 sse_status = SSE_NOT_SUPPORTED;
duke@435 286 return;
duke@435 287 }
duke@435 288 if ((sse_hwp = (int *)dlsym(h, "_sse_hw")) == NULL) {
duke@435 289 sse_status = SSE_NOT_SUPPORTED;
duke@435 290 } else if (*sse_hwp == 0) {
duke@435 291 sse_status = SSE_NOT_SUPPORTED;
duke@435 292 }
duke@435 293 dlclose(h);
duke@435 294
duke@435 295 if (sse_status == SSE_UNKNOWN) {
duke@435 296 bool (*try_sse)() = (bool (*)())sse_check;
duke@435 297 sse_status = (*try_sse)() ? SSE_SUPPORTED : SSE_NOT_SUPPORTED;
duke@435 298 }
duke@435 299
duke@435 300 }
duke@435 301
twisti@1020 302 #endif // AMD64
twisti@1020 303
duke@435 304 bool os::supports_sse() {
twisti@1020 305 #ifdef AMD64
twisti@1020 306 return true;
twisti@1020 307 #else
duke@435 308 if (sse_status == SSE_UNKNOWN)
duke@435 309 check_for_sse_support();
duke@435 310 return sse_status == SSE_SUPPORTED;
twisti@1020 311 #endif // AMD64
duke@435 312 }
duke@435 313
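// On 32-bit, a request of 2*G or more may not fit in the process address
// space; probe by reserving (and immediately releasing) the memory.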
duke@435 314 bool os::is_allocatable(size_t bytes) {
duke@435 315 #ifdef AMD64
duke@435 316 return true;
duke@435 317 #else
duke@435 318
duke@435 319 if (bytes < 2 * G) {
duke@435 320 return true;
duke@435 321 }
duke@435 322
duke@435 323 char* addr = reserve_memory(bytes, NULL);
duke@435 324
duke@435 325 if (addr != NULL) {
duke@435 326 release_memory(addr, bytes);
duke@435 327 }
duke@435 328
duke@435 329 return addr != NULL;
duke@435 330 #endif // AMD64
duke@435 331
duke@435 332 }
duke@435 333
duke@435 334 extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
duke@435 335
duke@435 336 extern "C" void Fetch32PFI () ;
duke@435 337 extern "C" void Fetch32Resume () ;
duke@435 338 #ifdef AMD64
duke@435 339 extern "C" void FetchNPFI () ;
duke@435 340 extern "C" void FetchNResume () ;
duke@435 341 #endif // AMD64
duke@435 342
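// Main signal triage for the VM: handle the signals the VM itself relies on
// (SafeFetch faults, safepoint polls, implicit null checks, stack overflow,
// the async/interrupt signal, signal chaining) and only fall through to a
// fatal error report if nothing recognizes the trap.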
duke@435 343 int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
duke@435 344 ucontext_t* uc = (ucontext_t*) ucVoid;
duke@435 345
duke@435 346 #ifndef AMD64
duke@435 347 if (sig == SIGILL && info->si_addr == (caddr_t)sse_check) {
duke@435 348 // the SSE instruction faulted; supports_sse() needs to return false.
duke@435 349 uc->uc_mcontext.gregs[EIP] = (greg_t)sse_unavailable;
duke@435 350 return true;
duke@435 351 }
duke@435 352 #endif // !AMD64
duke@435 353
duke@435 354 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
duke@435 355
duke@435 356 SignalHandlerMark shm(t);
duke@435 357
duke@435 358 if(sig == SIGPIPE || sig == SIGXFSZ) {
duke@435 359 if (os::Solaris::chained_handler(sig, info, ucVoid)) {
duke@435 360 return true;
duke@435 361 } else {
duke@435 362 if (PrintMiscellaneous && (WizardMode || Verbose)) {
duke@435 363 char buf[64];
duke@435 364 warning("Ignoring %s - see 4229104 or 6499219",
duke@435 365 os::exception_name(sig, buf, sizeof(buf)));
duke@435 366
duke@435 367 }
duke@435 368 return true;
duke@435 369 }
duke@435 370 }
duke@435 371
duke@435 372 JavaThread* thread = NULL;
duke@435 373 VMThread* vmthread = NULL;
duke@435 374
duke@435 375 if (os::Solaris::signal_handlers_are_installed) {
duke@435 376 if (t != NULL ){
duke@435 377 if(t->is_Java_thread()) {
duke@435 378 thread = (JavaThread*)t;
duke@435 379 }
duke@435 380 else if(t->is_VM_thread()){
duke@435 381 vmthread = (VMThread *)t;
duke@435 382 }
duke@435 383 }
duke@435 384 }
duke@435 385
duke@435 386 guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
duke@435 387
duke@435 388 if (sig == os::Solaris::SIGasync()) {
duke@435 389 if(thread){
duke@435 390 OSThread::InterruptArguments args(thread, uc);
duke@435 391 thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
duke@435 392 return true;
duke@435 393 }
duke@435 394 else if(vmthread){
duke@435 395 OSThread::InterruptArguments args(vmthread, uc);
duke@435 396 vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
duke@435 397 return true;
duke@435 398 } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
duke@435 399 return true;
duke@435 400 } else {
duke@435 401 // os::Solaris::SIGasync is not chained and this is neither a VM
duke@435 402 // thread nor a Java thread; just consume the signal.
duke@435 403 return true;
duke@435 404 }
duke@435 405 }
duke@435 406
duke@435 407 if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
duke@435 408 // can't decode this kind of signal
duke@435 409 info = NULL;
duke@435 410 } else {
duke@435 411 assert(sig == info->si_signo, "bad siginfo");
duke@435 412 }
duke@435 413
duke@435 414 // decide if this trap can be handled by a stub
duke@435 415 address stub = NULL;
duke@435 416
duke@435 417 address pc = NULL;
duke@435 418
duke@435 419 //%note os_trap_1
duke@435 420 if (info != NULL && uc != NULL && thread != NULL) {
duke@435 421 // factor me: getPCfromContext
duke@435 422 pc = (address) uc->uc_mcontext.gregs[REG_PC];
duke@435 423
duke@435 424 // SafeFetch32() support
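// If the fault was raised by the SafeFetch32 stub, redirect the PC to the
// stub's resume entry so the failed access is swallowed.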
duke@435 425 if (pc == (address) Fetch32PFI) {
duke@435 426 uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
duke@435 427 return true ;
duke@435 428 }
duke@435 429 #ifdef AMD64
duke@435 430 if (pc == (address) FetchNPFI) {
duke@435 431 uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
duke@435 432 return true ;
duke@435 433 }
duke@435 434 #endif // AMD64
duke@435 435
duke@435 436 // Handle ALL stack overflow variations here
duke@435 437 if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
duke@435 438 address addr = (address) info->si_addr;
duke@435 439 if (thread->in_stack_yellow_zone(addr)) {
duke@435 440 thread->disable_stack_yellow_zone();
duke@435 441 if (thread->thread_state() == _thread_in_Java) {
duke@435 442 // Throw a stack overflow exception. Guard pages will be reenabled
duke@435 443 // while unwinding the stack.
duke@435 444 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
duke@435 445 } else {
duke@435 446 // Thread was in the vm or native code. Return and try to finish.
duke@435 447 return true;
duke@435 448 }
duke@435 449 } else if (thread->in_stack_red_zone(addr)) {
duke@435 450 // Fatal red zone violation. Disable the guard pages and fall through
duke@435 451 // to handle_unexpected_exception way down below.
duke@435 452 thread->disable_stack_red_zone();
duke@435 453 tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
duke@435 454 }
duke@435 455 }
duke@435 456
duke@435 457 if (thread->thread_state() == _thread_in_vm) {
duke@435 458 if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
duke@435 459 stub = StubRoutines::handler_for_unsafe_access();
duke@435 460 }
duke@435 461 }
duke@435 462
duke@435 463 if (thread->thread_state() == _thread_in_Java) {
duke@435 464 // Support Safepoint Polling
duke@435 465 if ( sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
duke@435 466 stub = SharedRuntime::get_poll_stub(pc);
duke@435 467 }
duke@435 468 else if (sig == SIGBUS && info->si_code == BUS_OBJERR) {
duke@435 469 // BugId 4454115: A read from a MappedByteBuffer can fault
duke@435 470 // here if the underlying file has been truncated.
duke@435 471 // Do not crash the VM in such a case.
duke@435 472 CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
duke@435 473 nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
duke@435 474 if (nm != NULL && nm->has_unsafe_access()) {
duke@435 475 stub = StubRoutines::handler_for_unsafe_access();
duke@435 476 }
duke@435 477 }
duke@435 478 else
duke@435 479 if (sig == SIGFPE && info->si_code == FPE_INTDIV) {
duke@435 480 // integer divide by zero
duke@435 481 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
duke@435 482 }
duke@435 483 #ifndef AMD64
duke@435 484 else if (sig == SIGFPE && info->si_code == FPE_FLTDIV) {
duke@435 485 // floating-point divide by zero
duke@435 486 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
duke@435 487 }
duke@435 488 else if (sig == SIGFPE && info->si_code == FPE_FLTINV) {
duke@435 489 // The encoding of D2I in i486.ad can cause an exception prior
duke@435 490 // to the fist instruction if there was an invalid operation
duke@435 491 // pending. We want to dismiss that exception. On the win_32
duke@435 492 // side it also seems that if it really was the fist that caused
duke@435 493 // the exception, we do the d2i by hand with different
duke@435 494 // rounding. Seems kind of weird. QQQ TODO
duke@435 495 // Note that we take the exception at the NEXT floating point instruction.
duke@435 496 if (pc[0] == 0xDB) {
duke@435 497 assert(pc[0] == 0xDB, "not a FIST opcode");
duke@435 498 assert(pc[1] == 0x14, "not a FIST opcode");
duke@435 499 assert(pc[2] == 0x24, "not a FIST opcode");
duke@435 500 return true;
duke@435 501 } else {
duke@435 502 assert(pc[-3] == 0xDB, "not an flt invalid opcode");
duke@435 503 assert(pc[-2] == 0x14, "not an flt invalid opcode");
duke@435 504 assert(pc[-1] == 0x24, "not an flt invalid opcode");
duke@435 505 }
duke@435 506 }
duke@435 507 else if (sig == SIGFPE ) {
duke@435 508 tty->print_cr("caught SIGFPE, info 0x%x.", info->si_code);
duke@435 509 }
duke@435 510 #endif // !AMD64
duke@435 511
duke@435 512 // QQQ It doesn't seem that we need to do this on x86 because we should be able
duke@435 513 // to return properly from the handler without this extra stuff on the back side.
duke@435 514
duke@435 515 else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
duke@435 516 // Determination of interpreter/vtable stub/compiled code null exception
duke@435 517 stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
duke@435 518 }
duke@435 519 }
duke@435 520
duke@435 521 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
duke@435 522 // and the heap gets shrunk before the field access.
duke@435 523 if ((sig == SIGSEGV) || (sig == SIGBUS)) {
duke@435 524 address addr = JNI_FastGetField::find_slowcase_pc(pc);
duke@435 525 if (addr != (address)-1) {
duke@435 526 stub = addr;
duke@435 527 }
duke@435 528 }
duke@435 529
duke@435 530 // Check to see if we caught the safepoint code in the
duke@435 531 // process of write protecting the memory serialization page.
duke@435 532 // It write enables the page immediately after protecting it
duke@435 533 // so we can just return to retry the write.
duke@435 534 if ((sig == SIGSEGV) &&
duke@435 535 os::is_memory_serialize_page(thread, (address)info->si_addr)) {
duke@435 536 // Block the current thread until the memory serialize page permission is restored.
duke@435 537 os::block_on_serialize_page_trap();
duke@435 538 return true;
duke@435 539 }
duke@435 540 }
duke@435 541
duke@435 542 // Execution protection violation
duke@435 543 //
duke@435 544 // Preventative code for future versions of Solaris which may
duke@435 545 // enable execution protection when running the 32-bit VM on AMD64.
duke@435 546 //
duke@435 547 // This should be kept as the last step in the triage. We don't
duke@435 548 // have a dedicated trap number for a no-execute fault, so be
duke@435 549 // conservative and allow other handlers the first shot.
duke@435 550 //
duke@435 551 // Note: We don't test that info->si_code == SEGV_ACCERR here.
duke@435 552 // This si_code is so generic that it is almost meaningless, and
duke@435 553 // the si_code for this condition may change in the future.
duke@435 554 // Furthermore, a false-positive should be harmless.
duke@435 555 if (UnguardOnExecutionViolation > 0 &&
duke@435 556 (sig == SIGSEGV || sig == SIGBUS) &&
duke@435 557 uc->uc_mcontext.gregs[TRAPNO] == T_PGFLT) { // page fault
duke@435 558 int page_size = os::vm_page_size();
duke@435 559 address addr = (address) info->si_addr;
duke@435 560 address pc = (address) uc->uc_mcontext.gregs[REG_PC];
duke@435 561 // Make sure the pc and the faulting address are sane.
duke@435 562 //
duke@435 563 // If an instruction spans a page boundary, and the page containing
duke@435 564 // the beginning of the instruction is executable but the following
duke@435 565 // page is not, the pc and the faulting address might be slightly
duke@435 566 // different - we still want to unguard the 2nd page in this case.
duke@435 567 //
duke@435 568 // 15 bytes seems to be a (very) safe value for max instruction size.
duke@435 569 bool pc_is_near_addr =
duke@435 570 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
duke@435 571 bool instr_spans_page_boundary =
duke@435 572 (align_size_down((intptr_t) pc ^ (intptr_t) addr,
duke@435 573 (intptr_t) page_size) > 0);
duke@435 574
duke@435 575 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
duke@435 576 static volatile address last_addr =
duke@435 577 (address) os::non_memory_address_word();
duke@435 578
duke@435 579 // In conservative mode, don't unguard unless the address is in the VM
duke@435 580 if (addr != last_addr &&
duke@435 581 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
duke@435 582
coleenp@912 583 // Make memory rwx and retry
duke@435 584 address page_start =
duke@435 585 (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
coleenp@912 586 bool res = os::protect_memory((char*) page_start, page_size,
coleenp@912 587 os::MEM_PROT_RWX);
duke@435 588
duke@435 589 if (PrintMiscellaneous && Verbose) {
duke@435 590 char buf[256];
duke@435 591 jio_snprintf(buf, sizeof(buf), "Execution protection violation "
duke@435 592 "at " INTPTR_FORMAT
duke@435 593 ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
duke@435 594 page_start, (res ? "success" : "failed"), errno);
duke@435 595 tty->print_raw_cr(buf);
duke@435 596 }
duke@435 597 stub = pc;
duke@435 598
duke@435 599 // Set last_addr so if we fault again at the same address, we don't end
duke@435 600 // up in an endless loop.
duke@435 601 //
duke@435 602 // There are two potential complications here. Two threads trapping at
duke@435 603 // the same address at the same time could cause one of the threads to
duke@435 604 // think the page was already unguarded, and abort the VM. Likely very rare.
duke@435 605 //
duke@435 606 // The other race involves two threads alternately trapping at
duke@435 607 // different addresses and failing to unguard the page, resulting in
duke@435 608 // an endless loop. This condition is probably even more unlikely than
duke@435 609 // the first.
duke@435 610 //
duke@435 611 // Although both cases could be avoided by using locks or thread local
duke@435 612 // last_addr, these solutions are unnecessary complications: this
duke@435 613 // handler is a best-effort safety net, not a complete solution. It is
duke@435 614 // disabled by default and should only be used as a workaround in case
duke@435 615 // we missed any no-execute-unsafe VM code.
duke@435 616
duke@435 617 last_addr = addr;
duke@435 618 }
duke@435 619 }
duke@435 620 }
duke@435 621
duke@435 622 if (stub != NULL) {
duke@435 623 // save all thread context in case we need to restore it
duke@435 624
duke@435 625 if (thread != NULL) thread->set_saved_exception_pc(pc);
duke@435 626 // 12/02/99: On Sparc it appears that the full context is also saved
duke@435 627 // but as yet, no one looks at or restores that saved context
duke@435 628 // factor me: setPC
duke@435 629 uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
duke@435 630 return true;
duke@435 631 }
duke@435 632
duke@435 633 // signal-chaining
duke@435 634 if (os::Solaris::chained_handler(sig, info, ucVoid)) {
duke@435 635 return true;
duke@435 636 }
duke@435 637
duke@435 638 #ifndef AMD64
duke@435 639 // Workaround (bug 4900493) for Solaris kernel bug 4966651.
duke@435 640 // Handle an undefined selector caused by an attempt to assign
duke@435 641 // fs in libthread getipriptr(). With the current libthread design every 512
duke@435 642 // thread creations the LDT for a private thread data structure is extended
duke@435 643 // and there is a hazard that another thread attempting a thread creation
duke@435 644 // will use a stale LDTR that doesn't reflect the structure's growth,
duke@435 645 // causing a GP fault.
duke@435 646 // Enforce the probable limit of passes through here to guard against an
duke@435 647 // infinite loop if some other move to fs caused the GP fault. Note that
duke@435 648 // this loop counter is ultimately a heuristic as it is possible for
duke@435 649 // more than one thread to generate this fault at a time in an MP system.
duke@435 650 // If the loop count is exceeded or the poll fails,
duke@435 651 // just fall through to a fatal error.
duke@435 652 // If there is some other source of T_GPFLT traps and the text at EIP is
duke@435 653 // unreadable, this code will loop infinitely until the stack is exhausted.
duke@435 654 // The key to diagnosis in this case is to look for the bottom signal handler
duke@435 655 // frame.
duke@435 656
duke@435 657 if(! IgnoreLibthreadGPFault) {
duke@435 658 if (sig == SIGSEGV && uc->uc_mcontext.gregs[TRAPNO] == T_GPFLT) {
duke@435 659 const unsigned char *p =
duke@435 660 (unsigned const char *) uc->uc_mcontext.gregs[EIP];
duke@435 661
duke@435 662 // Expected instruction?
duke@435 663
duke@435 664 if(p[0] == movlfs[0] && p[1] == movlfs[1]) {
duke@435 665
duke@435 666 Atomic::inc(&ldtr_refresh);
duke@435 667
duke@435 668 // Infinite loop?
duke@435 669
duke@435 670 if(ldtr_refresh < ((2 << 16) / PAGESIZE)) {
duke@435 671
duke@435 672 // No, force scheduling to get a fresh view of the LDTR
duke@435 673
duke@435 674 if(poll(NULL, 0, 10) == 0) {
duke@435 675
duke@435 676 // Retry the move
duke@435 677
duke@435 678 return false;
duke@435 679 }
duke@435 680 }
duke@435 681 }
duke@435 682 }
duke@435 683 }
duke@435 684 #endif // !AMD64
duke@435 685
duke@435 686 if (!abort_if_unrecognized) {
duke@435 687 // caller wants another chance, so give it to him
duke@435 688 return false;
duke@435 689 }
duke@435 690
duke@435 691 if (!os::Solaris::libjsig_is_loaded) {
duke@435 692 struct sigaction oldAct;
duke@435 693 sigaction(sig, (struct sigaction *)0, &oldAct);
duke@435 694 if (oldAct.sa_sigaction != signalHandler) {
duke@435 695 void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
duke@435 696 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
twisti@1040 697 warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand);
duke@435 698 }
duke@435 699 }
duke@435 700
duke@435 701 if (pc == NULL && uc != NULL) {
duke@435 702 pc = (address) uc->uc_mcontext.gregs[REG_PC];
duke@435 703 }
duke@435 704
duke@435 705 // unmask current signal
duke@435 706 sigset_t newset;
duke@435 707 sigemptyset(&newset);
duke@435 708 sigaddset(&newset, sig);
duke@435 709 sigprocmask(SIG_UNBLOCK, &newset, NULL);
duke@435 710
duke@435 711 VMError err(t, sig, pc, info, ucVoid);
duke@435 712 err.report_and_die();
duke@435 713
duke@435 714 ShouldNotReachHere();
duke@435 715 }
duke@435 716
duke@435 717 void os::print_context(outputStream *st, void *context) {
duke@435 718 if (context == NULL) return;
duke@435 719
duke@435 720 ucontext_t *uc = (ucontext_t*)context;
duke@435 721 st->print_cr("Registers:");
duke@435 722 #ifdef AMD64
duke@435 723 st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
duke@435 724 st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
duke@435 725 st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
duke@435 726 st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
duke@435 727 st->cr();
duke@435 728 st->print( "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
duke@435 729 st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
duke@435 730 st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
duke@435 731 st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
duke@435 732 st->cr();
duke@435 733 st->print(", R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
duke@435 734 st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
duke@435 735 st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
duke@435 736 st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
duke@435 737 st->print(", R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
duke@435 738 st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
duke@435 739 st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
duke@435 740 st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
duke@435 741 st->cr();
duke@435 742 st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
duke@435 743 st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
duke@435 744 #else
duke@435 745 st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
duke@435 746 st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
duke@435 747 st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
duke@435 748 st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
duke@435 749 st->cr();
duke@435 750 st->print( "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
duke@435 751 st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
duke@435 752 st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
duke@435 753 st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
duke@435 754 st->cr();
duke@435 755 st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
duke@435 756 st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
duke@435 757 #endif // AMD64
duke@435 758 st->cr();
duke@435 759 st->cr();
duke@435 760
duke@435 761 intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
duke@435 762 st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
duke@435 763 print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
duke@435 764 st->cr();
duke@435 765
duke@435 766 // Note: it may be unsafe to inspect memory near pc. For example, pc may
duke@435 767 // point to garbage if the entry point of an nmethod is corrupted. Leave
duke@435 768 // this at the end, and hope for the best.
duke@435 769 ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
duke@435 770 address pc = epc.pc();
duke@435 771 st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
duke@435 772 print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
duke@435 773 }
duke@435 774
duke@435 775 #ifdef AMD64
duke@435 776 void os::Solaris::init_thread_fpu_state(void) {
duke@435 777 // Nothing to do
duke@435 778 }
duke@435 779 #else
duke@435 780 // From solaris_i486.s
duke@435 781 extern "C" void fixcw();
duke@435 782
duke@435 783 void os::Solaris::init_thread_fpu_state(void) {
duke@435 784 // Set fpu to 53 bit precision. This happens too early to use a stub.
duke@435 785 fixcw();
duke@435 786 }
duke@435 787
duke@435 788 // These routines are the initial values of atomic_xchg_entry(),
duke@435 789 // atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
duke@435 790 // until initialization is complete.
duke@435 791 // TODO - replace with .il implementation when compiler supports it.
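// Each bootstrap routine installs the real stub once StubRoutines publishes
// it; until then a plain memory operation is enough, since only a single
// thread exists that early in startup.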
duke@435 792
duke@435 793 typedef jint xchg_func_t (jint, volatile jint*);
duke@435 794 typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
duke@435 795 typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
duke@435 796 typedef jint add_func_t (jint, volatile jint*);
duke@435 797 typedef void fence_func_t ();
duke@435 798
duke@435 799 jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
duke@435 800 // try to use the stub:
duke@435 801 xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());
duke@435 802
duke@435 803 if (func != NULL) {
duke@435 804 os::atomic_xchg_func = func;
duke@435 805 return (*func)(exchange_value, dest);
duke@435 806 }
duke@435 807 assert(Threads::number_of_threads() == 0, "for bootstrap only");
duke@435 808
duke@435 809 jint old_value = *dest;
duke@435 810 *dest = exchange_value;
duke@435 811 return old_value;
duke@435 812 }
duke@435 813
duke@435 814 jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
duke@435 815 // try to use the stub:
duke@435 816 cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());
duke@435 817
duke@435 818 if (func != NULL) {
duke@435 819 os::atomic_cmpxchg_func = func;
duke@435 820 return (*func)(exchange_value, dest, compare_value);
duke@435 821 }
duke@435 822 assert(Threads::number_of_threads() == 0, "for bootstrap only");
duke@435 823
duke@435 824 jint old_value = *dest;
duke@435 825 if (old_value == compare_value)
duke@435 826 *dest = exchange_value;
duke@435 827 return old_value;
duke@435 828 }
duke@435 829
duke@435 830 jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
duke@435 831 // try to use the stub:
duke@435 832 cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());
duke@435 833
duke@435 834 if (func != NULL) {
duke@435 835 os::atomic_cmpxchg_long_func = func;
duke@435 836 return (*func)(exchange_value, dest, compare_value);
duke@435 837 }
duke@435 838 assert(Threads::number_of_threads() == 0, "for bootstrap only");
duke@435 839
duke@435 840 jlong old_value = *dest;
duke@435 841 if (old_value == compare_value)
duke@435 842 *dest = exchange_value;
duke@435 843 return old_value;
duke@435 844 }
duke@435 845
duke@435 846 jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
duke@435 847 // try to use the stub:
duke@435 848 add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
duke@435 849
duke@435 850 if (func != NULL) {
duke@435 851 os::atomic_add_func = func;
duke@435 852 return (*func)(add_value, dest);
duke@435 853 }
duke@435 854 assert(Threads::number_of_threads() == 0, "for bootstrap only");
duke@435 855
duke@435 856 return (*dest) += add_value;
duke@435 857 }
duke@435 858
duke@435 859 void os::fence_bootstrap() {
duke@435 860 // try to use the stub:
duke@435 861 fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());
duke@435 862
duke@435 863 if (func != NULL) {
duke@435 864 os::fence_func = func;
duke@435 865 (*func)();
duke@435 866 return;
duke@435 867 }
duke@435 868 assert(Threads::number_of_threads() == 0, "for bootstrap only");
duke@435 869
duke@435 870 // don't have to do anything for a single thread
duke@435 871 }
duke@435 872
duke@435 873 xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
duke@435 874 cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
duke@435 875 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
duke@435 876 add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
duke@435 877 fence_func_t* os::fence_func = os::fence_bootstrap;
duke@435 878
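// setup_fpu() loads the VM's standard x87 control word (from
// StubRoutines::addr_fpu_cntrl_wrd_std()) through the raw assembly helper
// declared below.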
duke@435 879 extern "C" void _solaris_raw_setup_fpu(address ptr);
duke@435 880 void os::setup_fpu() {
duke@435 881 address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
duke@435 882 _solaris_raw_setup_fpu(fpu_cntrl);
duke@435 883 }
duke@435 884 #endif // AMD64
