/*
 * Copyright 1999-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
# include "incls/_os_solaris_x86.cpp.incl"

// put OS-includes here
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include  // see comment in

#ifndef AMD64
// QQQ seems useless at this point
# define _STRUCTURED_PROC 1  // this gets us the new structured proc interfaces of 5.6 & later
#endif // AMD64
# include  // see comment in


#define MAX_PATH (2 * K)

// Minimum stack size for the VM.  It's easier to document a constant value
// but it's different for x86 and sparc because the page sizes are different.
#ifdef AMD64
size_t os::Solaris::min_stack_allowed = 224*K;
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#else
size_t os::Solaris::min_stack_allowed = 64*K;
#define REG_SP UESP
#define REG_PC EIP
#define REG_FP EBP
// 4900493 counter to prevent runaway LDTR refresh attempt

static volatile int ldtr_refresh = 0;
// the libthread instruction that faults because of the stale LDTR

static const unsigned char movlfs[] = { 0x8e, 0xe0    // movl %eax,%fs
                                      };
#endif // AMD64

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*) -1;
}

//
// Validate a ucontext retrieved from walking a uc_link of a ucontext.
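// (The uc_link of a signal handler's ucontext points at the context that was
// interrupted when the signal was taken.)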
// There are issues with libthread giving out uc_links for different threads
// on the same uc_link chain and bad or circular links.
//
bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) {
  if (valid >= suspect ||
      valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags ||
      valid->uc_stack.ss_sp    != suspect->uc_stack.ss_sp    ||
      valid->uc_stack.ss_size  != suspect->uc_stack.ss_size) {
    DEBUG_ONLY(tty->print_cr("valid_ucontext: failed test 1");)
    return false;
  }

  if (thread->is_Java_thread()) {
    if (!valid_stack_address(thread, (address)suspect)) {
      DEBUG_ONLY(tty->print_cr("valid_ucontext: uc_link not in thread stack");)
      return false;
    }
    if (!valid_stack_address(thread, (address) suspect->uc_mcontext.gregs[REG_SP])) {
      DEBUG_ONLY(tty->print_cr("valid_ucontext: stackpointer not in thread stack");)
      return false;
    }
  }
  return true;
}

// We will only follow one level of uc_link since there are libthread
// issues with ucontext linking and it is better to be safe and just
// let caller retry later.
ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
                                                        ucontext_t *uc) {

  ucontext_t *retuc = NULL;

  if (uc != NULL) {
    if (uc->uc_link == NULL) {
      // cannot validate without uc_link so accept current ucontext
      retuc = uc;
    } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
      // first ucontext is valid so try the next one
      uc = uc->uc_link;
      if (uc->uc_link == NULL) {
        // cannot validate without uc_link so accept current ucontext
        retuc = uc;
      } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
        // the ucontext one level down is also valid so return it
        retuc = uc;
      }
    }
  }
  return retuc;
}

// Assumes ucontext is valid
ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
  return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]);
}

// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
// The difference between this and os::fetch_frame_from_context() is that
// here we try to skip nested signal frames.
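// Because get_valid_uc_in_signal_handler() follows at most one uc_link,
// at most one nested signal frame is skipped here.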
ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
  return os::fetch_frame_from_context(luc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  ucontext_t *uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = os::Solaris::ucontext_get_ExtendedPC(uc);
    if (ret_sp) *ret_sp = os::Solaris::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Solaris::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

extern "C" intptr_t *_get_current_fp();  // in .il file

frame os::current_frame() {
  intptr_t* fp = _get_current_fp();  // it's inlined so want current fp
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    frame ret; // This will be a null useless frame
    return ret;
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// This is a simple callback that just fetches a PC for an interrupted thread.
// The thread need not be suspended and the fetched PC is just a hint.
// This one is currently used for profiling the VMThread ONLY!
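// It is invoked from the SIGasync branch of JVM_handle_solaris_signal (below)
// via OSThread::do_interrupt_callbacks_at_interrupt.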

// Must be synchronous
void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
  Thread*     thread = args->thread();
  ucontext_t* uc     = args->ucontext();
  intptr_t* sp;

  assert(ProfileVM && thread->is_VM_thread(), "just checking");

  ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
  _addr = new_addr;
}

static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
  char lwpstatusfile[PROCFILE_LENGTH];
  int lwpfd, err;

  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
    return (err);
  if (*flags == TRS_LWPID) {
    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
            *lwp);
    if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
      perror("thr_mutator_status: open lwpstatus");
      return (EINVAL);
    }
    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
        sizeof (lwpstatus_t)) {
      perror("thr_mutator_status: read lwpstatus");
      (void) close(lwpfd);
      return (EINVAL);
    }
    (void) close(lwpfd);
  }
  return (0);
}

#ifndef AMD64

// Detecting SSE support by OS
// From solaris_i486.s
extern "C" bool sse_check();
extern "C" bool sse_unavailable();

enum { SSE_UNKNOWN, SSE_NOT_SUPPORTED, SSE_SUPPORTED};
static int sse_status = SSE_UNKNOWN;


static void check_for_sse_support() {
  if (!VM_Version::supports_sse()) {
    sse_status = SSE_NOT_SUPPORTED;
    return;
  }
  // looking for _sse_hw in libc.so, if it does not exist or
  // the value (int) is 0, OS has no support for SSE
  int *sse_hwp;
  void *h;

  if ((h=dlopen("/usr/lib/libc.so", RTLD_LAZY)) == NULL) {
    // open failed, presume no support for SSE
    sse_status = SSE_NOT_SUPPORTED;
    return;
  }
  if ((sse_hwp = (int *)dlsym(h, "_sse_hw")) == NULL) {
    sse_status = SSE_NOT_SUPPORTED;
  } else if (*sse_hwp == 0) {
    sse_status = SSE_NOT_SUPPORTED;
  }
  dlclose(h);

  if (sse_status == SSE_UNKNOWN) {
    bool (*try_sse)() = (bool (*)())sse_check;
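    // sse_check executes an SSE instruction; if the OS does not support it,
    // the instruction raises SIGILL and the signal handler below redirects
    // execution to sse_unavailable, which makes the probe return false.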
    sse_status = (*try_sse)() ? SSE_SUPPORTED : SSE_NOT_SUPPORTED;
  }

}

bool os::supports_sse() {
  if (sse_status == SSE_UNKNOWN)
    check_for_sse_support();
  return sse_status == SSE_SUPPORTED;
}

#endif // AMD64

bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64

}

extern "C" int JVM_handle_solaris_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);

extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64

int JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

#ifndef AMD64
  if (sig == SIGILL && info->si_addr == (caddr_t)sse_check) {
    // the SSE instruction faulted. supports_sse() needs to return false.
    uc->uc_mcontext.gregs[EIP] = (greg_t)sse_unavailable;
    return true;
  }
#endif // !AMD64

  Thread* t = ThreadLocalStorage::get_thread_slow();  // slow & steady

  SignalHandlerMark shm(t);

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see 4229104 or 6499219",
                os::exception_name(sig, buf, sizeof(buf)));

      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;

  if (os::Solaris::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }

  guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");

  if (sig == os::Solaris::SIGasync()) {
    if (thread) {
      OSThread::InterruptArguments args(thread, uc);
      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
      return true;
    }
    else if (vmthread) {
      OSThread::InterruptArguments args(vmthread, uc);
      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
      return true;
    } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // If os::Solaris::SIGasync not chained, and this is a non-vm and
      // non-java thread
      return true;
    }
  }

  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
"bad siginfo"); duke@435: } duke@435: duke@435: // decide if this trap can be handled by a stub duke@435: address stub = NULL; duke@435: duke@435: address pc = NULL; duke@435: duke@435: //%note os_trap_1 duke@435: if (info != NULL && uc != NULL && thread != NULL) { duke@435: // factor me: getPCfromContext duke@435: pc = (address) uc->uc_mcontext.gregs[REG_PC]; duke@435: duke@435: // SafeFetch32() support duke@435: if (pc == (address) Fetch32PFI) { duke@435: uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ; duke@435: return true ; duke@435: } duke@435: #ifdef AMD64 duke@435: if (pc == (address) FetchNPFI) { duke@435: uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ; duke@435: return true ; duke@435: } duke@435: #endif // AMD64 duke@435: duke@435: // Handle ALL stack overflow variations here duke@435: if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) { duke@435: address addr = (address) info->si_addr; duke@435: if (thread->in_stack_yellow_zone(addr)) { duke@435: thread->disable_stack_yellow_zone(); duke@435: if (thread->thread_state() == _thread_in_Java) { duke@435: // Throw a stack overflow exception. Guard pages will be reenabled duke@435: // while unwinding the stack. duke@435: stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); duke@435: } else { duke@435: // Thread was in the vm or native code. Return and try to finish. duke@435: return true; duke@435: } duke@435: } else if (thread->in_stack_red_zone(addr)) { duke@435: // Fatal red zone violation. Disable the guard pages and fall through duke@435: // to handle_unexpected_exception way down below. duke@435: thread->disable_stack_red_zone(); duke@435: tty->print_raw_cr("An irrecoverable stack overflow has occurred."); duke@435: } duke@435: } duke@435: duke@435: if (thread->thread_state() == _thread_in_vm) { duke@435: if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) { duke@435: stub = StubRoutines::handler_for_unsafe_access(); duke@435: } duke@435: } duke@435: duke@435: if (thread->thread_state() == _thread_in_Java) { duke@435: // Support Safepoint Polling duke@435: if ( sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) { duke@435: stub = SharedRuntime::get_poll_stub(pc); duke@435: } duke@435: else if (sig == SIGBUS && info->si_code == BUS_OBJERR) { duke@435: // BugId 4454115: A read from a MappedByteBuffer can fault duke@435: // here if the underlying file has been truncated. duke@435: // Do not crash the VM in such a case. duke@435: CodeBlob* cb = CodeCache::find_blob_unsafe(pc); duke@435: nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; duke@435: if (nm != NULL && nm->has_unsafe_access()) { duke@435: stub = StubRoutines::handler_for_unsafe_access(); duke@435: } duke@435: } duke@435: else duke@435: if (sig == SIGFPE && info->si_code == FPE_INTDIV) { duke@435: // integer divide by zero duke@435: stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); duke@435: } duke@435: #ifndef AMD64 duke@435: else if (sig == SIGFPE && info->si_code == FPE_FLTDIV) { duke@435: // floating-point divide by zero duke@435: stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); duke@435: } duke@435: else if (sig == SIGFPE && info->si_code == FPE_FLTINV) { duke@435: // The encoding of D2I in i486.ad can cause an exception prior duke@435: // to the fist instruction if there was an invalid operation duke@435: // pending. 
        // We want to dismiss that exception. From the win_32
        // side it also seems that if it really was the fist causing
        // the exception that we do the d2i by hand with different
        // rounding. Seems kind of weird. QQQ TODO
        // Note that we take the exception at the NEXT floating point instruction.
        if (pc[0] == 0xDB) {
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else {
          assert(pc[-3] == 0xDB, "not an flt invalid opcode");
          assert(pc[-2] == 0x14, "not an flt invalid opcode");
          assert(pc[-1] == 0x24, "not an flt invalid opcode");
        }
      }
      else if (sig == SIGFPE ) {
        tty->print_cr("caught SIGFPE, info 0x%x.", info->si_code);
      }
#endif // !AMD64

      // QQQ It doesn't seem that we need to do this on x86 because we should be able
      // to return properly from the handler without this extra stuff on the back side.

      else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    }

    // jni_fast_GetField can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address)info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // Execution protection violation
  //
  // Preventative code for future versions of Solaris which may
  // enable execution protection when running the 32-bit VM on AMD64.
  //
  // This should be kept as the last step in the triage.  We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // this si_code is so generic that it is almost meaningless; and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[TRAPNO] == T_PGFLT) {  // page fault
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = (address) uc->uc_mcontext.gregs[REG_PC];
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Unguard and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::unguard_memory((char*) page_start, page_size);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here.  Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM.  Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop.  This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution.  It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it

    if (thread != NULL) thread->set_saved_exception_pc(pc);
    // 12/02/99: On Sparc it appears that the full context is also saved
    // but as yet, no one looks at or restores that saved context
    // factor me: setPC
    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Solaris::chained_handler(sig, info, ucVoid)) {
    return true;
  }

#ifndef AMD64
  // Workaround (bug 4900493) for Solaris kernel bug 4966651.
  // Handle an undefined selector caused by an attempt to assign
  // fs in libthread getipriptr(). With the current libthread design every 512
  // thread creations the LDT for a private thread data structure is extended
  // and there is a hazard that another thread attempting a thread creation
  // will use a stale LDTR that doesn't reflect the structure's growth,
  // causing a GP fault.
  // Enforce the probable limit of passes through here to guard against an
  // infinite loop if some other move to fs caused the GP fault. Note that
  // this loop counter is ultimately a heuristic as it is possible for
  // more than one thread to generate this fault at a time in an MP system.
  // In the case of the loop count being exceeded or if the poll fails
  // just fall through to a fatal error.
  // If there is some other source of T_GPFLT traps and the text at EIP is
  // unreadable this code will loop infinitely until the stack is exhausted.
  // The key to diagnosis in this case is to look for the bottom signal handler
  // frame.

  if (!IgnoreLibthreadGPFault) {
    if (sig == SIGSEGV && uc->uc_mcontext.gregs[TRAPNO] == T_GPFLT) {
      const unsigned char *p =
        (unsigned const char *) uc->uc_mcontext.gregs[EIP];

      // Expected instruction?

      if (p[0] == movlfs[0] && p[1] == movlfs[1]) {

        Atomic::inc(&ldtr_refresh);

        // Infinite loop?

        if (ldtr_refresh < ((2 << 16) / PAGESIZE)) {

          // No, force scheduling to get a fresh view of the LDTR

          if (poll(NULL, 0, 10) == 0) {

            // Retry the move

            return false;
          }
        }
      }
    }
  }
#endif // !AMD64

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (!os::Solaris::libjsig_is_loaded) {
    struct sigaction oldAct;
    sigaction(sig, (struct sigaction *)0, &oldAct);
    if (oldAct.sa_sigaction != signalHandler) {
      void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                          : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
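      // The installed handler is not the VM's signalHandler, so some other
      // code has replaced it; warn before reporting the fatal error.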
      warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand);
    }
  }

  if (pc == NULL && uc != NULL) {
    pc = (address) uc->uc_mcontext.gregs[REG_PC];
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(", R8=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->print(", R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
  address pc = epc.pc();
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
}

#ifdef AMD64
void os::Solaris::init_thread_fpu_state(void) {
  // Nothing to do
}
#else
// From solaris_i486.s
extern "C" void fixcw();

void os::Solaris::init_thread_fpu_state(void) {
  // Set fpu to 53 bit precision. This happens too early to use a stub.
  fixcw();
}

// These routines are the initial value of atomic_xchg_entry(),
// atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
// until initialization is complete.
// TODO - replace with .il implementation when compiler supports it.

typedef jint  xchg_func_t        (jint,  volatile jint*);
typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
typedef jint  add_func_t         (jint,  volatile jint*);
typedef void  fence_func_t       ();

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
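    // The function pointer is patched to the generated stub, so subsequent
    // calls bypass this bootstrap path entirely.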
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

void os::fence_bootstrap() {
  // try to use the stub:
  fence_func_t* func = CAST_TO_FN_PTR(fence_func_t*, StubRoutines::fence_entry());

  if (func != NULL) {
    os::fence_func = func;
    (*func)();
    return;
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  // don't have to do anything for a single thread
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
fence_func_t*        os::fence_func               = os::fence_bootstrap;

extern "C" void _solaris_raw_setup_fpu(address ptr);
void os::setup_fpu() {
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  _solaris_raw_setup_fpu(fpu_cntrl);
}
#endif // AMD64