/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_solaris.h"
#include "memory/allocation.inline.hpp"
#include "mutex_solaris.inline.hpp"
#include "os_share_solaris.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include
# include     // see comment in

#ifndef AMD64
// QQQ seems useless at this point
# define _STRUCTURED_PROC 1   // this gets us the new structured proc interfaces of 5.6 & later
#endif // AMD64
# include     // see comment in


#define MAX_PATH (2 * K)

// Minimum stack size for the VM.  It's easier to document a constant value
// but it's different for x86 and sparc because the page sizes are different.
#ifdef AMD64
size_t os::Solaris::min_stack_allowed = 224*K;
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#else
size_t os::Solaris::min_stack_allowed = 64*K;
#define REG_SP UESP
#define REG_PC EIP
#define REG_FP EBP

// 4900493 counter to prevent runaway LDTR refresh attempt
static volatile int ldtr_refresh = 0;

// the libthread instruction that faults because of the stale LDTR
static const unsigned char movlfs[] = { 0x8e, 0xe0    // movl %eax,%fs
                                      };
#endif // AMD64

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*) -1;
}

//
// Validate a ucontext retrieved from walking a uc_link of a ucontext.
// There are issues with libthread giving out uc_links for different threads
// on the same uc_link chain and bad or circular links.
//
bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) {
  if (valid >= suspect ||
      valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags ||
      valid->uc_stack.ss_sp    != suspect->uc_stack.ss_sp    ||
      valid->uc_stack.ss_size  != suspect->uc_stack.ss_size) {
    DEBUG_ONLY(tty->print_cr("valid_ucontext: failed test 1");)
    return false;
  }

  if (thread->is_Java_thread()) {
    if (!valid_stack_address(thread, (address)suspect)) {
      DEBUG_ONLY(tty->print_cr("valid_ucontext: uc_link not in thread stack");)
      return false;
    }
    if (!valid_stack_address(thread, (address) suspect->uc_mcontext.gregs[REG_SP])) {
      DEBUG_ONLY(tty->print_cr("valid_ucontext: stackpointer not in thread stack");)
      return false;
    }
  }
  return true;
}

// We will only follow one level of uc_link since there are libthread
// issues with ucontext linking and it is better to be safe and just
// let caller retry later.
ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread,
                                                        ucontext_t *uc) {

  ucontext_t *retuc = NULL;

  if (uc != NULL) {
    if (uc->uc_link == NULL) {
      // cannot validate without uc_link so accept current ucontext
      retuc = uc;
    } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
      // first ucontext is valid so try the next one
      uc = uc->uc_link;
      if (uc->uc_link == NULL) {
        // cannot validate without uc_link so accept current ucontext
        retuc = uc;
      } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) {
        // the ucontext one level down is also valid so return it
        retuc = uc;
      }
    }
  }
  return retuc;
}

// Assumes ucontext is valid
ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) {
  return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]);
}

// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

// Assumes ucontext is valid
intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}

address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
  return (address) uc->uc_mcontext.gregs[REG_PC];
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
//
// The difference between this and os::fetch_frame_from_context() is that
// here we try to skip nested signal frames.
ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc);
  return os::fetch_frame_from_context(luc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
  intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  ucontext_t *uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = os::Solaris::ucontext_get_ExtendedPC(uc);
    if (ret_sp) *ret_sp = os::Solaris::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Solaris::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

extern "C" intptr_t *_get_current_sp();  // in .il file

address os::current_stack_pointer() {
  return (address)_get_current_sp();
}

extern "C" intptr_t *_get_current_fp();  // in .il file
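// Illustrative sketch only, kept out of the build with #if 0: how a
// SIGPROF-style profiling handler could use the helpers above
// (get_valid_uc_in_signal_handler / fetch_frame_from_ucontext) to recover a
// frame from the interrupted context. The handler name and the way the
// Thread* is obtained are assumptions for the sketch, not HotSpot API.
#if 0
static void profiling_signal_handler_sketch(int sig, siginfo_t* info, void* ucVoid) {
  Thread* t = ThreadLocalStorage::get_thread_slow();
  if (t == NULL || !t->is_Java_thread()) return;

  intptr_t* sp = NULL;
  intptr_t* fp = NULL;
  // Skips one nested signal frame by following uc_link when it validates.
  ExtendedPC epc = os::Solaris::fetch_frame_from_ucontext(t, (ucontext_t*)ucVoid, &sp, &fp);
  if (epc.pc() != NULL) {
    frame fr(sp, fp, epc.pc());
    // ... walk sender frames from fr to build a call trace ...
  }
}
#endif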
frame os::current_frame() {
  intptr_t* fp = _get_current_fp();  // it's inlined so want current fp
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    frame ret;   // This will be a null useless frame
    return ret;
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
  char lwpstatusfile[PROCFILE_LENGTH];
  int lwpfd, err;

  if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs))
    return (err);
  if (*flags == TRS_LWPID) {
    sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(),
            *lwp);
    if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) {
      perror("thr_mutator_status: open lwpstatus");
      return (EINVAL);
    }
    if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) !=
        sizeof (lwpstatus_t)) {
      perror("thr_mutator_status: read lwpstatus");
      (void) close(lwpfd);
      return (EINVAL);
    }
    (void) close(lwpfd);
  }
  return (0);
}

#ifndef AMD64

// Detecting SSE support by OS
// From solaris_i486.s
extern "C" bool sse_check();
extern "C" bool sse_unavailable();

enum { SSE_UNKNOWN, SSE_NOT_SUPPORTED, SSE_SUPPORTED};
static int sse_status = SSE_UNKNOWN;


static void check_for_sse_support() {
  if (!VM_Version::supports_sse()) {
    sse_status = SSE_NOT_SUPPORTED;
    return;
  }
  // looking for _sse_hw in libc.so, if it does not exist or
  // the value (int) is 0, OS has no support for SSE
  int *sse_hwp;
  void *h;

  if ((h = dlopen("/usr/lib/libc.so", RTLD_LAZY)) == NULL) {
    // open failed, presume no support for SSE
    sse_status = SSE_NOT_SUPPORTED;
    return;
  }
  if ((sse_hwp = (int *)dlsym(h, "_sse_hw")) == NULL) {
    sse_status = SSE_NOT_SUPPORTED;
  } else if (*sse_hwp == 0) {
    sse_status = SSE_NOT_SUPPORTED;
  }
  dlclose(h);

  if (sse_status == SSE_UNKNOWN) {
    bool (*try_sse)() = (bool (*)())sse_check;
    sse_status = (*try_sse)() ?
                 SSE_SUPPORTED : SSE_NOT_SUPPORTED;
  }

}

#endif // AMD64

bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  if (sse_status == SSE_UNKNOWN)
    check_for_sse_support();
  return sse_status == SSE_SUPPORTED;
#endif // AMD64
}

bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64

}

extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
                          int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

#ifndef AMD64
  if (sig == SIGILL && info->si_addr == (caddr_t)sse_check) {
    // the SSE instruction faulted; supports_sse() needs to return false.
    uc->uc_mcontext.gregs[EIP] = (greg_t)sse_unavailable;
    return true;
  }
#endif // !AMD64

  Thread* t = ThreadLocalStorage::get_thread_slow();  // slow & steady

  // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
  // (no destructors can be run)
  os::WatcherThreadCrashProtection::check_crash_protection(sig, t);

  SignalHandlerMark shm(t);

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see 4229104 or 6499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;

  if (os::Solaris::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      } else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }

  guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");

  if (sig == os::Solaris::SIGasync()) {
    if (thread || vmthread) {
      OSThread::SR_handler(t, uc);
      return true;
    } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // os::Solaris::SIGasync is not chained, and this is a non-VM and
      // non-Java thread, so consume the signal here.
      return true;
    }
  }

  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }

  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    // factor me: getPCfromContext
    pc = (address) uc->uc_mcontext.gregs[REG_PC];

    if (StubRoutines::is_safefetch_fault(pc)) {
      uc->uc_mcontext.gregs[REG_PC] =
          intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
      return true;
    }

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
      address addr = (address) info->si_addr;
      if (thread->in_stack_yellow_zone(addr)) {
        thread->disable_stack_yellow_zone();
        if (thread->thread_state() == _thread_in_Java) {
          // Throw a stack overflow exception.  Guard pages will be reenabled
          // while unwinding the stack.
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
        } else {
          // Thread was in the vm or native code.  Return and try to finish.
          return true;
        }
      } else if (thread->in_stack_red_zone(addr)) {
        // Fatal red zone violation.  Disable the guard pages and fall through
        // to handle_unexpected_exception way down below.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
      }
    }

    if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr(pc)) {
      // Verify that the OS saves/restores AVX registers.
      stub = VM_Version::cpuinfo_cont_addr();
    }

    if (thread->thread_state() == _thread_in_vm) {
      if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) {
        stub = StubRoutines::handler_for_unsafe_access();
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Support Safepoint Polling
      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      }
      else if (sig == SIGBUS && info->si_code == BUS_OBJERR) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        if (cb != NULL) {
          nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
          if (nm != NULL && nm->has_unsafe_access()) {
            stub = StubRoutines::handler_for_unsafe_access();
          }
        }
      }
      else
      if (sig == SIGFPE && info->si_code == FPE_INTDIV) {
        // integer divide by zero
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      }
#ifndef AMD64
      else if (sig == SIGFPE && info->si_code == FPE_FLTDIV) {
        // floating-point divide by zero
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      }
      else if (sig == SIGFPE && info->si_code == FPE_FLTINV) {
        // The encoding of D2I in i486.ad can cause an exception prior
        // to the fist instruction if there was an invalid operation
        // pending.  We want to dismiss that exception.  From the win_32
        // side it also seems that if it really was the fist causing
        // the exception that we do the d2i by hand with different
        // rounding.  Seems kind of weird.  QQQ TODO
        // Note that we take the exception at the NEXT floating point instruction.
        if (pc[0] == 0xDB) {
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else {
          assert(pc[-3] == 0xDB, "not an flt invalid opcode");
          assert(pc[-2] == 0x14, "not an flt invalid opcode");
          assert(pc[-1] == 0x24, "not an flt invalid opcode");
        }
      }
      else if (sig == SIGFPE) {
        tty->print_cr("caught SIGFPE, info 0x%x.", info->si_code);
      }
#endif // !AMD64

      // QQQ It doesn't seem that we need to do this on x86 because we should be able
      // to return properly from the handler without this extra stuff on the back side.

      else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    }

    // jni_fast_GetField can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address)info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // Execution protection violation
  //
  // Preventative code for future versions of Solaris which may
  // enable execution protection when running the 32-bit VM on AMD64.
  //
  // This should be kept as the last step in the triage.  We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here;
  // this si_code is so generic that it is almost meaningless, and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[TRAPNO] == T_PGFLT) {  // page fault
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = (address) uc->uc_mcontext.gregs[REG_PC];
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Make memory rwx and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::protect_memory((char*) page_start, page_size,
                                      os::MEM_PROT_RWX);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here.  Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM.  Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop.  This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution.  It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it

    if (thread != NULL) thread->set_saved_exception_pc(pc);
    // 12/02/99: On Sparc it appears that the full context is also saved
    // but as yet, no one looks at or restores that saved context
    // factor me: setPC
    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Solaris::chained_handler(sig, info, ucVoid)) {
    return true;
  }

#ifndef AMD64
  // Workaround (bug 4900493) for Solaris kernel bug 4966651.
  // Handle an undefined selector caused by an attempt to assign
  // fs in libthread getipriptr().  With the current libthread design every 512
  // thread creations the LDT for a private thread data structure is extended
  // and there is a hazard that another thread attempting a thread creation
  // will use a stale LDTR that doesn't reflect the structure's growth,
  // causing a GP fault.
  // Enforce the probable limit of passes through here to guard against an
  // infinite loop if some other move to fs caused the GP fault.
  // Note that this loop counter is ultimately a heuristic as it is possible
  // for more than one thread to generate this fault at a time in an MP system.
  // In the case of the loop count being exceeded or if the poll fails
  // just fall through to a fatal error.
  // If there is some other source of T_GPFLT traps and the text at EIP is
  // unreadable this code will loop infinitely until the stack is exhausted.
  // The key to diagnosis in this case is to look for the bottom signal handler
  // frame.

  if (!IgnoreLibthreadGPFault) {
    if (sig == SIGSEGV && uc->uc_mcontext.gregs[TRAPNO] == T_GPFLT) {
      const unsigned char *p =
        (unsigned const char *) uc->uc_mcontext.gregs[EIP];

      // Expected instruction?

      if (p[0] == movlfs[0] && p[1] == movlfs[1]) {

        Atomic::inc(&ldtr_refresh);

        // Infinite loop?

        if (ldtr_refresh < ((2 << 16) / PAGESIZE)) {

          // No, force scheduling to get a fresh view of the LDTR

          if (poll(NULL, 0, 10) == 0) {

            // Retry the move

            return false;
          }
        }
      }
    }
  }
#endif // !AMD64

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (!os::Solaris::libjsig_is_loaded) {
    struct sigaction oldAct;
    sigaction(sig, (struct sigaction *)0, &oldAct);
    if (oldAct.sa_sigaction != signalHandler) {
      void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                                          : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
      warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand);
    }
  }

  if (pc == NULL && uc != NULL) {
    pc = (address) uc->uc_mcontext.gregs[REG_PC];
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  // Determine which sort of error to throw.  Out of swap may signal
  // on the thread stack, which could get a mapping error when touched.
  address addr = (address) info->si_addr;
  if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) {
    vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack.");
  }

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
  return false;
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc.  For example, pc may
  // point to garbage if entry point in an nmethod is corrupted.  Leave
  // this at the end, and hope for the best.
  ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
  address pc = epc.pc();
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}

void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
  st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
  st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
  st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
  st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
  st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
  st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
  st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
  st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
  st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
  st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
  st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
  st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
  st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
  st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
  st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
  st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[EAX]);
  st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[EBX]);
  st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[ECX]);
  st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[EDX]);
  st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[UESP]);
  st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[EBP]);
  st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[ESI]);
  st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[EDI]);
#endif

  st->cr();
}


#ifdef AMD64
void os::Solaris::init_thread_fpu_state(void) {
  // Nothing to do
}
#else
// From solaris_i486.s
extern "C" void fixcw();

void os::Solaris::init_thread_fpu_state(void) {
  // Set fpu to 53 bit precision.  This happens too early to use a stub.
  fixcw();
}

// These routines are the initial value of atomic_xchg_entry(),
// atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
// until initialization is complete.
// TODO - replace with .il implementation when compiler supports it.
typedef jint  xchg_func_t        (jint,  volatile jint*);
typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
typedef jint  add_func_t         (jint,  volatile jint*);

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;

extern "C" void _solaris_raw_setup_fpu(address ptr);
void os::setup_fpu() {
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  _solaris_raw_setup_fpu(fpu_cntrl);
}
#endif // AMD64

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif
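
// Illustrative sketch only, kept out of the build with #if 0: the
// bootstrap-then-patch pattern used by the atomic_*_bootstrap routines above.
// Callers dispatch through a function pointer that initially targets the
// bootstrap routine; once the generated stub exists, the pointer is patched
// so later calls go straight to the stub. The names sketch_func_t,
// sketch_entry and sketch_bootstrap are hypothetical, not HotSpot API.
#if 0
typedef jint sketch_func_t(jint, volatile jint*);

static jint sketch_bootstrap(jint exchange_value, volatile jint* dest);
static sketch_func_t* sketch_entry = sketch_bootstrap;   // analogous to os::atomic_xchg_func

static jint sketch_bootstrap(jint exchange_value, volatile jint* dest) {
  // If the generated stub is available by now, patch the entry point and use it.
  sketch_func_t* stub = CAST_TO_FN_PTR(sketch_func_t*, StubRoutines::atomic_xchg_entry());
  if (stub != NULL) {
    sketch_entry = stub;
    return (*stub)(exchange_value, dest);
  }
  // Otherwise fall back to plain memory operations; the real routines assert
  // Threads::number_of_threads() == 0 before taking this single-threaded path.
  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

// Caller side always goes through the pointer, so the patched stub is picked
// up transparently once it has been generated:
//   jint prev = (*sketch_entry)(new_value, &shared_word);
#endif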