/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
# include <fpu_control.h>

#ifdef AMD64
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
#define REG_SP REG_UESP
#define REG_PC REG_EIP
#define REG_FP REG_EBP
#define SPELL_REG_SP "esp"
#define SPELL_REG_FP "ebp"
#endif // AMD64
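
// REG_SP/REG_PC/REG_FP select the gregs[] slots holding the interrupted
// context's stack pointer, program counter and frame pointer, while
// SPELL_REG_SP/SPELL_REG_FP name the registers for the inline assembly below.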

address os::current_stack_pointer() {
#ifdef SPARC_WORKS
  register void *esp;
  __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
  return (address) ((char*)esp + sizeof(long)*2);
#else
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
#endif
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) -1;
}

void os::initialize_thread(Thread* thr) {
// Nothing to do.
}

address os::Linux::ucontext_get_pc(ucontext_t * uc) {
  return (address)uc->uc_mcontext.gregs[REG_PC];
}

intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
}

intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) {
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  ucontext_t* uc = (ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack.
// It may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

intptr_t* _get_previous_fp() {
#ifdef SPARC_WORKS
  register intptr_t **ebp;
  __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
#else
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif
  return (intptr_t*) *ebp;   // we want what it points to.
}
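
// os::current_frame() below returns the frame of the code that called into
// this function, built from the current stack pointer, the saved frame
// pointer and this function's own pc.  Used, for example, when walking
// native frames for error reporting; an empty frame is returned if the
// stack is not walkable from here.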

frame os::current_frame() {
  intptr_t* fp = _get_previous_fp();
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// Utility functions

// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE
};

extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64

extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install
  // and then restore certain signal handlers (e.g. to temporarily block
  // SIGPIPE, or have a SIGILL handler when detecting CPU type). When that
  // happens, JVM_handle_linux_signal() might be invoked with junk
  // info/ucVoid. To avoid an unnecessary crash when libjsig is not
  // preloaded, try to handle signals that do not require siginfo/ucontext
  // first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL ){
      if(t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if(t->is_VM_thread()){
        vmthread = (VMThread *)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc          = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    if (pc == (address) Fetch32PFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
       return 1 ;
    }
#ifdef AMD64
    if (pc == (address) FetchNPFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
       return 1 ;
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception.  Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code.  Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation.  Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
             thread->osthread()->set_expanding_stack();
             if (os::Linux::manually_expand_stack(thread, addr)) {
               thread->osthread()->clear_expanding_stack();
               return 1;
             }
             thread->osthread()->clear_expanding_stack();
          } else {
             fatal("recursive segv. expanding stack.");
          }
        }
      }
    }
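
    // A SIGSEGV on the safepoint polling page is the normal way compiled
    // code is stopped at a safepoint: the VM protects the page, the poll
    // instruction in compiled code faults, and the handler below redirects
    // execution to the poll stub.  The other checks in this branch map
    // implicit null checks, implicit divide-by-zero and faulting unsafe
    // accesses to their continuation stubs.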
    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = StubRoutines::handler_for_unsafe_access();
        }
      }
      else

#ifdef AMD64
      if (sig == SIGFPE  &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
#else
      if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
        // HACK: si_code does not work on linux 2.2.12-20!!!
        int op = pc[0];
        if (op == 0xDB) {
          // FIST
          // TODO: The encoding of D2I in i486.ad can cause an exception
          // prior to the fist instruction if there was an invalid operation
          // pending. We want to dismiss that exception. From the win_32
          // side it also seems that if it really was the fist causing
          // the exception that we do the d2i by hand with different
          // rounding. Seems kind of weird.
          // NOTE: that we take the exception at the NEXT floating point instruction.
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else if (op == 0xF7) {
          // IDIV
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          //   that can generate SIGFPE signal on linux.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif // AMD64
      } else if (sig == SIGSEGV &&
                 !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
          // Determination of interpreter/vtable stub/compiled code null exception
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
        stub = StubRoutines::handler_for_unsafe_access();
    }

    // jni_fast_GetField can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

#ifndef AMD64
  // Execution protection violation
  //
  // This should be kept as the last step in the triage.  We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // this si_code is so generic that it is almost meaningless; and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Linux::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Set memory to RWX and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::protect_memory((char*) page_start, page_size,
                                      os::MEM_PROT_RWX);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here.  Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM.  Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop.  This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution.  It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }
#endif // !AMD64
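
  // If a continuation stub was found, record the faulting pc in the thread
  // and patch the context's program counter; when the handler returns,
  // sigreturn() resumes the thread in the stub rather than re-executing the
  // faulting instruction.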
  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
     return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
}

void os::Linux::init_thread_fpu_state(void) {
#ifndef AMD64
  // set fpu to 53 bit precision
  set_fpu_control_word(0x27f);
#endif // !AMD64
}

int os::Linux::get_fpu_control_word(void) {
#ifdef AMD64
  return 0;
#else
  int fpu_control;
  _FPU_GETCW(fpu_control);
  return fpu_control & 0xffff;
#endif // AMD64
}

void os::Linux::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
  _FPU_SETCW(fpu_control);
#endif // !AMD64
}

// Check that the linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  struct utsname uts;
  if( uname(&uts) != 0 ) return false; // uname fails?
  char *minor_string;
  int major = strtol(uts.release,&minor_string,10);
  int minor = strtol(minor_string+1,NULL,10);
  bool result = (major > 2 || (major==2 && minor >= 4));
#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
               major,minor, result ? "DOES" : "does NOT");
  }
#endif
  return result;
#endif // AMD64
}
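
// On 32-bit Linux a request close to or above 2G may not be mappable in the
// process address space, so os::is_allocatable() probes by actually
// reserving (and immediately releasing) the memory rather than guessing.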
bool os::is_allocatable(size_t bytes) {
#ifdef AMD64
  // unused on amd64?
  return true;
#else

  if (bytes < 2 * G) {
    return true;
  }

  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // AMD64
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

#ifdef AMD64
size_t os::Linux::min_stack_allowed  = 64 * K;

// amd64: pthread on amd64 is always in floating stack mode
bool os::Linux::supports_variable_stack_size() {  return true; }
#else
size_t os::Linux::min_stack_allowed  =  (48 DEBUG_ONLY(+4))*K;

#ifdef __GNUC__
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
#endif

// Test if pthread library can support variable thread stack size. LinuxThreads
// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
// in floating stack mode and NPTL support variable stack size.
bool os::Linux::supports_variable_stack_size() {
  if (os::Linux::is_NPTL()) {
     // NPTL, yes
     return true;

  } else {
    // Note: We can't control default stack size when creating a thread.
    // If we use non-default stack size (pthread_attr_setstacksize), both
    // floating stack and non-floating stack LinuxThreads will return the
    // same value. This makes it impossible to implement this function by
    // detecting thread stack size directly.
    //
    // An alternative approach is to check %gs. Fixed-stack LinuxThreads
    // do not use %gs, so its value is 0. Floating-stack LinuxThreads use
    // %gs (either as LDT selector or GDT selector, depending on kernel)
    // to access thread specific data.
    //
    // Note that %gs is a reserved glibc register since early 2001, so
    // applications are not allowed to change its value (Ulrich Drepper from
    // Redhat confirmed that all known offenders have been modified to use
    // either %fs or TSD). In the worst case scenario, when VM is embedded in
    // a native application that plays with %gs, we might see non-zero %gs
    // even if LinuxThreads is running in fixed stack mode. As a result, we'll
    // return true and skip _thread_safety_check(), so we may not be able to
    // detect stack-heap collisions. But otherwise it's harmless.
    //
#ifdef __GNUC__
    return (GET_GS() != 0);
#else
    return false;
#endif
  }
}
#endif // AMD64

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
#ifdef AMD64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // AMD64
  return s;
}
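
// Note: default_guard_size() below only affects the glibc guard page
// requested when the VM creates a pthread (via pthread_attr_setguardsize());
// the HotSpot red/yellow zones shown in the diagram further down are
// reserved separately out of the thread's own stack.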
size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. A Java thread has its own
  // HotSpot guard pages, so only enable the glibc guard page for
  // non-Java threads.
  return (thr_type == java_thread ? 0 : page_size());
}

// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  JavaThread created by VM does not have glibc
//    |    glibc guard page    | - guard, attached Java thread usually has
//    |                        |/  1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red and yellow pages
//    |                        |/
//    +------------------------+ JavaThread::stack_yellow_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |    glibc guard page    | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack()

static void current_stack_region(address * bottom, size_t * size) {
  if (os::Linux::is_initial_thread()) {
     // initial thread needs special handling because pthread_getattr_np()
     // may return bogus value.
     *bottom = os::Linux::initial_thread_stack_bottom();
     *size   = os::Linux::initial_thread_stack_size();
  } else {
     pthread_attr_t attr;

     int rslt = pthread_getattr_np(pthread_self(), &attr);

     // JVM needs to know exact stack location, abort if it fails
     if (rslt != 0) {
       if (rslt == ENOMEM) {
         vm_exit_out_of_memory(0, "pthread_getattr_np");
       } else {
         fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
       }
     }

     if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
         fatal("Can not locate current stack attributes!");
     }

     pthread_attr_destroy(&attr);

  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler
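
// print_context() dumps the register state and the words at the top of the
// interrupted stack; print_register_info() additionally tries to describe
// what each general-purpose register points to.  Both are called with the
// crash context when a fatal error report (hs_err file) is written.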
void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
  st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
  st->cr();
  st->print("  TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}

void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
  st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
  st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
  st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
  st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
  st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
  st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
  st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
  st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
  st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
  st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
  st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
  st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
  st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
  st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
  st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
  st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
  st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
  st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
  st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
  st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
  st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
  st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
  st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
#endif // AMD64

  st->cr();
}

void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile (  "fldcw (%0)" :
                      : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif