src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp

author:      sla
date:        Fri, 08 Feb 2013 12:48:24 +0100
changeset:   4564:758935f7c23f
parent:      4528:12285410684f
child:       4731:71f13276159d
permissions: -rw-r--r--

8006423: SA: NullPointerException in sun.jvm.hotspot.debugger.bsd.BsdThread.getContext(BsdThread.java:67)
Summary: Do not rely on mach thread port names to identify threads from SA
Reviewed-by: dholmes, minqi, rbackman

     1 /*
     2  * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 // no precompiled headers
    26 #include "asm/macroAssembler.hpp"
    27 #include "classfile/classLoader.hpp"
    28 #include "classfile/systemDictionary.hpp"
    29 #include "classfile/vmSymbols.hpp"
    30 #include "code/icBuffer.hpp"
    31 #include "code/vtableStubs.hpp"
    32 #include "interpreter/interpreter.hpp"
    33 #include "jvm_bsd.h"
    34 #include "memory/allocation.inline.hpp"
    35 #include "mutex_bsd.inline.hpp"
    36 #include "os_share_bsd.hpp"
    37 #include "prims/jniFastGetField.hpp"
    38 #include "prims/jvm.h"
    39 #include "prims/jvm_misc.hpp"
    40 #include "runtime/arguments.hpp"
    41 #include "runtime/extendedPC.hpp"
    42 #include "runtime/frame.inline.hpp"
    43 #include "runtime/interfaceSupport.hpp"
    44 #include "runtime/java.hpp"
    45 #include "runtime/javaCalls.hpp"
    46 #include "runtime/mutexLocker.hpp"
    47 #include "runtime/osThread.hpp"
    48 #include "runtime/sharedRuntime.hpp"
    49 #include "runtime/stubRoutines.hpp"
    50 #include "runtime/thread.inline.hpp"
    51 #include "runtime/timer.hpp"
    52 #include "utilities/events.hpp"
    53 #include "utilities/vmError.hpp"
    55 // put OS-includes here
    56 # include <sys/types.h>
    57 # include <sys/mman.h>
    58 # include <pthread.h>
    59 # include <signal.h>
    60 # include <errno.h>
    61 # include <dlfcn.h>
    62 # include <stdlib.h>
    63 # include <stdio.h>
    64 # include <unistd.h>
    65 # include <sys/resource.h>
    66 # include <pthread.h>
    67 # include <sys/stat.h>
    68 # include <sys/time.h>
    69 # include <sys/utsname.h>
    70 # include <sys/socket.h>
    71 # include <sys/wait.h>
    72 # include <pwd.h>
    73 # include <poll.h>
    74 #ifndef __OpenBSD__
    75 # include <ucontext.h>
    76 #endif
    78 #if !defined(__APPLE__) && !defined(__NetBSD__)
    79 # include <pthread_np.h>
    80 #endif
    82 #ifdef AMD64
    83 #define SPELL_REG_SP "rsp"
    84 #define SPELL_REG_FP "rbp"
    85 #else
    86 #define SPELL_REG_SP "esp"
    87 #define SPELL_REG_FP "ebp"
    88 #endif // AMD64
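        // SPELL_REG_SP / SPELL_REG_FP are spliced into the inline-asm strings in
        // os::current_stack_pointer() and _get_previous_fp() below to name the
        // stack- and frame-pointer registers for the current word size.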
    90 #ifdef __FreeBSD__
    91 # define context_trapno uc_mcontext.mc_trapno
    92 # ifdef AMD64
    93 #  define context_pc uc_mcontext.mc_rip
    94 #  define context_sp uc_mcontext.mc_rsp
    95 #  define context_fp uc_mcontext.mc_rbp
    96 #  define context_rip uc_mcontext.mc_rip
    97 #  define context_rsp uc_mcontext.mc_rsp
    98 #  define context_rbp uc_mcontext.mc_rbp
    99 #  define context_rax uc_mcontext.mc_rax
   100 #  define context_rbx uc_mcontext.mc_rbx
   101 #  define context_rcx uc_mcontext.mc_rcx
   102 #  define context_rdx uc_mcontext.mc_rdx
   103 #  define context_rsi uc_mcontext.mc_rsi
   104 #  define context_rdi uc_mcontext.mc_rdi
   105 #  define context_r8  uc_mcontext.mc_r8
   106 #  define context_r9  uc_mcontext.mc_r9
   107 #  define context_r10 uc_mcontext.mc_r10
   108 #  define context_r11 uc_mcontext.mc_r11
   109 #  define context_r12 uc_mcontext.mc_r12
   110 #  define context_r13 uc_mcontext.mc_r13
   111 #  define context_r14 uc_mcontext.mc_r14
   112 #  define context_r15 uc_mcontext.mc_r15
   113 #  define context_flags uc_mcontext.mc_flags
   114 #  define context_err uc_mcontext.mc_err
   115 # else
   116 #  define context_pc uc_mcontext.mc_eip
   117 #  define context_sp uc_mcontext.mc_esp
   118 #  define context_fp uc_mcontext.mc_ebp
   119 #  define context_eip uc_mcontext.mc_eip
   120 #  define context_esp uc_mcontext.mc_esp
   121 #  define context_eax uc_mcontext.mc_eax
   122 #  define context_ebx uc_mcontext.mc_ebx
   123 #  define context_ecx uc_mcontext.mc_ecx
   124 #  define context_edx uc_mcontext.mc_edx
   125 #  define context_ebp uc_mcontext.mc_ebp
   126 #  define context_esi uc_mcontext.mc_esi
   127 #  define context_edi uc_mcontext.mc_edi
   128 #  define context_eflags uc_mcontext.mc_eflags
   129 #  define context_trapno uc_mcontext.mc_trapno
   130 # endif
   131 #endif
   133 #ifdef __APPLE__
   134 # if __DARWIN_UNIX03 && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_5)
   135   // 10.5 UNIX03 member name prefixes
   136   #define DU3_PREFIX(s, m) __ ## s.__ ## m
   137 # else
   138   #define DU3_PREFIX(s, m) s ## . ## m
   139 # endif
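        // For example, with the 10.5 UNIX03 prefixes uc->context_rip below expands
        // to uc->uc_mcontext->__ss.__rip; without them it expands to
        // uc->uc_mcontext->ss.rip.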
   141 # ifdef AMD64
   142 #  define context_pc context_rip
   143 #  define context_sp context_rsp
   144 #  define context_fp context_rbp
   145 #  define context_rip uc_mcontext->DU3_PREFIX(ss,rip)
   146 #  define context_rsp uc_mcontext->DU3_PREFIX(ss,rsp)
   147 #  define context_rax uc_mcontext->DU3_PREFIX(ss,rax)
   148 #  define context_rbx uc_mcontext->DU3_PREFIX(ss,rbx)
   149 #  define context_rcx uc_mcontext->DU3_PREFIX(ss,rcx)
   150 #  define context_rdx uc_mcontext->DU3_PREFIX(ss,rdx)
   151 #  define context_rbp uc_mcontext->DU3_PREFIX(ss,rbp)
   152 #  define context_rsi uc_mcontext->DU3_PREFIX(ss,rsi)
   153 #  define context_rdi uc_mcontext->DU3_PREFIX(ss,rdi)
   154 #  define context_r8  uc_mcontext->DU3_PREFIX(ss,r8)
   155 #  define context_r9  uc_mcontext->DU3_PREFIX(ss,r9)
   156 #  define context_r10 uc_mcontext->DU3_PREFIX(ss,r10)
   157 #  define context_r11 uc_mcontext->DU3_PREFIX(ss,r11)
   158 #  define context_r12 uc_mcontext->DU3_PREFIX(ss,r12)
   159 #  define context_r13 uc_mcontext->DU3_PREFIX(ss,r13)
   160 #  define context_r14 uc_mcontext->DU3_PREFIX(ss,r14)
   161 #  define context_r15 uc_mcontext->DU3_PREFIX(ss,r15)
   162 #  define context_flags uc_mcontext->DU3_PREFIX(ss,rflags)
   163 #  define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
   164 #  define context_err uc_mcontext->DU3_PREFIX(es,err)
   165 # else
   166 #  define context_pc context_eip
   167 #  define context_sp context_esp
   168 #  define context_fp context_ebp
   169 #  define context_eip uc_mcontext->DU3_PREFIX(ss,eip)
   170 #  define context_esp uc_mcontext->DU3_PREFIX(ss,esp)
   171 #  define context_eax uc_mcontext->DU3_PREFIX(ss,eax)
   172 #  define context_ebx uc_mcontext->DU3_PREFIX(ss,ebx)
   173 #  define context_ecx uc_mcontext->DU3_PREFIX(ss,ecx)
   174 #  define context_edx uc_mcontext->DU3_PREFIX(ss,edx)
   175 #  define context_ebp uc_mcontext->DU3_PREFIX(ss,ebp)
   176 #  define context_esi uc_mcontext->DU3_PREFIX(ss,esi)
   177 #  define context_edi uc_mcontext->DU3_PREFIX(ss,edi)
   178 #  define context_eflags uc_mcontext->DU3_PREFIX(ss,eflags)
   179 #  define context_trapno uc_mcontext->DU3_PREFIX(es,trapno)
   180 # endif
   181 #endif
   183 #ifdef __OpenBSD__
   184 # define context_trapno sc_trapno
   185 # ifdef AMD64
   186 #  define context_pc sc_rip
   187 #  define context_sp sc_rsp
   188 #  define context_fp sc_rbp
   189 #  define context_rip sc_rip
   190 #  define context_rsp sc_rsp
   191 #  define context_rbp sc_rbp
   192 #  define context_rax sc_rax
   193 #  define context_rbx sc_rbx
   194 #  define context_rcx sc_rcx
   195 #  define context_rdx sc_rdx
   196 #  define context_rsi sc_rsi
   197 #  define context_rdi sc_rdi
   198 #  define context_r8  sc_r8
   199 #  define context_r9  sc_r9
   200 #  define context_r10 sc_r10
   201 #  define context_r11 sc_r11
   202 #  define context_r12 sc_r12
   203 #  define context_r13 sc_r13
   204 #  define context_r14 sc_r14
   205 #  define context_r15 sc_r15
   206 #  define context_flags sc_rflags
   207 #  define context_err sc_err
   208 # else
   209 #  define context_pc sc_eip
   210 #  define context_sp sc_esp
   211 #  define context_fp sc_ebp
   212 #  define context_eip sc_eip
   213 #  define context_esp sc_esp
   214 #  define context_eax sc_eax
   215 #  define context_ebx sc_ebx
   216 #  define context_ecx sc_ecx
   217 #  define context_edx sc_edx
   218 #  define context_ebp sc_ebp
   219 #  define context_esi sc_esi
   220 #  define context_edi sc_edi
   221 #  define context_eflags sc_eflags
   222 #  define context_trapno sc_trapno
   223 # endif
   224 #endif
   226 #ifdef __NetBSD__
   227 # define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
   228 # ifdef AMD64
   229 #  define __register_t __greg_t
   230 #  define context_pc uc_mcontext.__gregs[_REG_RIP]
   231 #  define context_sp uc_mcontext.__gregs[_REG_URSP]
   232 #  define context_fp uc_mcontext.__gregs[_REG_RBP]
   233 #  define context_rip uc_mcontext.__gregs[_REG_RIP]
   234 #  define context_rsp uc_mcontext.__gregs[_REG_URSP]
   235 #  define context_rax uc_mcontext.__gregs[_REG_RAX]
   236 #  define context_rbx uc_mcontext.__gregs[_REG_RBX]
   237 #  define context_rcx uc_mcontext.__gregs[_REG_RCX]
   238 #  define context_rdx uc_mcontext.__gregs[_REG_RDX]
   239 #  define context_rbp uc_mcontext.__gregs[_REG_RBP]
   240 #  define context_rsi uc_mcontext.__gregs[_REG_RSI]
   241 #  define context_rdi uc_mcontext.__gregs[_REG_RDI]
   242 #  define context_r8  uc_mcontext.__gregs[_REG_R8]
   243 #  define context_r9  uc_mcontext.__gregs[_REG_R9]
   244 #  define context_r10 uc_mcontext.__gregs[_REG_R10]
   245 #  define context_r11 uc_mcontext.__gregs[_REG_R11]
   246 #  define context_r12 uc_mcontext.__gregs[_REG_R12]
   247 #  define context_r13 uc_mcontext.__gregs[_REG_R13]
   248 #  define context_r14 uc_mcontext.__gregs[_REG_R14]
   249 #  define context_r15 uc_mcontext.__gregs[_REG_R15]
   250 #  define context_flags uc_mcontext.__gregs[_REG_RFL]
   251 #  define context_err uc_mcontext.__gregs[_REG_ERR]
   252 # else
   253 #  define context_pc uc_mcontext.__gregs[_REG_EIP]
   254 #  define context_sp uc_mcontext.__gregs[_REG_UESP]
   255 #  define context_fp uc_mcontext.__gregs[_REG_EBP]
   256 #  define context_eip uc_mcontext.__gregs[_REG_EIP]
   257 #  define context_esp uc_mcontext.__gregs[_REG_UESP]
   258 #  define context_eax uc_mcontext.__gregs[_REG_EAX]
   259 #  define context_ebx uc_mcontext.__gregs[_REG_EBX]
   260 #  define context_ecx uc_mcontext.__gregs[_REG_ECX]
   261 #  define context_edx uc_mcontext.__gregs[_REG_EDX]
   262 #  define context_ebp uc_mcontext.__gregs[_REG_EBP]
   263 #  define context_esi uc_mcontext.__gregs[_REG_ESI]
   264 #  define context_edi uc_mcontext.__gregs[_REG_EDI]
   265 #  define context_eflags uc_mcontext.__gregs[_REG_EFL]
   266 #  define context_trapno uc_mcontext.__gregs[_REG_TRAPNO]
   267 # endif
   268 #endif
   270 address os::current_stack_pointer() {
   271 #if defined(__clang__) || defined(__llvm__)
   272   register void *esp;
   273   __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
   274   return (address) esp;
   275 #elif defined(SPARC_WORKS)
   276   register void *esp;
   277   __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
   278   return (address) ((char*)esp + sizeof(long)*2);
   279 #else
   280   register void *esp __asm__ (SPELL_REG_SP);
   281   return (address) esp;
   282 #endif
   283 }
   285 char* os::non_memory_address_word() {
   286   // Must never look like an address returned by reserve_memory,
   287   // even in its subfields (as defined by the CPU immediate fields,
   288   // if the CPU splits constants across multiple instructions).
   290   return (char*) -1;
   291 }
   293 void os::initialize_thread(Thread* thr) {
   294 // Nothing to do.
   295 }
   297 address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
   298   return (address)uc->context_pc;
   299 }
   301 intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
   302   return (intptr_t*)uc->context_sp;
   303 }
   305 intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
   306   return (intptr_t*)uc->context_fp;
   307 }
   309 // For Forte Analyzer AsyncGetCallTrace profiling support - thread
   310 // is currently interrupted by SIGPROF.
   311 // os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
   312 // frames. Currently we don't do that on Bsd, so it's the same as
   313 // os::fetch_frame_from_context().
   314 ExtendedPC os::Bsd::fetch_frame_from_ucontext(Thread* thread,
   315   ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {
   317   assert(thread != NULL, "just checking");
   318   assert(ret_sp != NULL, "just checking");
   319   assert(ret_fp != NULL, "just checking");
   321   return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
   322 }
   324 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
   325                     intptr_t** ret_sp, intptr_t** ret_fp) {
   327   ExtendedPC  epc;
   328   ucontext_t* uc = (ucontext_t*)ucVoid;
   330   if (uc != NULL) {
   331     epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc));
   332     if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
   333     if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
   334   } else {
   335     // construct empty ExtendedPC for return value checking
   336     epc = ExtendedPC(NULL);
   337     if (ret_sp) *ret_sp = (intptr_t *)NULL;
   338     if (ret_fp) *ret_fp = (intptr_t *)NULL;
   339   }
   341   return epc;
   342 }
   344 frame os::fetch_frame_from_context(void* ucVoid) {
   345   intptr_t* sp;
   346   intptr_t* fp;
   347   ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
   348   return frame(sp, fp, epc.pc());
   349 }
    351 // By default, gcc always saves the frame pointer (%ebp/%rbp) on the stack. It
    352 // may be turned off by -fomit-frame-pointer.
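        // get_sender_for_C_frame() and os::current_frame() below rely on that saved
        // frame pointer to walk native frames; frames compiled with the frame
        // pointer omitted cannot be walked this way.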
   353 frame os::get_sender_for_C_frame(frame* fr) {
   354   return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
   355 }
   357 intptr_t* _get_previous_fp() {
   358 #if defined(SPARC_WORKS) || defined(__clang__) || defined(__llvm__)
   359   register intptr_t **ebp;
   360   __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
   361 #else
   362   register intptr_t **ebp __asm__ (SPELL_REG_FP);
   363 #endif
   364   return (intptr_t*) *ebp;   // we want what it points to.
   365 }
   368 frame os::current_frame() {
   369   intptr_t* fp = _get_previous_fp();
   370   frame myframe((intptr_t*)os::current_stack_pointer(),
   371                 (intptr_t*)fp,
   372                 CAST_FROM_FN_PTR(address, os::current_frame));
   373   if (os::is_first_C_frame(&myframe)) {
   374     // stack is not walkable
   375     return frame();
   376   } else {
   377     return os::get_sender_for_C_frame(&myframe);
   378   }
   379 }
   381 // Utility functions
   383 // From IA32 System Programming Guide
   384 enum {
   385   trap_page_fault = 0xE
   386 };
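        // 0xE is the page-fault vector in the IA-32 exception table; it is compared
        // against context_trapno in the execution-protection check further below.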
   388 extern "C" void Fetch32PFI () ;
   389 extern "C" void Fetch32Resume () ;
   390 #ifdef AMD64
   391 extern "C" void FetchNPFI () ;
   392 extern "C" void FetchNResume () ;
   393 #endif // AMD64
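        // These symbols mark the potentially-faulting load and the resume point of
        // the SafeFetch32/SafeFetchN stubs in the platform assembly files; the
        // handler below rewrites the saved pc from the *PFI label to the matching
        // *Resume label so a faulting SafeFetch returns its caller-supplied default
        // value instead of crashing.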
   395 extern "C" JNIEXPORT int
   396 JVM_handle_bsd_signal(int sig,
   397                         siginfo_t* info,
   398                         void* ucVoid,
   399                         int abort_if_unrecognized) {
   400   ucontext_t* uc = (ucontext_t*) ucVoid;
   402   Thread* t = ThreadLocalStorage::get_thread_slow();
   404   SignalHandlerMark shm(t);
   406   // Note: it's not uncommon that JNI code uses signal/sigset to install
   407   // then restore certain signal handler (e.g. to temporarily block SIGPIPE,
   408   // or have a SIGILL handler when detecting CPU type). When that happens,
   409   // JVM_handle_bsd_signal() might be invoked with junk info/ucVoid. To
   410   // avoid unnecessary crash when libjsig is not preloaded, try handle signals
   411   // that do not require siginfo/ucontext first.
   413   if (sig == SIGPIPE || sig == SIGXFSZ) {
   414     // allow chained handler to go first
   415     if (os::Bsd::chained_handler(sig, info, ucVoid)) {
   416       return true;
   417     } else {
   418       if (PrintMiscellaneous && (WizardMode || Verbose)) {
   419         char buf[64];
   420         warning("Ignoring %s - see bugs 4229104 or 646499219",
   421                 os::exception_name(sig, buf, sizeof(buf)));
   422       }
   423       return true;
   424     }
   425   }
   427   JavaThread* thread = NULL;
   428   VMThread* vmthread = NULL;
   429   if (os::Bsd::signal_handlers_are_installed) {
   430     if (t != NULL ){
   431       if(t->is_Java_thread()) {
   432         thread = (JavaThread*)t;
   433       }
   434       else if(t->is_VM_thread()){
   435         vmthread = (VMThread *)t;
   436       }
   437     }
   438   }
   439 /*
   440   NOTE: does not seem to work on bsd.
   441   if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
   442     // can't decode this kind of signal
   443     info = NULL;
   444   } else {
   445     assert(sig == info->si_signo, "bad siginfo");
   446   }
   447 */
   448   // decide if this trap can be handled by a stub
   449   address stub = NULL;
   451   address pc          = NULL;
   453   //%note os_trap_1
   454   if (info != NULL && uc != NULL && thread != NULL) {
   455     pc = (address) os::Bsd::ucontext_get_pc(uc);
   457     if (pc == (address) Fetch32PFI) {
   458        uc->context_pc = intptr_t(Fetch32Resume) ;
   459        return 1 ;
   460     }
   461 #ifdef AMD64
   462     if (pc == (address) FetchNPFI) {
   463        uc->context_pc = intptr_t (FetchNResume) ;
   464        return 1 ;
   465     }
   466 #endif // AMD64
   468     // Handle ALL stack overflow variations here
   469     if (sig == SIGSEGV || sig == SIGBUS) {
   470       address addr = (address) info->si_addr;
   472       // check if fault address is within thread stack
   473       if (addr < thread->stack_base() &&
   474           addr >= thread->stack_base() - thread->stack_size()) {
   475         // stack overflow
   476         if (thread->in_stack_yellow_zone(addr)) {
   477           thread->disable_stack_yellow_zone();
   478           if (thread->thread_state() == _thread_in_Java) {
   479             // Throw a stack overflow exception.  Guard pages will be reenabled
   480             // while unwinding the stack.
   481             stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
   482           } else {
   483             // Thread was in the vm or native code.  Return and try to finish.
   484             return 1;
   485           }
   486         } else if (thread->in_stack_red_zone(addr)) {
   487           // Fatal red zone violation.  Disable the guard pages and fall through
   488           // to handle_unexpected_exception way down below.
   489           thread->disable_stack_red_zone();
   490           tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
   491         }
   492       }
   493     }
   495     // We test if stub is already set (by the stack overflow code
   496     // above) so it is not overwritten by the code that follows. This
   497     // check is not required on other platforms, because on other
   498     // platforms we check for SIGSEGV only or SIGBUS only, where here
   499     // we have to check for both SIGSEGV and SIGBUS.
   500     if (thread->thread_state() == _thread_in_Java && stub == NULL) {
   501       // Java thread running in Java code => find exception handler if any
   502       // a fault inside compiled code, the interpreter, or a stub
   504       if ((sig == SIGSEGV || sig == SIGBUS) && os::is_poll_address((address)info->si_addr)) {
   505         stub = SharedRuntime::get_poll_stub(pc);
   506 #if defined(__APPLE__)
   507       // 32-bit Darwin reports a SIGBUS for nearly all memory access exceptions.
   508       // 64-bit Darwin may also use a SIGBUS (seen with compressed oops).
   509       // Catching SIGBUS here prevents the implicit SIGBUS NULL check below from
   510       // being called, so only do so if the implicit NULL check is not necessary.
   511       } else if (sig == SIGBUS && MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
   512 #else
   513       } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
   514 #endif
   515         // BugId 4454115: A read from a MappedByteBuffer can fault
   516         // here if the underlying file has been truncated.
   517         // Do not crash the VM in such a case.
   518         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
   519         nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
   520         if (nm != NULL && nm->has_unsafe_access()) {
   521           stub = StubRoutines::handler_for_unsafe_access();
   522         }
   523       }
   524       else
   526 #ifdef AMD64
   527       if (sig == SIGFPE  &&
   528           (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
   529         stub =
   530           SharedRuntime::
   531           continuation_for_implicit_exception(thread,
   532                                               pc,
   533                                               SharedRuntime::
   534                                               IMPLICIT_DIVIDE_BY_ZERO);
   535 #ifdef __APPLE__
   536       } else if (sig == SIGFPE && info->si_code == FPE_NOOP) {
   537         int op = pc[0];
   539         // Skip REX
   540         if ((pc[0] & 0xf0) == 0x40) {
   541           op = pc[1];
   542         } else {
   543           op = pc[0];
   544         }
   546         // Check for IDIV
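        // (0xF7 is the x86 "group 3" opcode byte; its /6 and /7 forms encode DIV
        //  and IDIV, and any REX prefix in 0x40-0x4F was already stepped over above.)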
   547         if (op == 0xF7) {
   548           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime:: IMPLICIT_DIVIDE_BY_ZERO);
   549         } else {
   550           // TODO: handle more cases if we are using other x86 instructions
   551           //   that can generate SIGFPE signal.
   552           tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
   553           fatal("please update this code.");
   554         }
   555 #endif /* __APPLE__ */
   557 #else
   558       if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
   559         // HACK: si_code does not work on bsd 2.2.12-20!!!
   560         int op = pc[0];
   561         if (op == 0xDB) {
   562           // FIST
   563           // TODO: The encoding of D2I in i486.ad can cause an exception
   564           // prior to the fist instruction if there was an invalid operation
   565           // pending. We want to dismiss that exception. From the win_32
   566           // side it also seems that if it really was the fist causing
   567           // the exception that we do the d2i by hand with different
   568           // rounding. Seems kind of weird.
   569           // NOTE: that we take the exception at the NEXT floating point instruction.
   570           assert(pc[0] == 0xDB, "not a FIST opcode");
   571           assert(pc[1] == 0x14, "not a FIST opcode");
   572           assert(pc[2] == 0x24, "not a FIST opcode");
   573           return true;
   574         } else if (op == 0xF7) {
   575           // IDIV
   576           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
   577         } else {
   578           // TODO: handle more cases if we are using other x86 instructions
   579           //   that can generate SIGFPE signal on bsd.
   580           tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
   581           fatal("please update this code.");
   582         }
   583 #endif // AMD64
   584       } else if ((sig == SIGSEGV || sig == SIGBUS) &&
   585                !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
   586           // Determination of interpreter/vtable stub/compiled code null exception
   587           stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
   588       }
   589     } else if (thread->thread_state() == _thread_in_vm &&
   590                sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
   591                thread->doing_unsafe_access()) {
   592         stub = StubRoutines::handler_for_unsafe_access();
   593     }
   595     // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
   596     // and the heap gets shrunk before the field access.
   597     if ((sig == SIGSEGV) || (sig == SIGBUS)) {
   598       address addr = JNI_FastGetField::find_slowcase_pc(pc);
   599       if (addr != (address)-1) {
   600         stub = addr;
   601       }
   602     }
   604     // Check to see if we caught the safepoint code in the
   605     // process of write protecting the memory serialization page.
   606     // It write enables the page immediately after protecting it
   607     // so we can just return to retry the write.
   608     if ((sig == SIGSEGV || sig == SIGBUS) &&
   609         os::is_memory_serialize_page(thread, (address) info->si_addr)) {
   610       // Block current thread until the memory serialize page permission restored.
   611       os::block_on_serialize_page_trap();
   612       return true;
   613     }
   614   }
   616 #ifndef AMD64
   617   // Execution protection violation
   618   //
   619   // This should be kept as the last step in the triage.  We don't
   620   // have a dedicated trap number for a no-execute fault, so be
   621   // conservative and allow other handlers the first shot.
   622   //
   623   // Note: We don't test that info->si_code == SEGV_ACCERR here.
   624   // this si_code is so generic that it is almost meaningless; and
   625   // the si_code for this condition may change in the future.
   626   // Furthermore, a false-positive should be harmless.
   627   if (UnguardOnExecutionViolation > 0 &&
   628       (sig == SIGSEGV || sig == SIGBUS) &&
   629       uc->context_trapno == trap_page_fault) {
   630     int page_size = os::vm_page_size();
   631     address addr = (address) info->si_addr;
   632     address pc = os::Bsd::ucontext_get_pc(uc);
   633     // Make sure the pc and the faulting address are sane.
   634     //
   635     // If an instruction spans a page boundary, and the page containing
   636     // the beginning of the instruction is executable but the following
   637     // page is not, the pc and the faulting address might be slightly
   638     // different - we still want to unguard the 2nd page in this case.
   639     //
   640     // 15 bytes seems to be a (very) safe value for max instruction size.
   641     bool pc_is_near_addr =
   642       (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
   643     bool instr_spans_page_boundary =
   644       (align_size_down((intptr_t) pc ^ (intptr_t) addr,
   645                        (intptr_t) page_size) > 0);
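        // (pc ^ addr keeps only the differing address bits; aligning that value down
        //  to the page size is non-zero exactly when pc and addr lie on different pages.)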
   647     if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
   648       static volatile address last_addr =
   649         (address) os::non_memory_address_word();
   651       // In conservative mode, don't unguard unless the address is in the VM
   652       if (addr != last_addr &&
   653           (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
   655         // Set memory to RWX and retry
   656         address page_start =
   657           (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
   658         bool res = os::protect_memory((char*) page_start, page_size,
   659                                       os::MEM_PROT_RWX);
   661         if (PrintMiscellaneous && Verbose) {
   662           char buf[256];
   663           jio_snprintf(buf, sizeof(buf), "Execution protection violation "
   664                        "at " INTPTR_FORMAT
   665                        ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
   666                        page_start, (res ? "success" : "failed"), errno);
   667           tty->print_raw_cr(buf);
   668         }
   669         stub = pc;
   671         // Set last_addr so if we fault again at the same address, we don't end
   672         // up in an endless loop.
   673         //
   674         // There are two potential complications here.  Two threads trapping at
   675         // the same address at the same time could cause one of the threads to
   676         // think it already unguarded, and abort the VM.  Likely very rare.
   677         //
   678         // The other race involves two threads alternately trapping at
   679         // different addresses and failing to unguard the page, resulting in
   680         // an endless loop.  This condition is probably even more unlikely than
   681         // the first.
   682         //
   683         // Although both cases could be avoided by using locks or thread local
   684         // last_addr, these solutions are unnecessary complication: this
   685         // handler is a best-effort safety net, not a complete solution.  It is
   686         // disabled by default and should only be used as a workaround in case
   687         // we missed any no-execute-unsafe VM code.
   689         last_addr = addr;
   690       }
   691     }
   692   }
   693 #endif // !AMD64
   695   if (stub != NULL) {
   696     // save all thread context in case we need to restore it
   697     if (thread != NULL) thread->set_saved_exception_pc(pc);
   699     uc->context_pc = (intptr_t)stub;
   700     return true;
   701   }
   703   // signal-chaining
   704   if (os::Bsd::chained_handler(sig, info, ucVoid)) {
   705      return true;
   706   }
   708   if (!abort_if_unrecognized) {
   709     // caller wants another chance, so give it to him
   710     return false;
   711   }
   713   if (pc == NULL && uc != NULL) {
   714     pc = os::Bsd::ucontext_get_pc(uc);
   715   }
   717   // unmask current signal
   718   sigset_t newset;
   719   sigemptyset(&newset);
   720   sigaddset(&newset, sig);
   721   sigprocmask(SIG_UNBLOCK, &newset, NULL);
   723   VMError err(t, sig, pc, info, ucVoid);
   724   err.report_and_die();
   726   ShouldNotReachHere();
   727 }
   729 // From solaris_i486.s ported to bsd_i486.s
   730 extern "C" void fixcw();
   732 void os::Bsd::init_thread_fpu_state(void) {
   733 #ifndef AMD64
   734   // Set fpu to 53 bit precision. This happens too early to use a stub.
   735   fixcw();
   736 #endif // !AMD64
   737 }
   740 // Check that the bsd kernel version is 2.4 or higher since earlier
   741 // versions do not support SSE without patches.
   742 bool os::supports_sse() {
   743   return true;
   744 }
   746 bool os::is_allocatable(size_t bytes) {
   747 #ifdef AMD64
   748   // unused on amd64?
   749   return true;
   750 #else
   752   if (bytes < 2 * G) {
   753     return true;
   754   }
   756   char* addr = reserve_memory(bytes, NULL);
   758   if (addr != NULL) {
   759     release_memory(addr, bytes);
   760   }
   762   return addr != NULL;
   763 #endif // AMD64
   764 }
   766 ////////////////////////////////////////////////////////////////////////////////
   767 // thread stack
   769 #ifdef AMD64
   770 size_t os::Bsd::min_stack_allowed  = 64 * K;
   772 // amd64: pthread on amd64 is always in floating stack mode
   773 bool os::Bsd::supports_variable_stack_size() {  return true; }
   774 #else
   775 size_t os::Bsd::min_stack_allowed  =  (48 DEBUG_ONLY(+4))*K;
   777 #ifdef __GNUC__
   778 #define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
   779 #endif
   781 bool os::Bsd::supports_variable_stack_size() { return true; }
   782 #endif // AMD64
   784 // return default stack size for thr_type
   785 size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
   786   // default stack size (compiler thread needs larger stack)
   787 #ifdef AMD64
   788   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   789 #else
   790   size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
   791 #endif // AMD64
   792   return s;
   793 }
   795 size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
   796   // Creating guard page is very expensive. Java thread has HotSpot
   797   // guard page, only enable glibc guard page for non-Java threads.
   798   return (thr_type == java_thread ? 0 : page_size());
   799 }
   801 // Java thread:
   802 //
   803 //   Low memory addresses
   804 //    +------------------------+
   805 //    |                        |\  JavaThread created by VM does not have glibc
   806 //    |    glibc guard page    | - guard, attached Java thread usually has
   807 //    |                        |/  1 page glibc guard.
   808 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
   809 //    |                        |\
   810 //    |  HotSpot Guard Pages   | - red and yellow pages
   811 //    |                        |/
   812 //    +------------------------+ JavaThread::stack_yellow_zone_base()
   813 //    |                        |\
   814 //    |      Normal Stack      | -
   815 //    |                        |/
   816 // P2 +------------------------+ Thread::stack_base()
   817 //
   818 // Non-Java thread:
   819 //
   820 //   Low memory addresses
   821 //    +------------------------+
   822 //    |                        |\
   823 //    |  glibc guard page      | - usually 1 page
   824 //    |                        |/
   825 // P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
   826 //    |                        |\
   827 //    |      Normal Stack      | -
   828 //    |                        |/
   829 // P2 +------------------------+ Thread::stack_base()
   830 //
    831 // ** P1 (aka bottom) and size ( P2 = P1 + size) are the address and stack size returned from
   832 //    pthread_attr_getstack()
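        //    current_stack_region() below returns P1 in *bottom and the size in *size;
        //    os::current_stack_base() then reports P2 as bottom + size.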
   834 static void current_stack_region(address * bottom, size_t * size) {
   835 #ifdef __APPLE__
   836   pthread_t self = pthread_self();
   837   void *stacktop = pthread_get_stackaddr_np(self);
   838   *size = pthread_get_stacksize_np(self);
   839   *bottom = (address) stacktop - *size;
   840 #elif defined(__OpenBSD__)
   841   stack_t ss;
   842   int rslt = pthread_stackseg_np(pthread_self(), &ss);
   844   if (rslt != 0)
   845     fatal(err_msg("pthread_stackseg_np failed with err = %d", rslt));
   847   *bottom = (address)((char *)ss.ss_sp - ss.ss_size);
   848   *size   = ss.ss_size;
   849 #else
   850   pthread_attr_t attr;
   852   int rslt = pthread_attr_init(&attr);
   854   // JVM needs to know exact stack location, abort if it fails
   855   if (rslt != 0)
   856     fatal(err_msg("pthread_attr_init failed with err = %d", rslt));
   858   rslt = pthread_attr_get_np(pthread_self(), &attr);
   860   if (rslt != 0)
   861     fatal(err_msg("pthread_attr_get_np failed with err = %d", rslt));
   863   if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 ||
   864     pthread_attr_getstacksize(&attr, size) != 0) {
   865     fatal("Can not locate current stack attributes!");
   866   }
   868   pthread_attr_destroy(&attr);
   869 #endif
   870   assert(os::current_stack_pointer() >= *bottom &&
   871          os::current_stack_pointer() < *bottom + *size, "just checking");
   872 }
   874 address os::current_stack_base() {
   875   address bottom;
   876   size_t size;
   877   current_stack_region(&bottom, &size);
   878   return (bottom + size);
   879 }
   881 size_t os::current_stack_size() {
   882   // stack size includes normal stack and HotSpot guard pages
   883   address bottom;
   884   size_t size;
   885   current_stack_region(&bottom, &size);
   886   return size;
   887 }
   889 /////////////////////////////////////////////////////////////////////////////
   890 // helper functions for fatal error handler
   892 void os::print_context(outputStream *st, void *context) {
   893   if (context == NULL) return;
   895   ucontext_t *uc = (ucontext_t*)context;
   896   st->print_cr("Registers:");
   897 #ifdef AMD64
   898   st->print(  "RAX=" INTPTR_FORMAT, uc->context_rax);
   899   st->print(", RBX=" INTPTR_FORMAT, uc->context_rbx);
   900   st->print(", RCX=" INTPTR_FORMAT, uc->context_rcx);
   901   st->print(", RDX=" INTPTR_FORMAT, uc->context_rdx);
   902   st->cr();
   903   st->print(  "RSP=" INTPTR_FORMAT, uc->context_rsp);
   904   st->print(", RBP=" INTPTR_FORMAT, uc->context_rbp);
   905   st->print(", RSI=" INTPTR_FORMAT, uc->context_rsi);
   906   st->print(", RDI=" INTPTR_FORMAT, uc->context_rdi);
   907   st->cr();
   908   st->print(  "R8 =" INTPTR_FORMAT, uc->context_r8);
   909   st->print(", R9 =" INTPTR_FORMAT, uc->context_r9);
   910   st->print(", R10=" INTPTR_FORMAT, uc->context_r10);
   911   st->print(", R11=" INTPTR_FORMAT, uc->context_r11);
   912   st->cr();
   913   st->print(  "R12=" INTPTR_FORMAT, uc->context_r12);
   914   st->print(", R13=" INTPTR_FORMAT, uc->context_r13);
   915   st->print(", R14=" INTPTR_FORMAT, uc->context_r14);
   916   st->print(", R15=" INTPTR_FORMAT, uc->context_r15);
   917   st->cr();
   918   st->print(  "RIP=" INTPTR_FORMAT, uc->context_rip);
   919   st->print(", EFLAGS=" INTPTR_FORMAT, uc->context_flags);
   920   st->print(", ERR=" INTPTR_FORMAT, uc->context_err);
   921   st->cr();
   922   st->print("  TRAPNO=" INTPTR_FORMAT, uc->context_trapno);
   923 #else
   924   st->print(  "EAX=" INTPTR_FORMAT, uc->context_eax);
   925   st->print(", EBX=" INTPTR_FORMAT, uc->context_ebx);
   926   st->print(", ECX=" INTPTR_FORMAT, uc->context_ecx);
   927   st->print(", EDX=" INTPTR_FORMAT, uc->context_edx);
   928   st->cr();
   929   st->print(  "ESP=" INTPTR_FORMAT, uc->context_esp);
   930   st->print(", EBP=" INTPTR_FORMAT, uc->context_ebp);
   931   st->print(", ESI=" INTPTR_FORMAT, uc->context_esi);
   932   st->print(", EDI=" INTPTR_FORMAT, uc->context_edi);
   933   st->cr();
   934   st->print(  "EIP=" INTPTR_FORMAT, uc->context_eip);
   935   st->print(", EFLAGS=" INTPTR_FORMAT, uc->context_eflags);
   936 #endif // AMD64
   937   st->cr();
   938   st->cr();
   940   intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc);
   941   st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
   942   print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
   943   st->cr();
   945   // Note: it may be unsafe to inspect memory near pc. For example, pc may
   946   // point to garbage if entry point in an nmethod is corrupted. Leave
   947   // this at the end, and hope for the best.
   948   address pc = os::Bsd::ucontext_get_pc(uc);
   949   st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
   950   print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
   951 }
   953 void os::print_register_info(outputStream *st, void *context) {
   954   if (context == NULL) return;
   956   ucontext_t *uc = (ucontext_t*)context;
   958   st->print_cr("Register to memory mapping:");
   959   st->cr();
   961   // this is horrendously verbose but the layout of the registers in the
   962   // context does not match how we defined our abstract Register set, so
   963   // we can't just iterate through the gregs area
   965   // this is only for the "general purpose" registers
   967 #ifdef AMD64
   968   st->print("RAX="); print_location(st, uc->context_rax);
   969   st->print("RBX="); print_location(st, uc->context_rbx);
   970   st->print("RCX="); print_location(st, uc->context_rcx);
   971   st->print("RDX="); print_location(st, uc->context_rdx);
   972   st->print("RSP="); print_location(st, uc->context_rsp);
   973   st->print("RBP="); print_location(st, uc->context_rbp);
   974   st->print("RSI="); print_location(st, uc->context_rsi);
   975   st->print("RDI="); print_location(st, uc->context_rdi);
   976   st->print("R8 ="); print_location(st, uc->context_r8);
   977   st->print("R9 ="); print_location(st, uc->context_r9);
   978   st->print("R10="); print_location(st, uc->context_r10);
   979   st->print("R11="); print_location(st, uc->context_r11);
   980   st->print("R12="); print_location(st, uc->context_r12);
   981   st->print("R13="); print_location(st, uc->context_r13);
   982   st->print("R14="); print_location(st, uc->context_r14);
   983   st->print("R15="); print_location(st, uc->context_r15);
   984 #else
   985   st->print("EAX="); print_location(st, uc->context_eax);
   986   st->print("EBX="); print_location(st, uc->context_ebx);
   987   st->print("ECX="); print_location(st, uc->context_ecx);
   988   st->print("EDX="); print_location(st, uc->context_edx);
   989   st->print("ESP="); print_location(st, uc->context_esp);
   990   st->print("EBP="); print_location(st, uc->context_ebp);
   991   st->print("ESI="); print_location(st, uc->context_esi);
   992   st->print("EDI="); print_location(st, uc->context_edi);
   993 #endif // AMD64
   995   st->cr();
   996 }
   998 void os::setup_fpu() {
   999 #ifndef AMD64
  1000   address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  1001   __asm__ volatile (  "fldcw (%0)" :
  1002                       : "r" (fpu_cntrl) : "memory");
   1003 #endif // !AMD64
   1004 }
   1006 #ifndef PRODUCT
   1007 void os::verify_stack_alignment() {
   1008 }
   1009 #endif
