Tue, 30 Apr 2013 11:56:52 -0700
8011661: Insufficient memory message says "malloc" when sometimes it should say "mmap"
Reviewed-by: coleenp, zgu, hseigel
duke@435 | 1 | /* |
mikael@4153 | 2 | * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | // no precompiled headers |
twisti@4318 | 26 | #include "asm/macroAssembler.hpp" |
stefank@2314 | 27 | #include "classfile/classLoader.hpp" |
stefank@2314 | 28 | #include "classfile/systemDictionary.hpp" |
stefank@2314 | 29 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 30 | #include "code/icBuffer.hpp" |
stefank@2314 | 31 | #include "code/vtableStubs.hpp" |
stefank@2314 | 32 | #include "interpreter/interpreter.hpp" |
stefank@2314 | 33 | #include "jvm_linux.h" |
stefank@2314 | 34 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 35 | #include "mutex_linux.inline.hpp" |
stefank@2314 | 36 | #include "os_share_linux.hpp" |
stefank@2314 | 37 | #include "prims/jniFastGetField.hpp" |
stefank@2314 | 38 | #include "prims/jvm.h" |
stefank@2314 | 39 | #include "prims/jvm_misc.hpp" |
stefank@2314 | 40 | #include "runtime/arguments.hpp" |
stefank@2314 | 41 | #include "runtime/extendedPC.hpp" |
stefank@2314 | 42 | #include "runtime/frame.inline.hpp" |
stefank@2314 | 43 | #include "runtime/interfaceSupport.hpp" |
stefank@2314 | 44 | #include "runtime/java.hpp" |
stefank@2314 | 45 | #include "runtime/javaCalls.hpp" |
stefank@2314 | 46 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 47 | #include "runtime/osThread.hpp" |
stefank@2314 | 48 | #include "runtime/sharedRuntime.hpp" |
stefank@2314 | 49 | #include "runtime/stubRoutines.hpp" |
stefank@4299 | 50 | #include "runtime/thread.inline.hpp" |
stefank@2314 | 51 | #include "runtime/timer.hpp" |
stefank@2314 | 52 | #include "utilities/events.hpp" |
stefank@2314 | 53 | #include "utilities/vmError.hpp" |
duke@435 | 54 | |
duke@435 | 55 | // put OS-includes here |
duke@435 | 56 | # include <sys/types.h> |
duke@435 | 57 | # include <sys/mman.h> |
duke@435 | 58 | # include <pthread.h> |
duke@435 | 59 | # include <signal.h> |
duke@435 | 60 | # include <errno.h> |
duke@435 | 61 | # include <dlfcn.h> |
duke@435 | 62 | # include <stdlib.h> |
duke@435 | 63 | # include <stdio.h> |
duke@435 | 64 | # include <unistd.h> |
duke@435 | 65 | # include <sys/resource.h> |
duke@435 | 66 | # include <pthread.h> |
duke@435 | 67 | # include <sys/stat.h> |
duke@435 | 68 | # include <sys/time.h> |
duke@435 | 69 | # include <sys/utsname.h> |
duke@435 | 70 | # include <sys/socket.h> |
duke@435 | 71 | # include <sys/wait.h> |
duke@435 | 72 | # include <pwd.h> |
duke@435 | 73 | # include <poll.h> |
duke@435 | 74 | # include <ucontext.h> |
duke@435 | 75 | # include <fpu_control.h> |
duke@435 | 76 | |
// Map the generic register-name macros used throughout this file onto the
// index constants for ucontext_t.uc_mcontext.gregs[] on each word size, and
// define the assembler spelling of the stack/frame pointer registers for the
// inline-asm snippets below.
#ifdef AMD64
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
#define REG_SP REG_UESP
#define REG_PC REG_EIP
#define REG_FP REG_EBP
#define SPELL_REG_SP "esp"
#define SPELL_REG_FP "ebp"
#endif // AMD64
duke@435 | 90 | |
// Return the current thread's stack pointer.
address os::current_stack_pointer() {
#ifdef SPARC_WORKS
  // Sun Studio: read the stack pointer with an explicit asm move.  The
  // sizeof(long)*2 adjustment presumably compensates for this function's
  // own frame so the result matches the caller's view — TODO confirm.
  register void *esp;
  __asm__("mov %%"SPELL_REG_SP", %0":"=r"(esp));
  return (address) ((char*)esp + sizeof(long)*2);
#else
  // gcc: bind a register variable directly to the stack-pointer register.
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
#endif
}
duke@435 | 101 | |
duke@435 | 102 | char* os::non_memory_address_word() { |
duke@435 | 103 | // Must never look like an address returned by reserve_memory, |
duke@435 | 104 | // even in its subfields (as defined by the CPU immediate fields, |
duke@435 | 105 | // if the CPU splits constants across multiple instructions). |
duke@435 | 106 | |
duke@435 | 107 | return (char*) -1; |
duke@435 | 108 | } |
duke@435 | 109 | |
// Per-thread platform initialization hook; Linux/x86 requires no work here.
void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}
duke@435 | 113 | |
duke@435 | 114 | address os::Linux::ucontext_get_pc(ucontext_t * uc) { |
duke@435 | 115 | return (address)uc->uc_mcontext.gregs[REG_PC]; |
duke@435 | 116 | } |
duke@435 | 117 | |
duke@435 | 118 | intptr_t* os::Linux::ucontext_get_sp(ucontext_t * uc) { |
duke@435 | 119 | return (intptr_t*)uc->uc_mcontext.gregs[REG_SP]; |
duke@435 | 120 | } |
duke@435 | 121 | |
duke@435 | 122 | intptr_t* os::Linux::ucontext_get_fp(ucontext_t * uc) { |
duke@435 | 123 | return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; |
duke@435 | 124 | } |
duke@435 | 125 | |
// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
//
// thread        - the interrupted thread (must be non-NULL; asserted below,
//                 but otherwise unused on Linux)
// uc            - the signal context to decode
// ret_sp/ret_fp - out-parameters receiving the saved stack/frame pointers
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}
duke@435 | 140 | |
duke@435 | 141 | ExtendedPC os::fetch_frame_from_context(void* ucVoid, |
duke@435 | 142 | intptr_t** ret_sp, intptr_t** ret_fp) { |
duke@435 | 143 | |
duke@435 | 144 | ExtendedPC epc; |
duke@435 | 145 | ucontext_t* uc = (ucontext_t*)ucVoid; |
duke@435 | 146 | |
duke@435 | 147 | if (uc != NULL) { |
duke@435 | 148 | epc = ExtendedPC(os::Linux::ucontext_get_pc(uc)); |
duke@435 | 149 | if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc); |
duke@435 | 150 | if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc); |
duke@435 | 151 | } else { |
duke@435 | 152 | // construct empty ExtendedPC for return value checking |
duke@435 | 153 | epc = ExtendedPC(NULL); |
duke@435 | 154 | if (ret_sp) *ret_sp = (intptr_t *)NULL; |
duke@435 | 155 | if (ret_fp) *ret_fp = (intptr_t *)NULL; |
duke@435 | 156 | } |
duke@435 | 157 | |
duke@435 | 158 | return epc; |
duke@435 | 159 | } |
duke@435 | 160 | |
duke@435 | 161 | frame os::fetch_frame_from_context(void* ucVoid) { |
duke@435 | 162 | intptr_t* sp; |
duke@435 | 163 | intptr_t* fp; |
duke@435 | 164 | ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); |
duke@435 | 165 | return frame(sp, fp, epc.pc()); |
duke@435 | 166 | } |
duke@435 | 167 | |
duke@435 | 168 | // By default, gcc always save frame pointer (%ebp/%rbp) on stack. It may get |
duke@435 | 169 | // turned off by -fomit-frame-pointer, |
duke@435 | 170 | frame os::get_sender_for_C_frame(frame* fr) { |
duke@435 | 171 | return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); |
duke@435 | 172 | } |
duke@435 | 173 | |
// Read the frame-pointer register and dereference it, yielding the saved
// frame pointer of this function's caller (one frame up the stack).
intptr_t* _get_previous_fp() {
#ifdef SPARC_WORKS
  // Sun Studio: explicit asm move out of the frame-pointer register.
  register intptr_t **ebp;
  __asm__("mov %%"SPELL_REG_FP", %0":"=r"(ebp));
#else
  // gcc: register variable pinned to the frame-pointer register.
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
#endif
  return (intptr_t*) *ebp; // we want what it points to.
}
duke@435 | 183 | |
duke@435 | 184 | |
duke@435 | 185 | frame os::current_frame() { |
duke@435 | 186 | intptr_t* fp = _get_previous_fp(); |
duke@435 | 187 | frame myframe((intptr_t*)os::current_stack_pointer(), |
duke@435 | 188 | (intptr_t*)fp, |
duke@435 | 189 | CAST_FROM_FN_PTR(address, os::current_frame)); |
duke@435 | 190 | if (os::is_first_C_frame(&myframe)) { |
duke@435 | 191 | // stack is not walkable |
dholmes@4528 | 192 | return frame(); |
duke@435 | 193 | } else { |
duke@435 | 194 | return os::get_sender_for_C_frame(&myframe); |
duke@435 | 195 | } |
duke@435 | 196 | } |
duke@435 | 197 | |
// Utility functions

// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE   // hardware trap/vector number (14) for a page fault
};
duke@435 | 204 | |
// Labels for the SafeFetch32/SafeFetchN stubs (declared here, defined
// elsewhere).  The signal handler below compares the faulting pc against
// the *PFI labels and, on a match, resumes execution at the matching
// *Resume label instead of crashing.
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
#ifdef AMD64
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif // AMD64
duke@435 | 211 | |
// Central JVM signal handler for Linux/x86.  Triages a delivered signal in
// this order: siginfo-free signals (SIGPIPE/SIGXFSZ), SafeFetch faults,
// stack-overflow variants, implicit-exception traps that can be redirected
// to a stub, JNI fast-field and serialization-page traps, (32-bit only)
// execution-protection violations, then chained third-party handlers, and
// finally — if abort_if_unrecognized — a fatal error report.
//
// sig / info / ucVoid   - as delivered by the kernel.  info/ucVoid may be
//                         junk if user code swapped handlers with
//                         signal()/sigset() (see note below).
// abort_if_unrecognized - nonzero: report a fatal error for an unrecognized
//                         signal; zero: return false so the caller can try
//                         another handler.
// returns               - nonzero (true) if the signal was handled.
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install
  // then restore certain signal handler (e.g. to temporarily block SIGPIPE,
  // or have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid unnecessary crash when libjsig is not preloaded, try handle signals
  // that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // No chained handler: the VM deliberately ignores these signals.
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  // Classify the current thread, if the VM's handlers are installed yet.
  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL ){
      if(t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if(t->is_VM_thread()){
        vmthread = (VMThread *)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

    // SafeFetch32/SafeFetchN fault: resume at the stub's recovery label.
    if (pc == (address) Fetch32PFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
       return 1 ;
    }
#ifdef AMD64
    if (pc == (address) FetchNPFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
       return 1 ;
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception.  Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code.  Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation.  Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

          // This is a likely cause, but hard to verify. Let's just print
          // it as a hint.
          tty->print_raw_cr("Please check if any of your loaded .so files has "
                            "enabled executable stack (see man page execstack(8))");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
             thread->osthread()->set_expanding_stack();
             if (os::Linux::manually_expand_stack(thread, addr)) {
               thread->osthread()->clear_expanding_stack();
               return 1;
             }
             thread->osthread()->clear_expanding_stack();
          } else {
             fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        // Safepoint polling page fault: divert to the poll stub.
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = StubRoutines::handler_for_unsafe_access();
        }
      }
      else

#ifdef AMD64
      // amd64: siginfo reliably identifies the divide fault.
      if (sig == SIGFPE &&
          (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub =
          SharedRuntime::
          continuation_for_implicit_exception(thread,
                                              pc,
                                              SharedRuntime::
                                              IMPLICIT_DIVIDE_BY_ZERO);
#else
      if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
        // HACK: si_code does not work on linux 2.2.12-20!!!
        // Decode the faulting opcode byte instead.
        int op = pc[0];
        if (op == 0xDB) {
          // FIST
          // TODO: The encoding of D2I in i486.ad can cause an exception
          // prior to the fist instruction if there was an invalid operation
          // pending. We want to dismiss that exception. From the win_32
          // side it also seems that if it really was the fist causing
          // the exception that we do the d2i by hand with different
          // rounding. Seems kind of weird.
          // NOTE: that we take the exception at the NEXT floating point instruction.
          assert(pc[0] == 0xDB, "not a FIST opcode");
          assert(pc[1] == 0x14, "not a FIST opcode");
          assert(pc[2] == 0x24, "not a FIST opcode");
          return true;
        } else if (op == 0xF7) {
          // IDIV
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
        } else {
          // TODO: handle more cases if we are using other x86 instructions
          // that can generate SIGFPE signal on linux.
          tty->print_cr("unknown opcode 0x%X with SIGFPE.", op);
          fatal("please update this code.");
        }
#endif // AMD64
      } else if (sig == SIGSEGV &&
               !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
          // Determination of interpreter/vtable stub/compiled code null exception
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
        stub = StubRoutines::handler_for_unsafe_access();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

#ifndef AMD64
  // Execution protection violation
  //
  // This should be kept as the last step in the triage.  We don't
  // have a dedicated trap number for a no-execute fault, so be
  // conservative and allow other handlers the first shot.
  //
  // Note: We don't test that info->si_code == SEGV_ACCERR here.
  // this si_code is so generic that it is almost meaningless; and
  // the si_code for this condition may change in the future.
  // Furthermore, a false-positive should be harmless.
  if (UnguardOnExecutionViolation > 0 &&
      (sig == SIGSEGV || sig == SIGBUS) &&
      uc->uc_mcontext.gregs[REG_TRAPNO] == trap_page_fault) {
    int page_size = os::vm_page_size();
    address addr = (address) info->si_addr;
    address pc = os::Linux::ucontext_get_pc(uc);
    // Make sure the pc and the faulting address are sane.
    //
    // If an instruction spans a page boundary, and the page containing
    // the beginning of the instruction is executable but the following
    // page is not, the pc and the faulting address might be slightly
    // different - we still want to unguard the 2nd page in this case.
    //
    // 15 bytes seems to be a (very) safe value for max instruction size.
    bool pc_is_near_addr =
      (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
    bool instr_spans_page_boundary =
      (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                       (intptr_t) page_size) > 0);

    if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
      static volatile address last_addr =
        (address) os::non_memory_address_word();

      // In conservative mode, don't unguard unless the address is in the VM
      if (addr != last_addr &&
          (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

        // Set memory to RWX and retry
        address page_start =
          (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
        bool res = os::protect_memory((char*) page_start, page_size,
                                      os::MEM_PROT_RWX);

        if (PrintMiscellaneous && Verbose) {
          char buf[256];
          jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                       "at " INTPTR_FORMAT
                       ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr,
                       page_start, (res ? "success" : "failed"), errno);
          tty->print_raw_cr(buf);
        }
        stub = pc;

        // Set last_addr so if we fault again at the same address, we don't end
        // up in an endless loop.
        //
        // There are two potential complications here.  Two threads trapping at
        // the same address at the same time could cause one of the threads to
        // think it already unguarded, and abort the VM.  Likely very rare.
        //
        // The other race involves two threads alternately trapping at
        // different addresses and failing to unguard the page, resulting in
        // an endless loop.  This condition is probably even more unlikely than
        // the first.
        //
        // Although both cases could be avoided by using locks or thread local
        // last_addr, these solutions are unnecessary complication: this
        // handler is a best-effort safety net, not a complete solution.  It is
        // disabled by default and should only be used as a workaround in case
        // we missed any no-execute-unsafe VM code.

        last_addr = addr;
      }
    }
  }
#endif // !AMD64

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    // Redirect execution to the chosen stub and report the signal handled.
    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
     return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  // Unrecognized signal: produce the hs_err report and abort.
  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
}
duke@435 | 530 | |
// Initialize the FPU state of the current thread.
void os::Linux::init_thread_fpu_state(void) {
#ifndef AMD64
  // set fpu to 53 bit precision
  set_fpu_control_word(0x27f);
#endif // !AMD64
}
duke@435 | 537 | |
// Read the x87 FPU control word (low 16 bits); always 0 on amd64, where
// the x87 control word is not used by the VM.
int os::Linux::get_fpu_control_word(void) {
#ifdef AMD64
  return 0;
#else
  int fpu_control;
  _FPU_GETCW(fpu_control);
  return fpu_control & 0xffff;
#endif // AMD64
}
duke@435 | 547 | |
// Write the x87 FPU control word; a no-op on amd64.
void os::Linux::set_fpu_control_word(int fpu_control) {
#ifndef AMD64
  _FPU_SETCW(fpu_control);
#endif // !AMD64
}
duke@435 | 553 | |
// Check that the linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
#ifdef AMD64
  return true;
#else
  struct utsname uts;
  if( uname(&uts) != 0 ) return false; // uname fails?
  // Parse "major.minor..." out of the kernel release string.
  char *minor_string;
  int major = strtol(uts.release,&minor_string,10);
  int minor = strtol(minor_string+1,NULL,10);
  bool result = (major > 2 || (major==2 && minor >= 4));
#ifndef PRODUCT
  if (PrintMiscellaneous && Verbose) {
    tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
               major,minor, result ? "DOES" : "does NOT");
  }
#endif
  return result;
#endif // AMD64
}
duke@435 | 575 | |
duke@435 | 576 | bool os::is_allocatable(size_t bytes) { |
duke@435 | 577 | #ifdef AMD64 |
duke@435 | 578 | // unused on amd64? |
duke@435 | 579 | return true; |
duke@435 | 580 | #else |
duke@435 | 581 | |
duke@435 | 582 | if (bytes < 2 * G) { |
duke@435 | 583 | return true; |
duke@435 | 584 | } |
duke@435 | 585 | |
duke@435 | 586 | char* addr = reserve_memory(bytes, NULL); |
duke@435 | 587 | |
duke@435 | 588 | if (addr != NULL) { |
duke@435 | 589 | release_memory(addr, bytes); |
duke@435 | 590 | } |
duke@435 | 591 | |
duke@435 | 592 | return addr != NULL; |
duke@435 | 593 | #endif // AMD64 |
duke@435 | 594 | } |
duke@435 | 595 | |
////////////////////////////////////////////////////////////////////////////////
// thread stack

#ifdef AMD64
// Smallest stack a thread may be created with.
size_t os::Linux::min_stack_allowed = 64 * K;

// amd64: pthread on amd64 is always in floating stack mode
bool os::Linux::supports_variable_stack_size() { return true; }
#else
// Smallest stack a thread may be created with (debug builds get 4K extra).
size_t os::Linux::min_stack_allowed = (48 DEBUG_ONLY(+4))*K;

#ifdef __GNUC__
// Read the low 16 bits of the %gs segment selector (GNU asm only).
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
#endif

// Test if pthread library can support variable thread stack size. LinuxThreads
// in fixed stack mode allocates 2M fixed slot for each thread. LinuxThreads
// in floating stack mode and NPTL support variable stack size.
bool os::Linux::supports_variable_stack_size() {
  if (os::Linux::is_NPTL()) {
     // NPTL, yes
     return true;

  } else {
    // Note: We can't control default stack size when creating a thread.
    // If we use non-default stack size (pthread_attr_setstacksize), both
    // floating stack and non-floating stack LinuxThreads will return the
    // same value. This makes it impossible to implement this function by
    // detecting thread stack size directly.
    //
    // An alternative approach is to check %gs. Fixed-stack LinuxThreads
    // do not use %gs, so its value is 0. Floating-stack LinuxThreads use
    // %gs (either as LDT selector or GDT selector, depending on kernel)
    // to access thread specific data.
    //
    // Note that %gs is a reserved glibc register since early 2001, so
    // applications are not allowed to change its value (Ulrich Drepper from
    // Redhat confirmed that all known offenders have been modified to use
    // either %fs or TSD). In the worst case scenario, when VM is embedded in
    // a native application that plays with %gs, we might see non-zero %gs
    // even LinuxThreads is running in fixed stack mode. As the result, we'll
    // return true and skip _thread_safety_check(), so we may not be able to
    // detect stack-heap collisions. But otherwise it's harmless.
    //
#ifdef __GNUC__
    return (GET_GS() != 0);
#else
    // Non-GNU compiler: cannot inspect %gs; conservatively say no.
    return false;
#endif
  }
}
#endif // AMD64
duke@435 | 648 | |
duke@435 | 649 | // return default stack size for thr_type |
duke@435 | 650 | size_t os::Linux::default_stack_size(os::ThreadType thr_type) { |
duke@435 | 651 | // default stack size (compiler thread needs larger stack) |
duke@435 | 652 | #ifdef AMD64 |
duke@435 | 653 | size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M); |
duke@435 | 654 | #else |
duke@435 | 655 | size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K); |
duke@435 | 656 | #endif // AMD64 |
duke@435 | 657 | return s; |
duke@435 | 658 | } |
duke@435 | 659 | |
duke@435 | 660 | size_t os::Linux::default_guard_size(os::ThreadType thr_type) { |
duke@435 | 661 | // Creating guard page is very expensive. Java thread has HotSpot |
duke@435 | 662 | // guard page, only enable glibc guard page for non-Java threads. |
duke@435 | 663 | return (thr_type == java_thread ? 0 : page_size()); |
duke@435 | 664 | } |
duke@435 | 665 | |
duke@435 | 666 | // Java thread: |
duke@435 | 667 | // |
duke@435 | 668 | // Low memory addresses |
duke@435 | 669 | // +------------------------+ |
duke@435 | 670 | // | |\ JavaThread created by VM does not have glibc |
duke@435 | 671 | // | glibc guard page | - guard, attached Java thread usually has |
duke@435 | 672 | // | |/ 1 page glibc guard. |
duke@435 | 673 | // P1 +------------------------+ Thread::stack_base() - Thread::stack_size() |
duke@435 | 674 | // | |\ |
duke@435 | 675 | // | HotSpot Guard Pages | - red and yellow pages |
duke@435 | 676 | // | |/ |
duke@435 | 677 | // +------------------------+ JavaThread::stack_yellow_zone_base() |
duke@435 | 678 | // | |\ |
duke@435 | 679 | // | Normal Stack | - |
duke@435 | 680 | // | |/ |
duke@435 | 681 | // P2 +------------------------+ Thread::stack_base() |
duke@435 | 682 | // |
duke@435 | 683 | // Non-Java thread: |
duke@435 | 684 | // |
duke@435 | 685 | // Low memory addresses |
duke@435 | 686 | // +------------------------+ |
duke@435 | 687 | // | |\ |
duke@435 | 688 | // | glibc guard page | - usually 1 page |
duke@435 | 689 | // | |/ |
duke@435 | 690 | // P1 +------------------------+ Thread::stack_base() - Thread::stack_size() |
duke@435 | 691 | // | |\ |
duke@435 | 692 | // | Normal Stack | - |
duke@435 | 693 | // | |/ |
duke@435 | 694 | // P2 +------------------------+ Thread::stack_base() |
duke@435 | 695 | // |
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size returned from
// pthread_attr_getstack()
duke@435 | 698 | |
// Compute the stack region of the current thread: *bottom is the low end
// (P1 in the diagram above) and *size its extent, so the stack base is
// *bottom + *size. Aborts the VM if the region cannot be determined.
static void current_stack_region(address * bottom, size_t * size) {
  if (os::Linux::is_initial_thread()) {
    // initial thread needs special handling because pthread_getattr_np()
    // may return bogus value.
    *bottom = os::Linux::initial_thread_stack_bottom();
    *size = os::Linux::initial_thread_stack_size();
  } else {
    pthread_attr_t attr;

    int rslt = pthread_getattr_np(pthread_self(), &attr);

    // JVM needs to know exact stack location, abort if it fails
    if (rslt != 0) {
      if (rslt == ENOMEM) {
        // pthread_getattr_np() allocates; ENOMEM here reflects an mmap-style
        // failure, hence OOM_MMAP_ERROR rather than OOM_MALLOC_ERROR.
        vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
      } else {
        fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
      }
    }

    if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
      fatal("Can not locate current stack attributes!");
    }

    // Release storage allocated by pthread_getattr_np().
    pthread_attr_destroy(&attr);

  }
  // Sanity check: the current stack pointer must lie inside the region.
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}
duke@435 | 729 | |
duke@435 | 730 | address os::current_stack_base() { |
duke@435 | 731 | address bottom; |
duke@435 | 732 | size_t size; |
duke@435 | 733 | current_stack_region(&bottom, &size); |
duke@435 | 734 | return (bottom + size); |
duke@435 | 735 | } |
duke@435 | 736 | |
duke@435 | 737 | size_t os::current_stack_size() { |
duke@435 | 738 | // stack size includes normal stack and HotSpot guard pages |
duke@435 | 739 | address bottom; |
duke@435 | 740 | size_t size; |
duke@435 | 741 | current_stack_region(&bottom, &size); |
duke@435 | 742 | return size; |
duke@435 | 743 | } |
duke@435 | 744 | |
duke@435 | 745 | ///////////////////////////////////////////////////////////////////////////// |
duke@435 | 746 | // helper functions for fatal error handler |
duke@435 | 747 | |
// Print the general purpose registers, the top of the stack and the
// instructions around the pc from the given signal context ('context' is
// a ucontext_t*, may be NULL). Used by the fatal error handler.
void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
  st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
  st->cr();
  st->print("  TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDX]);
  st->cr();
  // REG_UESP is the user-mode stack pointer slot in the 32-bit context.
  st->print(  "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EDI]);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CR2=" INTPTR_FORMAT, uc->uc_mcontext.cr2);
#endif // AMD64
  st->cr();
  st->cr();

  // Dump the first 8 words at the interrupted stack pointer.
  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
never@2262 | 810 | |
// For each general purpose register in the signal context, print what its
// value points to (via print_location). 'context' is a ucontext_t*, may be
// NULL. Used by the fatal error handler.
void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
  st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
  st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
  st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
  st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
  st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
  st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
  st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
  st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
  st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
  st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
  st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
  st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
  st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
  st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
  st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
  st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[REG_EAX]);
  st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[REG_EBX]);
  st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[REG_ECX]);
  st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[REG_EDX]);
  st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[REG_ESP]);
  st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[REG_EBP]);
  st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[REG_ESI]);
  st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[REG_EDI]);
#endif // AMD64

  st->cr();
}
duke@435 | 855 | |
// Initialize the FPU for this thread. On 32-bit x86, load the VM's
// standard x87 control word (via fldcw); a no-op on AMD64.
void os::setup_fpu() {
#ifndef AMD64
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  __asm__ volatile (  "fldcw (%0)" :
                      : "r" (fpu_cntrl) : "memory");
#endif // !AMD64
}
roland@3606 | 863 | |
roland@3606 | 864 | #ifndef PRODUCT |
roland@3606 | 865 | void os::verify_stack_alignment() { |
roland@3606 | 866 | #ifdef AMD64 |
roland@3606 | 867 | assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); |
roland@3606 | 868 | #endif |
roland@3606 | 869 | } |
roland@3606 | 870 | #endif |