Thu, 20 Jun 2013 15:02:05 +0200
8016697: Use stubs to implement safefetch
Summary: Implement Safefetch as stub routines. This reduces compiler and os dependencies.
Reviewed-by: twisti, kvn
duke@435 | 1 | /* |
sla@5237 | 2 | * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | // no precompiled headers |
twisti@4318 | 26 | #include "asm/macroAssembler.hpp" |
stefank@2314 | 27 | #include "classfile/classLoader.hpp" |
stefank@2314 | 28 | #include "classfile/systemDictionary.hpp" |
stefank@2314 | 29 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 30 | #include "code/icBuffer.hpp" |
stefank@2314 | 31 | #include "code/vtableStubs.hpp" |
stefank@2314 | 32 | #include "interpreter/interpreter.hpp" |
stefank@2314 | 33 | #include "jvm_solaris.h" |
stefank@2314 | 34 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 35 | #include "mutex_solaris.inline.hpp" |
stefank@2314 | 36 | #include "os_share_solaris.hpp" |
stefank@2314 | 37 | #include "prims/jniFastGetField.hpp" |
stefank@2314 | 38 | #include "prims/jvm.h" |
stefank@2314 | 39 | #include "prims/jvm_misc.hpp" |
stefank@2314 | 40 | #include "runtime/arguments.hpp" |
stefank@2314 | 41 | #include "runtime/extendedPC.hpp" |
stefank@2314 | 42 | #include "runtime/frame.inline.hpp" |
stefank@2314 | 43 | #include "runtime/interfaceSupport.hpp" |
stefank@2314 | 44 | #include "runtime/java.hpp" |
stefank@2314 | 45 | #include "runtime/javaCalls.hpp" |
stefank@2314 | 46 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 47 | #include "runtime/osThread.hpp" |
stefank@2314 | 48 | #include "runtime/sharedRuntime.hpp" |
stefank@2314 | 49 | #include "runtime/stubRoutines.hpp" |
stefank@4299 | 50 | #include "runtime/thread.inline.hpp" |
stefank@2314 | 51 | #include "runtime/timer.hpp" |
stefank@2314 | 52 | #include "utilities/events.hpp" |
stefank@2314 | 53 | #include "utilities/vmError.hpp" |
duke@435 | 54 | |
duke@435 | 55 | // put OS-includes here |
duke@435 | 56 | # include <sys/types.h> |
duke@435 | 57 | # include <sys/mman.h> |
duke@435 | 58 | # include <pthread.h> |
duke@435 | 59 | # include <signal.h> |
duke@435 | 60 | # include <setjmp.h> |
duke@435 | 61 | # include <errno.h> |
duke@435 | 62 | # include <dlfcn.h> |
duke@435 | 63 | # include <stdio.h> |
duke@435 | 64 | # include <unistd.h> |
duke@435 | 65 | # include <sys/resource.h> |
duke@435 | 66 | # include <thread.h> |
duke@435 | 67 | # include <sys/stat.h> |
duke@435 | 68 | # include <sys/time.h> |
duke@435 | 69 | # include <sys/filio.h> |
duke@435 | 70 | # include <sys/utsname.h> |
duke@435 | 71 | # include <sys/systeminfo.h> |
duke@435 | 72 | # include <sys/socket.h> |
duke@435 | 73 | # include <sys/trap.h> |
duke@435 | 74 | # include <sys/lwp.h> |
duke@435 | 75 | # include <pwd.h> |
duke@435 | 76 | # include <poll.h> |
duke@435 | 77 | # include <sys/lwp.h> |
duke@435 | 78 | # include <procfs.h> // see comment in <sys/procfs.h> |
duke@435 | 79 | |
duke@435 | 80 | #ifndef AMD64 |
duke@435 | 81 | // QQQ seems useless at this point |
duke@435 | 82 | # define _STRUCTURED_PROC 1 // this gets us the new structured proc interfaces of 5.6 & later |
duke@435 | 83 | #endif // AMD64 |
duke@435 | 84 | # include <sys/procfs.h> // see comment in <sys/procfs.h> |
duke@435 | 85 | |
duke@435 | 86 | |
duke@435 | 87 | #define MAX_PATH (2 * K) |
duke@435 | 88 | |
duke@435 | 89 | // Minimum stack size for the VM. It's easier to document a constant value |
duke@435 | 90 | // but it's different for x86 and sparc because the page sizes are different. |
duke@435 | 91 | #ifdef AMD64 |
duke@435 | 92 | size_t os::Solaris::min_stack_allowed = 224*K; |
duke@435 | 93 | #define REG_SP REG_RSP |
duke@435 | 94 | #define REG_PC REG_RIP |
duke@435 | 95 | #define REG_FP REG_RBP |
duke@435 | 96 | #else |
duke@435 | 97 | size_t os::Solaris::min_stack_allowed = 64*K; |
duke@435 | 98 | #define REG_SP UESP |
duke@435 | 99 | #define REG_PC EIP |
duke@435 | 100 | #define REG_FP EBP |
duke@435 | 101 | // 4900493 counter to prevent runaway LDTR refresh attempt |
duke@435 | 102 | |
duke@435 | 103 | static volatile int ldtr_refresh = 0; |
duke@435 | 104 | // the libthread instruction that faults because of the stale LDTR |
duke@435 | 105 | |
duke@435 | 106 | static const unsigned char movlfs[] = { 0x8e, 0xe0 // movl %eax,%fs |
duke@435 | 107 | }; |
duke@435 | 108 | #endif // AMD64 |
duke@435 | 109 | |
duke@435 | 110 | char* os::non_memory_address_word() { |
duke@435 | 111 | // Must never look like an address returned by reserve_memory, |
duke@435 | 112 | // even in its subfields (as defined by the CPU immediate fields, |
duke@435 | 113 | // if the CPU splits constants across multiple instructions). |
duke@435 | 114 | return (char*) -1; |
duke@435 | 115 | } |
duke@435 | 116 | |
duke@435 | 117 | // |
duke@435 | 118 | // Validate a ucontext retrieved from walking a uc_link of a ucontext. |
duke@435 | 119 | // There are issues with libthread giving out uc_links for different threads |
duke@435 | 120 | // on the same uc_link chain and bad or circular links. |
duke@435 | 121 | // |
duke@435 | 122 | bool os::Solaris::valid_ucontext(Thread* thread, ucontext_t* valid, ucontext_t* suspect) { |
duke@435 | 123 | if (valid >= suspect || |
duke@435 | 124 | valid->uc_stack.ss_flags != suspect->uc_stack.ss_flags || |
duke@435 | 125 | valid->uc_stack.ss_sp != suspect->uc_stack.ss_sp || |
duke@435 | 126 | valid->uc_stack.ss_size != suspect->uc_stack.ss_size) { |
duke@435 | 127 | DEBUG_ONLY(tty->print_cr("valid_ucontext: failed test 1");) |
duke@435 | 128 | return false; |
duke@435 | 129 | } |
duke@435 | 130 | |
duke@435 | 131 | if (thread->is_Java_thread()) { |
duke@435 | 132 | if (!valid_stack_address(thread, (address)suspect)) { |
duke@435 | 133 | DEBUG_ONLY(tty->print_cr("valid_ucontext: uc_link not in thread stack");) |
duke@435 | 134 | return false; |
duke@435 | 135 | } |
duke@435 | 136 | if (!valid_stack_address(thread, (address) suspect->uc_mcontext.gregs[REG_SP])) { |
duke@435 | 137 | DEBUG_ONLY(tty->print_cr("valid_ucontext: stackpointer not in thread stack");) |
duke@435 | 138 | return false; |
duke@435 | 139 | } |
duke@435 | 140 | } |
duke@435 | 141 | return true; |
duke@435 | 142 | } |
duke@435 | 143 | |
duke@435 | 144 | // We will only follow one level of uc_link since there are libthread |
duke@435 | 145 | // issues with ucontext linking and it is better to be safe and just |
duke@435 | 146 | // let caller retry later. |
duke@435 | 147 | ucontext_t* os::Solaris::get_valid_uc_in_signal_handler(Thread *thread, |
duke@435 | 148 | ucontext_t *uc) { |
duke@435 | 149 | |
duke@435 | 150 | ucontext_t *retuc = NULL; |
duke@435 | 151 | |
duke@435 | 152 | if (uc != NULL) { |
duke@435 | 153 | if (uc->uc_link == NULL) { |
duke@435 | 154 | // cannot validate without uc_link so accept current ucontext |
duke@435 | 155 | retuc = uc; |
duke@435 | 156 | } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) { |
duke@435 | 157 | // first ucontext is valid so try the next one |
duke@435 | 158 | uc = uc->uc_link; |
duke@435 | 159 | if (uc->uc_link == NULL) { |
duke@435 | 160 | // cannot validate without uc_link so accept current ucontext |
duke@435 | 161 | retuc = uc; |
duke@435 | 162 | } else if (os::Solaris::valid_ucontext(thread, uc, uc->uc_link)) { |
duke@435 | 163 | // the ucontext one level down is also valid so return it |
duke@435 | 164 | retuc = uc; |
duke@435 | 165 | } |
duke@435 | 166 | } |
duke@435 | 167 | } |
duke@435 | 168 | return retuc; |
duke@435 | 169 | } |
duke@435 | 170 | |
duke@435 | 171 | // Assumes ucontext is valid |
duke@435 | 172 | ExtendedPC os::Solaris::ucontext_get_ExtendedPC(ucontext_t *uc) { |
duke@435 | 173 | return ExtendedPC((address)uc->uc_mcontext.gregs[REG_PC]); |
duke@435 | 174 | } |
duke@435 | 175 | |
duke@435 | 176 | // Assumes ucontext is valid |
duke@435 | 177 | intptr_t* os::Solaris::ucontext_get_sp(ucontext_t *uc) { |
duke@435 | 178 | return (intptr_t*)uc->uc_mcontext.gregs[REG_SP]; |
duke@435 | 179 | } |
duke@435 | 180 | |
duke@435 | 181 | // Assumes ucontext is valid |
duke@435 | 182 | intptr_t* os::Solaris::ucontext_get_fp(ucontext_t *uc) { |
duke@435 | 183 | return (intptr_t*)uc->uc_mcontext.gregs[REG_FP]; |
duke@435 | 184 | } |
duke@435 | 185 | |
sla@5237 | 186 | address os::Solaris::ucontext_get_pc(ucontext_t *uc) { |
sla@5237 | 187 | return (address) uc->uc_mcontext.gregs[REG_PC]; |
sla@5237 | 188 | } |
sla@5237 | 189 | |
duke@435 | 190 | // For Forte Analyzer AsyncGetCallTrace profiling support - thread |
duke@435 | 191 | // is currently interrupted by SIGPROF. |
duke@435 | 192 | // |
duke@435 | 193 | // The difference between this and os::fetch_frame_from_context() is that |
duke@435 | 194 | // here we try to skip nested signal frames. |
duke@435 | 195 | ExtendedPC os::Solaris::fetch_frame_from_ucontext(Thread* thread, |
duke@435 | 196 | ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) { |
duke@435 | 197 | |
duke@435 | 198 | assert(thread != NULL, "just checking"); |
duke@435 | 199 | assert(ret_sp != NULL, "just checking"); |
duke@435 | 200 | assert(ret_fp != NULL, "just checking"); |
duke@435 | 201 | |
duke@435 | 202 | ucontext_t *luc = os::Solaris::get_valid_uc_in_signal_handler(thread, uc); |
duke@435 | 203 | return os::fetch_frame_from_context(luc, ret_sp, ret_fp); |
duke@435 | 204 | } |
duke@435 | 205 | |
duke@435 | 206 | ExtendedPC os::fetch_frame_from_context(void* ucVoid, |
duke@435 | 207 | intptr_t** ret_sp, intptr_t** ret_fp) { |
duke@435 | 208 | |
duke@435 | 209 | ExtendedPC epc; |
duke@435 | 210 | ucontext_t *uc = (ucontext_t*)ucVoid; |
duke@435 | 211 | |
duke@435 | 212 | if (uc != NULL) { |
duke@435 | 213 | epc = os::Solaris::ucontext_get_ExtendedPC(uc); |
duke@435 | 214 | if (ret_sp) *ret_sp = os::Solaris::ucontext_get_sp(uc); |
duke@435 | 215 | if (ret_fp) *ret_fp = os::Solaris::ucontext_get_fp(uc); |
duke@435 | 216 | } else { |
duke@435 | 217 | // construct empty ExtendedPC for return value checking |
duke@435 | 218 | epc = ExtendedPC(NULL); |
duke@435 | 219 | if (ret_sp) *ret_sp = (intptr_t *)NULL; |
duke@435 | 220 | if (ret_fp) *ret_fp = (intptr_t *)NULL; |
duke@435 | 221 | } |
duke@435 | 222 | |
duke@435 | 223 | return epc; |
duke@435 | 224 | } |
duke@435 | 225 | |
duke@435 | 226 | frame os::fetch_frame_from_context(void* ucVoid) { |
duke@435 | 227 | intptr_t* sp; |
duke@435 | 228 | intptr_t* fp; |
duke@435 | 229 | ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp); |
duke@435 | 230 | return frame(sp, fp, epc.pc()); |
duke@435 | 231 | } |
duke@435 | 232 | |
duke@435 | 233 | frame os::get_sender_for_C_frame(frame* fr) { |
duke@435 | 234 | return frame(fr->sender_sp(), fr->link(), fr->sender_pc()); |
duke@435 | 235 | } |
duke@435 | 236 | |
roland@3606 | 237 | extern "C" intptr_t *_get_current_sp(); // in .il file |
roland@3606 | 238 | |
roland@3606 | 239 | address os::current_stack_pointer() { |
roland@3606 | 240 | return (address)_get_current_sp(); |
roland@3606 | 241 | } |
roland@3606 | 242 | |
coleenp@907 | 243 | extern "C" intptr_t *_get_current_fp(); // in .il file |
duke@435 | 244 | |
duke@435 | 245 | frame os::current_frame() { |
coleenp@907 | 246 | intptr_t* fp = _get_current_fp(); // it's inlined so want current fp |
duke@435 | 247 | frame myframe((intptr_t*)os::current_stack_pointer(), |
duke@435 | 248 | (intptr_t*)fp, |
duke@435 | 249 | CAST_FROM_FN_PTR(address, os::current_frame)); |
duke@435 | 250 | if (os::is_first_C_frame(&myframe)) { |
duke@435 | 251 | // stack is not walkable |
sgoldman@542 | 252 | frame ret; // This will be a null useless frame |
sgoldman@542 | 253 | return ret; |
duke@435 | 254 | } else { |
duke@435 | 255 | return os::get_sender_for_C_frame(&myframe); |
duke@435 | 256 | } |
duke@435 | 257 | } |
duke@435 | 258 | |
duke@435 | 259 | static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) { |
duke@435 | 260 | char lwpstatusfile[PROCFILE_LENGTH]; |
duke@435 | 261 | int lwpfd, err; |
duke@435 | 262 | |
duke@435 | 263 | if (err = os::Solaris::thr_getstate(tid, flags, lwp, ss, rs)) |
duke@435 | 264 | return (err); |
duke@435 | 265 | if (*flags == TRS_LWPID) { |
duke@435 | 266 | sprintf(lwpstatusfile, "/proc/%d/lwp/%d/lwpstatus", getpid(), |
duke@435 | 267 | *lwp); |
duke@435 | 268 | if ((lwpfd = open(lwpstatusfile, O_RDONLY)) < 0) { |
duke@435 | 269 | perror("thr_mutator_status: open lwpstatus"); |
duke@435 | 270 | return (EINVAL); |
duke@435 | 271 | } |
duke@435 | 272 | if (pread(lwpfd, lwpstatus, sizeof (lwpstatus_t), (off_t)0) != |
duke@435 | 273 | sizeof (lwpstatus_t)) { |
duke@435 | 274 | perror("thr_mutator_status: read lwpstatus"); |
duke@435 | 275 | (void) close(lwpfd); |
duke@435 | 276 | return (EINVAL); |
duke@435 | 277 | } |
duke@435 | 278 | (void) close(lwpfd); |
duke@435 | 279 | } |
duke@435 | 280 | return (0); |
duke@435 | 281 | } |
duke@435 | 282 | |
duke@435 | 283 | #ifndef AMD64 |
duke@435 | 284 | |
duke@435 | 285 | // Detecting SSE support by OS |
duke@435 | 286 | // From solaris_i486.s |
duke@435 | 287 | extern "C" bool sse_check(); |
duke@435 | 288 | extern "C" bool sse_unavailable(); |
duke@435 | 289 | |
duke@435 | 290 | enum { SSE_UNKNOWN, SSE_NOT_SUPPORTED, SSE_SUPPORTED}; |
duke@435 | 291 | static int sse_status = SSE_UNKNOWN; |
duke@435 | 292 | |
duke@435 | 293 | |
duke@435 | 294 | static void check_for_sse_support() { |
duke@435 | 295 | if (!VM_Version::supports_sse()) { |
duke@435 | 296 | sse_status = SSE_NOT_SUPPORTED; |
duke@435 | 297 | return; |
duke@435 | 298 | } |
duke@435 | 299 | // looking for _sse_hw in libc.so, if it does not exist or |
duke@435 | 300 | // the value (int) is 0, OS has no support for SSE |
duke@435 | 301 | int *sse_hwp; |
duke@435 | 302 | void *h; |
duke@435 | 303 | |
duke@435 | 304 | if ((h=dlopen("/usr/lib/libc.so", RTLD_LAZY)) == NULL) { |
duke@435 | 305 | //open failed, presume no support for SSE |
duke@435 | 306 | sse_status = SSE_NOT_SUPPORTED; |
duke@435 | 307 | return; |
duke@435 | 308 | } |
duke@435 | 309 | if ((sse_hwp = (int *)dlsym(h, "_sse_hw")) == NULL) { |
duke@435 | 310 | sse_status = SSE_NOT_SUPPORTED; |
duke@435 | 311 | } else if (*sse_hwp == 0) { |
duke@435 | 312 | sse_status = SSE_NOT_SUPPORTED; |
duke@435 | 313 | } |
duke@435 | 314 | dlclose(h); |
duke@435 | 315 | |
duke@435 | 316 | if (sse_status == SSE_UNKNOWN) { |
duke@435 | 317 | bool (*try_sse)() = (bool (*)())sse_check; |
duke@435 | 318 | sse_status = (*try_sse)() ? SSE_SUPPORTED : SSE_NOT_SUPPORTED; |
duke@435 | 319 | } |
duke@435 | 320 | |
duke@435 | 321 | } |
duke@435 | 322 | |
twisti@1020 | 323 | #endif // AMD64 |
twisti@1020 | 324 | |
duke@435 | 325 | bool os::supports_sse() { |
twisti@1020 | 326 | #ifdef AMD64 |
twisti@1020 | 327 | return true; |
twisti@1020 | 328 | #else |
duke@435 | 329 | if (sse_status == SSE_UNKNOWN) |
duke@435 | 330 | check_for_sse_support(); |
duke@435 | 331 | return sse_status == SSE_SUPPORTED; |
twisti@1020 | 332 | #endif // AMD64 |
duke@435 | 333 | } |
duke@435 | 334 | |
duke@435 | 335 | bool os::is_allocatable(size_t bytes) { |
duke@435 | 336 | #ifdef AMD64 |
duke@435 | 337 | return true; |
duke@435 | 338 | #else |
duke@435 | 339 | |
duke@435 | 340 | if (bytes < 2 * G) { |
duke@435 | 341 | return true; |
duke@435 | 342 | } |
duke@435 | 343 | |
duke@435 | 344 | char* addr = reserve_memory(bytes, NULL); |
duke@435 | 345 | |
duke@435 | 346 | if (addr != NULL) { |
duke@435 | 347 | release_memory(addr, bytes); |
duke@435 | 348 | } |
duke@435 | 349 | |
duke@435 | 350 | return addr != NULL; |
duke@435 | 351 | #endif // AMD64 |
duke@435 | 352 | |
duke@435 | 353 | } |
duke@435 | 354 | |
coleenp@2507 | 355 | extern "C" JNIEXPORT int |
coleenp@2507 | 356 | JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid, |
coleenp@2507 | 357 | int abort_if_unrecognized) { |
duke@435 | 358 | ucontext_t* uc = (ucontext_t*) ucVoid; |
duke@435 | 359 | |
duke@435 | 360 | #ifndef AMD64 |
duke@435 | 361 | if (sig == SIGILL && info->si_addr == (caddr_t)sse_check) { |
duke@435 | 362 | // the SSE instruction faulted. supports_sse() needs to return false. |
duke@435 | 363 | uc->uc_mcontext.gregs[EIP] = (greg_t)sse_unavailable; |
duke@435 | 364 | return true; |
duke@435 | 365 | } |
duke@435 | 366 | #endif // !AMD64 |
duke@435 | 367 | |
duke@435 | 368 | Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady |
duke@435 | 369 | |
duke@435 | 370 | SignalHandlerMark shm(t); |
duke@435 | 371 | |
duke@435 | 372 | if(sig == SIGPIPE || sig == SIGXFSZ) { |
duke@435 | 373 | if (os::Solaris::chained_handler(sig, info, ucVoid)) { |
duke@435 | 374 | return true; |
duke@435 | 375 | } else { |
duke@435 | 376 | if (PrintMiscellaneous && (WizardMode || Verbose)) { |
duke@435 | 377 | char buf[64]; |
duke@435 | 378 | warning("Ignoring %s - see 4229104 or 6499219", |
duke@435 | 379 | os::exception_name(sig, buf, sizeof(buf))); |
duke@435 | 380 | |
duke@435 | 381 | } |
duke@435 | 382 | return true; |
duke@435 | 383 | } |
duke@435 | 384 | } |
duke@435 | 385 | |
duke@435 | 386 | JavaThread* thread = NULL; |
duke@435 | 387 | VMThread* vmthread = NULL; |
duke@435 | 388 | |
duke@435 | 389 | if (os::Solaris::signal_handlers_are_installed) { |
duke@435 | 390 | if (t != NULL ){ |
duke@435 | 391 | if(t->is_Java_thread()) { |
duke@435 | 392 | thread = (JavaThread*)t; |
duke@435 | 393 | } |
duke@435 | 394 | else if(t->is_VM_thread()){ |
duke@435 | 395 | vmthread = (VMThread *)t; |
duke@435 | 396 | } |
duke@435 | 397 | } |
duke@435 | 398 | } |
duke@435 | 399 | |
duke@435 | 400 | guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs"); |
duke@435 | 401 | |
duke@435 | 402 | if (sig == os::Solaris::SIGasync()) { |
sla@5237 | 403 | if(thread || vmthread){ |
sla@5237 | 404 | OSThread::SR_handler(t, uc); |
duke@435 | 405 | return true; |
duke@435 | 406 | } else if (os::Solaris::chained_handler(sig, info, ucVoid)) { |
duke@435 | 407 | return true; |
duke@435 | 408 | } else { |
duke@435 | 409 | // If os::Solaris::SIGasync not chained, and this is a non-vm and |
duke@435 | 410 | // non-java thread |
duke@435 | 411 | return true; |
duke@435 | 412 | } |
duke@435 | 413 | } |
duke@435 | 414 | |
duke@435 | 415 | if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) { |
duke@435 | 416 | // can't decode this kind of signal |
duke@435 | 417 | info = NULL; |
duke@435 | 418 | } else { |
duke@435 | 419 | assert(sig == info->si_signo, "bad siginfo"); |
duke@435 | 420 | } |
duke@435 | 421 | |
duke@435 | 422 | // decide if this trap can be handled by a stub |
duke@435 | 423 | address stub = NULL; |
duke@435 | 424 | |
duke@435 | 425 | address pc = NULL; |
duke@435 | 426 | |
duke@435 | 427 | //%note os_trap_1 |
duke@435 | 428 | if (info != NULL && uc != NULL && thread != NULL) { |
duke@435 | 429 | // factor me: getPCfromContext |
duke@435 | 430 | pc = (address) uc->uc_mcontext.gregs[REG_PC]; |
duke@435 | 431 | |
goetz@5400 | 432 | if (StubRoutines::is_safefetch_fault(pc)) { |
goetz@5400 | 433 | uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc)); |
goetz@5400 | 434 | return true; |
duke@435 | 435 | } |
duke@435 | 436 | |
duke@435 | 437 | // Handle ALL stack overflow variations here |
duke@435 | 438 | if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) { |
duke@435 | 439 | address addr = (address) info->si_addr; |
duke@435 | 440 | if (thread->in_stack_yellow_zone(addr)) { |
duke@435 | 441 | thread->disable_stack_yellow_zone(); |
duke@435 | 442 | if (thread->thread_state() == _thread_in_Java) { |
duke@435 | 443 | // Throw a stack overflow exception. Guard pages will be reenabled |
duke@435 | 444 | // while unwinding the stack. |
duke@435 | 445 | stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW); |
duke@435 | 446 | } else { |
duke@435 | 447 | // Thread was in the vm or native code. Return and try to finish. |
duke@435 | 448 | return true; |
duke@435 | 449 | } |
duke@435 | 450 | } else if (thread->in_stack_red_zone(addr)) { |
duke@435 | 451 | // Fatal red zone violation. Disable the guard pages and fall through |
duke@435 | 452 | // to handle_unexpected_exception way down below. |
duke@435 | 453 | thread->disable_stack_red_zone(); |
duke@435 | 454 | tty->print_raw_cr("An irrecoverable stack overflow has occurred."); |
duke@435 | 455 | } |
duke@435 | 456 | } |
duke@435 | 457 | |
duke@435 | 458 | if (thread->thread_state() == _thread_in_vm) { |
duke@435 | 459 | if (sig == SIGBUS && info->si_code == BUS_OBJERR && thread->doing_unsafe_access()) { |
duke@435 | 460 | stub = StubRoutines::handler_for_unsafe_access(); |
duke@435 | 461 | } |
duke@435 | 462 | } |
duke@435 | 463 | |
duke@435 | 464 | if (thread->thread_state() == _thread_in_Java) { |
duke@435 | 465 | // Support Safepoint Polling |
duke@435 | 466 | if ( sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) { |
duke@435 | 467 | stub = SharedRuntime::get_poll_stub(pc); |
duke@435 | 468 | } |
duke@435 | 469 | else if (sig == SIGBUS && info->si_code == BUS_OBJERR) { |
duke@435 | 470 | // BugId 4454115: A read from a MappedByteBuffer can fault |
duke@435 | 471 | // here if the underlying file has been truncated. |
duke@435 | 472 | // Do not crash the VM in such a case. |
duke@435 | 473 | CodeBlob* cb = CodeCache::find_blob_unsafe(pc); |
duke@435 | 474 | nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL; |
duke@435 | 475 | if (nm != NULL && nm->has_unsafe_access()) { |
duke@435 | 476 | stub = StubRoutines::handler_for_unsafe_access(); |
duke@435 | 477 | } |
duke@435 | 478 | } |
duke@435 | 479 | else |
duke@435 | 480 | if (sig == SIGFPE && info->si_code == FPE_INTDIV) { |
duke@435 | 481 | // integer divide by zero |
duke@435 | 482 | stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); |
duke@435 | 483 | } |
duke@435 | 484 | #ifndef AMD64 |
duke@435 | 485 | else if (sig == SIGFPE && info->si_code == FPE_FLTDIV) { |
duke@435 | 486 | // floating-point divide by zero |
duke@435 | 487 | stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO); |
duke@435 | 488 | } |
duke@435 | 489 | else if (sig == SIGFPE && info->si_code == FPE_FLTINV) { |
duke@435 | 490 | // The encoding of D2I in i486.ad can cause an exception prior |
duke@435 | 491 | // to the fist instruction if there was an invalid operation |
duke@435 | 492 | // pending. We want to dismiss that exception. From the win_32 |
duke@435 | 493 | // side it also seems that if it really was the fist causing |
duke@435 | 494 | // the exception that we do the d2i by hand with different |
duke@435 | 495 | // rounding. Seems kind of weird. QQQ TODO |
duke@435 | 496 | // Note that we take the exception at the NEXT floating point instruction. |
duke@435 | 497 | if (pc[0] == 0xDB) { |
duke@435 | 498 | assert(pc[0] == 0xDB, "not a FIST opcode"); |
duke@435 | 499 | assert(pc[1] == 0x14, "not a FIST opcode"); |
duke@435 | 500 | assert(pc[2] == 0x24, "not a FIST opcode"); |
duke@435 | 501 | return true; |
duke@435 | 502 | } else { |
duke@435 | 503 | assert(pc[-3] == 0xDB, "not an flt invalid opcode"); |
duke@435 | 504 | assert(pc[-2] == 0x14, "not an flt invalid opcode"); |
duke@435 | 505 | assert(pc[-1] == 0x24, "not an flt invalid opcode"); |
duke@435 | 506 | } |
duke@435 | 507 | } |
duke@435 | 508 | else if (sig == SIGFPE ) { |
duke@435 | 509 | tty->print_cr("caught SIGFPE, info 0x%x.", info->si_code); |
duke@435 | 510 | } |
duke@435 | 511 | #endif // !AMD64 |
duke@435 | 512 | |
duke@435 | 513 | // QQQ It doesn't seem that we need to do this on x86 because we should be able |
duke@435 | 514 | // to return properly from the handler without this extra stuff on the back side. |
duke@435 | 515 | |
duke@435 | 516 | else if (sig == SIGSEGV && info->si_code > 0 && !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) { |
duke@435 | 517 | // Determination of interpreter/vtable stub/compiled code null exception |
duke@435 | 518 | stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); |
duke@435 | 519 | } |
duke@435 | 520 | } |
duke@435 | 521 | |
duke@435 | 522 | // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in |
duke@435 | 523 | // and the heap gets shrunk before the field access. |
duke@435 | 524 | if ((sig == SIGSEGV) || (sig == SIGBUS)) { |
duke@435 | 525 | address addr = JNI_FastGetField::find_slowcase_pc(pc); |
duke@435 | 526 | if (addr != (address)-1) { |
duke@435 | 527 | stub = addr; |
duke@435 | 528 | } |
duke@435 | 529 | } |
duke@435 | 530 | |
duke@435 | 531 | // Check to see if we caught the safepoint code in the |
duke@435 | 532 | // process of write protecting the memory serialization page. |
duke@435 | 533 | // It write enables the page immediately after protecting it |
duke@435 | 534 | // so we can just return to retry the write. |
duke@435 | 535 | if ((sig == SIGSEGV) && |
duke@435 | 536 | os::is_memory_serialize_page(thread, (address)info->si_addr)) { |
duke@435 | 537 | // Block current thread until the memory serialize page permission restored. |
duke@435 | 538 | os::block_on_serialize_page_trap(); |
duke@435 | 539 | return true; |
duke@435 | 540 | } |
duke@435 | 541 | } |
duke@435 | 542 | |
duke@435 | 543 | // Execution protection violation |
duke@435 | 544 | // |
duke@435 | 545 | // Preventative code for future versions of Solaris which may |
duke@435 | 546 | // enable execution protection when running the 32-bit VM on AMD64. |
duke@435 | 547 | // |
duke@435 | 548 | // This should be kept as the last step in the triage. We don't |
duke@435 | 549 | // have a dedicated trap number for a no-execute fault, so be |
duke@435 | 550 | // conservative and allow other handlers the first shot. |
duke@435 | 551 | // |
duke@435 | 552 | // Note: We don't test that info->si_code == SEGV_ACCERR here. |
duke@435 | 553 | // this si_code is so generic that it is almost meaningless; and |
duke@435 | 554 | // the si_code for this condition may change in the future. |
duke@435 | 555 | // Furthermore, a false-positive should be harmless. |
duke@435 | 556 | if (UnguardOnExecutionViolation > 0 && |
duke@435 | 557 | (sig == SIGSEGV || sig == SIGBUS) && |
duke@435 | 558 | uc->uc_mcontext.gregs[TRAPNO] == T_PGFLT) { // page fault |
duke@435 | 559 | int page_size = os::vm_page_size(); |
duke@435 | 560 | address addr = (address) info->si_addr; |
duke@435 | 561 | address pc = (address) uc->uc_mcontext.gregs[REG_PC]; |
duke@435 | 562 | // Make sure the pc and the faulting address are sane. |
duke@435 | 563 | // |
duke@435 | 564 | // If an instruction spans a page boundary, and the page containing |
duke@435 | 565 | // the beginning of the instruction is executable but the following |
duke@435 | 566 | // page is not, the pc and the faulting address might be slightly |
duke@435 | 567 | // different - we still want to unguard the 2nd page in this case. |
duke@435 | 568 | // |
duke@435 | 569 | // 15 bytes seems to be a (very) safe value for max instruction size. |
duke@435 | 570 | bool pc_is_near_addr = |
duke@435 | 571 | (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); |
duke@435 | 572 | bool instr_spans_page_boundary = |
duke@435 | 573 | (align_size_down((intptr_t) pc ^ (intptr_t) addr, |
duke@435 | 574 | (intptr_t) page_size) > 0); |
duke@435 | 575 | |
duke@435 | 576 | if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { |
duke@435 | 577 | static volatile address last_addr = |
duke@435 | 578 | (address) os::non_memory_address_word(); |
duke@435 | 579 | |
duke@435 | 580 | // In conservative mode, don't unguard unless the address is in the VM |
duke@435 | 581 | if (addr != last_addr && |
duke@435 | 582 | (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { |
duke@435 | 583 | |
coleenp@912 | 584 | // Make memory rwx and retry |
duke@435 | 585 | address page_start = |
duke@435 | 586 | (address) align_size_down((intptr_t) addr, (intptr_t) page_size); |
coleenp@912 | 587 | bool res = os::protect_memory((char*) page_start, page_size, |
coleenp@912 | 588 | os::MEM_PROT_RWX); |
duke@435 | 589 | |
duke@435 | 590 | if (PrintMiscellaneous && Verbose) { |
duke@435 | 591 | char buf[256]; |
duke@435 | 592 | jio_snprintf(buf, sizeof(buf), "Execution protection violation " |
duke@435 | 593 | "at " INTPTR_FORMAT |
duke@435 | 594 | ", unguarding " INTPTR_FORMAT ": %s, errno=%d", addr, |
duke@435 | 595 | page_start, (res ? "success" : "failed"), errno); |
duke@435 | 596 | tty->print_raw_cr(buf); |
duke@435 | 597 | } |
duke@435 | 598 | stub = pc; |
duke@435 | 599 | |
duke@435 | 600 | // Set last_addr so if we fault again at the same address, we don't end |
duke@435 | 601 | // up in an endless loop. |
duke@435 | 602 | // |
duke@435 | 603 | // There are two potential complications here. Two threads trapping at |
duke@435 | 604 | // the same address at the same time could cause one of the threads to |
duke@435 | 605 | // think it already unguarded, and abort the VM. Likely very rare. |
duke@435 | 606 | // |
duke@435 | 607 | // The other race involves two threads alternately trapping at |
duke@435 | 608 | // different addresses and failing to unguard the page, resulting in |
duke@435 | 609 | // an endless loop. This condition is probably even more unlikely than |
duke@435 | 610 | // the first. |
duke@435 | 611 | // |
duke@435 | 612 | // Although both cases could be avoided by using locks or thread local |
duke@435 | 613 | // last_addr, these solutions are unnecessary complication: this |
duke@435 | 614 | // handler is a best-effort safety net, not a complete solution. It is |
duke@435 | 615 | // disabled by default and should only be used as a workaround in case |
duke@435 | 616 | // we missed any no-execute-unsafe VM code. |
duke@435 | 617 | |
duke@435 | 618 | last_addr = addr; |
duke@435 | 619 | } |
duke@435 | 620 | } |
duke@435 | 621 | } |
duke@435 | 622 | |
duke@435 | 623 | if (stub != NULL) { |
duke@435 | 624 | // save all thread context in case we need to restore it |
duke@435 | 625 | |
duke@435 | 626 | if (thread != NULL) thread->set_saved_exception_pc(pc); |
duke@435 | 627 | // 12/02/99: On Sparc it appears that the full context is also saved |
duke@435 | 628 | // but as yet, no one looks at or restores that saved context |
duke@435 | 629 | // factor me: setPC |
duke@435 | 630 | uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub; |
duke@435 | 631 | return true; |
duke@435 | 632 | } |
duke@435 | 633 | |
duke@435 | 634 | // signal-chaining |
duke@435 | 635 | if (os::Solaris::chained_handler(sig, info, ucVoid)) { |
duke@435 | 636 | return true; |
duke@435 | 637 | } |
duke@435 | 638 | |
duke@435 | 639 | #ifndef AMD64 |
duke@435 | 640 | // Workaround (bug 4900493) for Solaris kernel bug 4966651. |
duke@435 | 641 | // Handle an undefined selector caused by an attempt to assign |
duke@435 | 642 | // fs in libthread getipriptr(). With the current libthread design every 512 |
duke@435 | 643 | // thread creations the LDT for a private thread data structure is extended |
duke@435 | 644 | // and there is a hazard that another thread attempting a thread creation
duke@435 | 645 | // will use a stale LDTR that doesn't reflect the structure's growth, |
duke@435 | 646 | // causing a GP fault. |
duke@435 | 647 | // Enforce the probable limit of passes through here to guard against an |
duke@435 | 648 | // infinite loop if some other move to fs caused the GP fault. Note that |
duke@435 | 649 | // this loop counter is ultimately a heuristic as it is possible for |
duke@435 | 650 | // more than one thread to generate this fault at a time in an MP system. |
duke@435 | 651 | // In the case of the loop count being exceeded or if the poll fails |
duke@435 | 652 | // just fall through to a fatal error. |
duke@435 | 653 | // If there is some other source of T_GPFLT traps and the text at EIP is |
duke@435 | 654 | // unreadable this code will loop infinitely until the stack is exhausted.
duke@435 | 655 | // The key to diagnosis in this case is to look for the bottom signal handler |
duke@435 | 656 | // frame. |
duke@435 | 657 | |
duke@435 | 658 | if(! IgnoreLibthreadGPFault) { |
duke@435 | 659 | if (sig == SIGSEGV && uc->uc_mcontext.gregs[TRAPNO] == T_GPFLT) { |
duke@435 | 660 | const unsigned char *p = |
duke@435 | 661 | (unsigned const char *) uc->uc_mcontext.gregs[EIP]; |
duke@435 | 662 | |
duke@435 | 663 | // Expected instruction? |
duke@435 | 664 | |
duke@435 | 665 | if(p[0] == movlfs[0] && p[1] == movlfs[1]) { |
duke@435 | 666 | |
duke@435 | 667 | Atomic::inc(&ldtr_refresh); |
duke@435 | 668 | |
duke@435 | 669 | // Infinite loop? |
duke@435 | 670 | |
duke@435 | 671 | if(ldtr_refresh < ((2 << 16) / PAGESIZE)) { |
duke@435 | 672 | |
duke@435 | 673 | // No, force scheduling to get a fresh view of the LDTR |
duke@435 | 674 | |
duke@435 | 675 | if(poll(NULL, 0, 10) == 0) { |
duke@435 | 676 | |
duke@435 | 677 | // Retry the move |
duke@435 | 678 | |
duke@435 | 679 | return false; |
duke@435 | 680 | } |
duke@435 | 681 | } |
duke@435 | 682 | } |
duke@435 | 683 | } |
duke@435 | 684 | } |
duke@435 | 685 | #endif // !AMD64 |
duke@435 | 686 | |
duke@435 | 687 | if (!abort_if_unrecognized) { |
duke@435 | 688 | // caller wants another chance, so give it to him |
duke@435 | 689 | return false; |
duke@435 | 690 | } |
duke@435 | 691 | |
duke@435 | 692 | if (!os::Solaris::libjsig_is_loaded) { |
duke@435 | 693 | struct sigaction oldAct; |
duke@435 | 694 | sigaction(sig, (struct sigaction *)0, &oldAct); |
duke@435 | 695 | if (oldAct.sa_sigaction != signalHandler) { |
duke@435 | 696 | void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction) |
duke@435 | 697 | : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); |
twisti@1040 | 698 | warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand); |
duke@435 | 699 | } |
duke@435 | 700 | } |
duke@435 | 701 | |
duke@435 | 702 | if (pc == NULL && uc != NULL) { |
duke@435 | 703 | pc = (address) uc->uc_mcontext.gregs[REG_PC]; |
duke@435 | 704 | } |
duke@435 | 705 | |
duke@435 | 706 | // unmask current signal |
duke@435 | 707 | sigset_t newset; |
duke@435 | 708 | sigemptyset(&newset); |
duke@435 | 709 | sigaddset(&newset, sig); |
duke@435 | 710 | sigprocmask(SIG_UNBLOCK, &newset, NULL); |
duke@435 | 711 | |
coleenp@2418 | 712 | // Determine which sort of error to throw. Out of swap may signal |
coleenp@2418 | 713 | // on the thread stack, which could get a mapping error when touched. |
coleenp@2418 | 714 | address addr = (address) info->si_addr; |
coleenp@2418 | 715 | if (sig == SIGBUS && info->si_code == BUS_OBJERR && info->si_errno == ENOMEM) { |
ccheung@4993 | 716 | vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "Out of swap space to map in thread stack."); |
coleenp@2418 | 717 | } |
coleenp@2418 | 718 | |
duke@435 | 719 | VMError err(t, sig, pc, info, ucVoid); |
duke@435 | 720 | err.report_and_die(); |
duke@435 | 721 | |
duke@435 | 722 | ShouldNotReachHere(); |
duke@435 | 723 | } |
duke@435 | 724 | |
// Print the general-purpose register set, a hex dump of the top of the
// stack, and a hex dump of the code around the faulting pc, all taken
// from the given signal-handler context. Used by error reporting
// (hs_err files). 'context' is expected to be a Solaris ucontext_t*;
// a NULL context prints nothing.
void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef AMD64
  st->print( "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print( "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print( "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print( "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print( "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", RFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RFL]);
#else
  st->print( "EAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EAX]);
  st->print(", EBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBX]);
  st->print(", ECX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ECX]);
  st->print(", EDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDX]);
  st->cr();
  // Note: user ESP is read from the UESP slot of the gregs array.
  st->print( "ESP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[UESP]);
  st->print(", EBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EBP]);
  st->print(", ESI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[ESI]);
  st->print(", EDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EDI]);
  st->cr();
  st->print( "EIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[EFL]);
#endif // AMD64
  st->cr();
  st->cr();

  // Hex dump of the stack starting at the interrupted sp.
  // NOTE(review): 'sp + 8*sizeof(intptr_t)' is pointer arithmetic in
  // intptr_t units, so this dumps 8*sizeof(intptr_t) words rather than
  // 8 words — presumably intentional to show a sizable window; confirm
  // against the other platform files before changing.
  intptr_t *sp = (intptr_t *)os::Solaris::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  ExtendedPC epc = os::Solaris::ucontext_get_ExtendedPC(uc);
  address pc = epc.pc();
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}
never@2262 | 783 | |
// For each general-purpose register in the signal-handler context, print
// a description of what it points to (via print_location), for error
// reporting. 'context' is expected to be a Solaris ucontext_t*; a NULL
// context prints nothing.
void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  ucontext_t *uc = (ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
  st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
  st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
  st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
  st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
  st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
  st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
  st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
  st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
  st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
  st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
  st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
  st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
  st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
  st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
  st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
  st->print("EAX="); print_location(st, uc->uc_mcontext.gregs[EAX]);
  st->print("EBX="); print_location(st, uc->uc_mcontext.gregs[EBX]);
  st->print("ECX="); print_location(st, uc->uc_mcontext.gregs[ECX]);
  st->print("EDX="); print_location(st, uc->uc_mcontext.gregs[EDX]);
  st->print("ESP="); print_location(st, uc->uc_mcontext.gregs[UESP]);
  st->print("EBP="); print_location(st, uc->uc_mcontext.gregs[EBP]);
  st->print("ESI="); print_location(st, uc->uc_mcontext.gregs[ESI]);
  st->print("EDI="); print_location(st, uc->uc_mcontext.gregs[EDI]);
#endif

  st->cr();
}
duke@435 | 828 | |
bobv@2036 | 829 | |
duke@435 | 830 | #ifdef AMD64 |
// Per-thread FPU initialization. On AMD64 no adjustment is made here
// (contrast the 32-bit variant below, which sets the x87 control word).
void os::Solaris::init_thread_fpu_state(void) {
  // Nothing to do
}
duke@435 | 834 | #else |
// From solaris_i486.s
// Assembly helper that adjusts the x87 FPU control word.
extern "C" void fixcw();

// Per-thread FPU initialization (32-bit x86 variant).
void os::Solaris::init_thread_fpu_state(void) {
  // Set fpu to 53 bit precision. This happens too early to use a stub.
  fixcw();
}
duke@435 | 842 | |
// These routines are the initial value of atomic_xchg_entry(),
// atomic_cmpxchg_entry(), atomic_inc_entry() and fence_entry()
// until initialization is complete.
// TODO - replace with .il implementation when compiler supports it.

// Signatures shared by the generated atomic stubs and the bootstrap
// functions below, so the bootstrap code can cache the stub entry in the
// corresponding os::atomic_*_func pointer and delegate to it.
typedef jint xchg_func_t (jint, volatile jint*);
typedef jint cmpxchg_func_t (jint, volatile jint*, jint);
typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
typedef jint add_func_t (jint, volatile jint*);
duke@435 | 852 | |
duke@435 | 853 | jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) { |
duke@435 | 854 | // try to use the stub: |
duke@435 | 855 | xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry()); |
duke@435 | 856 | |
duke@435 | 857 | if (func != NULL) { |
duke@435 | 858 | os::atomic_xchg_func = func; |
duke@435 | 859 | return (*func)(exchange_value, dest); |
duke@435 | 860 | } |
duke@435 | 861 | assert(Threads::number_of_threads() == 0, "for bootstrap only"); |
duke@435 | 862 | |
duke@435 | 863 | jint old_value = *dest; |
duke@435 | 864 | *dest = exchange_value; |
duke@435 | 865 | return old_value; |
duke@435 | 866 | } |
duke@435 | 867 | |
duke@435 | 868 | jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) { |
duke@435 | 869 | // try to use the stub: |
duke@435 | 870 | cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); |
duke@435 | 871 | |
duke@435 | 872 | if (func != NULL) { |
duke@435 | 873 | os::atomic_cmpxchg_func = func; |
duke@435 | 874 | return (*func)(exchange_value, dest, compare_value); |
duke@435 | 875 | } |
duke@435 | 876 | assert(Threads::number_of_threads() == 0, "for bootstrap only"); |
duke@435 | 877 | |
duke@435 | 878 | jint old_value = *dest; |
duke@435 | 879 | if (old_value == compare_value) |
duke@435 | 880 | *dest = exchange_value; |
duke@435 | 881 | return old_value; |
duke@435 | 882 | } |
duke@435 | 883 | |
duke@435 | 884 | jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) { |
duke@435 | 885 | // try to use the stub: |
duke@435 | 886 | cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); |
duke@435 | 887 | |
duke@435 | 888 | if (func != NULL) { |
duke@435 | 889 | os::atomic_cmpxchg_long_func = func; |
duke@435 | 890 | return (*func)(exchange_value, dest, compare_value); |
duke@435 | 891 | } |
duke@435 | 892 | assert(Threads::number_of_threads() == 0, "for bootstrap only"); |
duke@435 | 893 | |
duke@435 | 894 | jlong old_value = *dest; |
duke@435 | 895 | if (old_value == compare_value) |
duke@435 | 896 | *dest = exchange_value; |
duke@435 | 897 | return old_value; |
duke@435 | 898 | } |
duke@435 | 899 | |
duke@435 | 900 | jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) { |
duke@435 | 901 | // try to use the stub: |
duke@435 | 902 | add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry()); |
duke@435 | 903 | |
duke@435 | 904 | if (func != NULL) { |
duke@435 | 905 | os::atomic_add_func = func; |
duke@435 | 906 | return (*func)(add_value, dest); |
duke@435 | 907 | } |
duke@435 | 908 | assert(Threads::number_of_threads() == 0, "for bootstrap only"); |
duke@435 | 909 | |
duke@435 | 910 | return (*dest) += add_value; |
duke@435 | 911 | } |
duke@435 | 912 | |
// The atomic entry points start out aimed at the bootstrap implementations
// above; each bootstrap call redirects its pointer to the generated stub
// once StubRoutines has produced it.
xchg_func_t* os::atomic_xchg_func = os::atomic_xchg_bootstrap;
cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;
cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
add_func_t* os::atomic_add_func = os::atomic_add_bootstrap;
duke@435 | 917 | |
// Assembly helper (presumably loads the FPU control word from *ptr —
// defined in the Solaris x86 .s file; confirm there before relying on it).
extern "C" void _solaris_raw_setup_fpu(address ptr);
// Set the FPU control word to the VM's standard value, taken from
// StubRoutines::addr_fpu_cntrl_wrd_std().
void os::setup_fpu() {
  address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std();
  _solaris_raw_setup_fpu(fpu_cntrl);
}
duke@435 | 923 | #endif // AMD64 |
roland@3606 | 924 | |
roland@3606 | 925 | #ifndef PRODUCT |
roland@3606 | 926 | void os::verify_stack_alignment() { |
roland@3606 | 927 | #ifdef AMD64 |
roland@3606 | 928 | assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment"); |
roland@3606 | 929 | #endif |
roland@3606 | 930 | } |
roland@3606 | 931 | #endif |