src/cpu/sparc/vm/frame_sparc.cpp

author       simonis
date         Wed, 24 Sep 2014 12:19:07 -0700
changeset    7553:f43fad8786fc
parent       5784:190899198332
child        7994:04ff2f6cd0eb
child        8199:5d96c022391c
permissions  -rw-r--r--

8058345: Refactor native stack printing from vmError.cpp to debug.cpp to make it available in gdb as well
Summary: Also fix stack trace on x86 to enable walking of runtime stubs and native wrappers
Reviewed-by: kvn

duke@435 1 /*
drchase@4942 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "interpreter/interpreter.hpp"
stefank@2314 27 #include "memory/resourceArea.hpp"
stefank@2314 28 #include "oops/markOop.hpp"
coleenp@4037 29 #include "oops/method.hpp"
stefank@2314 30 #include "oops/oop.inline.hpp"
iveresov@3495 31 #include "prims/methodHandles.hpp"
stefank@2314 32 #include "runtime/frame.inline.hpp"
stefank@2314 33 #include "runtime/handles.inline.hpp"
stefank@2314 34 #include "runtime/javaCalls.hpp"
stefank@2314 35 #include "runtime/monitorChunk.hpp"
stefank@2314 36 #include "runtime/signature.hpp"
stefank@2314 37 #include "runtime/stubCodeGenerator.hpp"
stefank@2314 38 #include "runtime/stubRoutines.hpp"
stefank@2314 39 #include "vmreg_sparc.inline.hpp"
stefank@2314 40 #ifdef COMPILER1
stefank@2314 41 #include "c1/c1_Runtime1.hpp"
stefank@2314 42 #include "runtime/vframeArray.hpp"
stefank@2314 43 #endif
duke@435 44
duke@435 45 void RegisterMap::pd_clear() {
duke@435 46 if (_thread->has_last_Java_frame()) {
duke@435 47 frame fr = _thread->last_frame();
duke@435 48 _window = fr.sp();
duke@435 49 } else {
duke@435 50 _window = NULL;
duke@435 51 }
duke@435 52 _younger_window = NULL;
duke@435 53 }
duke@435 54
duke@435 55
duke@435 56 // Unified register numbering scheme: each 32-bits counts as a register
duke@435 57 // number, so all the V9 registers take 2 slots.
duke@435 58 const static int R_L_nums[] = {0+040,2+040,4+040,6+040,8+040,10+040,12+040,14+040};
duke@435 59 const static int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};
duke@435 60 const static int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};
duke@435 61 const static int R_G_nums[] = {0+000,2+000,4+000,6+000,8+000,10+000,12+000,14+000};
duke@435 62 static RegisterMap::LocationValidType bad_mask = 0;
duke@435 63 static RegisterMap::LocationValidType R_LIO_mask = 0;
duke@435 64 static bool register_map_inited = false;
duke@435 65
duke@435 66 static void register_map_init() {
duke@435 67 if (!register_map_inited) {
duke@435 68 register_map_inited = true;
duke@435 69 int i;
duke@435 70 for (i = 0; i < 8; i++) {
duke@435 71 assert(R_L_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 72 assert(R_I_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 73 assert(R_O_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 74 assert(R_G_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 75 }
duke@435 76
duke@435 77 bad_mask |= (1LL << R_O_nums[6]); // SP
duke@435 78 bad_mask |= (1LL << R_O_nums[7]); // cPC
duke@435 79 bad_mask |= (1LL << R_I_nums[6]); // FP
duke@435 80 bad_mask |= (1LL << R_I_nums[7]); // rPC
duke@435 81 bad_mask |= (1LL << R_G_nums[2]); // TLS
duke@435 82 bad_mask |= (1LL << R_G_nums[7]); // reserved by libthread
duke@435 83
duke@435 84 for (i = 0; i < 8; i++) {
duke@435 85 R_LIO_mask |= (1LL << R_L_nums[i]);
duke@435 86 R_LIO_mask |= (1LL << R_I_nums[i]);
duke@435 87 R_LIO_mask |= (1LL << R_O_nums[i]);
duke@435 88 }
duke@435 89 }
duke@435 90 }
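// Illustrative aside, not part of the original file: a minimal standalone
// sketch of the numbering scheme above. The slot values are copied from the
// R_*_nums tables; the tiny main() and printf are illustration only, so the
// block is excluded from compilation.
#if 0
#include <cstdint>
#include <cstdio>

int main() {
  // Same octal encoding as above: globals start at 000, outs at 020,
  // locals at 040, ins at 060; each 64-bit register takes two 32-bit slots.
  const int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};
  const int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};

  uint64_t bad_mask = 0;
  bad_mask |= 1ULL << R_O_nums[6];   // %o6 == SP -> bit 16 + 12 = 28
  bad_mask |= 1ULL << R_I_nums[6];   // %i6 == FP -> bit 48 + 12 = 60

  printf("SP bit = %d, FP bit = %d, bad_mask = 0x%llx\n",
         R_O_nums[6], R_I_nums[6], (unsigned long long) bad_mask);
  return 0;
}
#endif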
duke@435 91
duke@435 92
duke@435 93 address RegisterMap::pd_location(VMReg regname) const {
duke@435 94 register_map_init();
duke@435 95
duke@435 96 assert(regname->is_reg(), "sanity check");
duke@435 97 // Only the GPRs get handled this way
duke@435 98 if( !regname->is_Register())
duke@435 99 return NULL;
duke@435 100
duke@435 101 // don't talk about bad registers
duke@435 102 if ((bad_mask & ((LocationValidType)1 << regname->value())) != 0) {
duke@435 103 return NULL;
duke@435 104 }
duke@435 105
duke@435 106 // Convert to a GPR
duke@435 107 Register reg;
duke@435 108 int second_word = 0;
duke@435 109 // 32-bit registers for in, out and local
duke@435 110 if (!regname->is_concrete()) {
duke@435 111 // HMM: ought to return NULL for any non-concrete (odd) vmreg.
duke@435 112 // This is all tied up in the fact that we put out double oopMaps for
duke@435 113 // register locations. When that is fixed we will return NULL
duke@435 114 // (or assert here).
duke@435 115 reg = regname->prev()->as_Register();
duke@435 116 #ifdef _LP64
duke@435 117 second_word = sizeof(jint);
duke@435 118 #else
duke@435 119 return NULL;
duke@435 120 #endif // _LP64
duke@435 121 } else {
duke@435 122 reg = regname->as_Register();
duke@435 123 }
duke@435 124 if (reg->is_out()) {
duke@435 125 assert(_younger_window != NULL, "Younger window should be available");
duke@435 126 return second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
duke@435 127 }
duke@435 128 if (reg->is_local() || reg->is_in()) {
duke@435 129 assert(_window != NULL, "Window should be available");
duke@435 130 return second_word + (address)&_window[reg->sp_offset_in_saved_window()];
duke@435 131 }
duke@435 132 // Only the window'd GPRs get handled this way; not the globals.
duke@435 133 return NULL;
duke@435 134 }
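// Hedged usage sketch (an assumption, not code from this file): it presumes
// the generic RegisterMap::location(VMReg) wrapper falls back to pd_location()
// when no explicit location has been recorded, which is how callers normally
// reach the windowed lookup above. Excluded from compilation.
#if 0
static intptr_t* saved_window_slot(JavaThread* thread, Register reg) {
  RegisterMap map(thread, false /* update_map */);
  frame fr = thread->last_frame();
  fr = fr.sender(&map);                       // walking shifts the map's window

  // For windowed GPRs this ends up in RegisterMap::pd_location() above.
  return (intptr_t*) map.location(reg->as_VMReg());
}
#endif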
duke@435 135
duke@435 136
duke@435 137 #ifdef ASSERT
duke@435 138 void RegisterMap::check_location_valid() {
duke@435 139 register_map_init();
duke@435 140 assert((_location_valid[0] & bad_mask) == 0, "cannot have special locations for SP,FP,TLS,etc.");
duke@435 141 }
duke@435 142 #endif
duke@435 143
duke@435 144 // We are shifting windows. That means we are moving all %i to %o,
duke@435 145 // getting rid of all current %l, and keeping all %g. This is only
duke@435 146 // complicated if any of the location pointers for these are valid.
duke@435 147 // The normal case is that everything is in its standard register window
duke@435 148 // home, and _location_valid[0] is zero. In that case, this routine
duke@435 149 // does exactly nothing.
duke@435 150 void RegisterMap::shift_individual_registers() {
duke@435 151 if (!update_map()) return; // this only applies to maps with locations
duke@435 152 register_map_init();
duke@435 153 check_location_valid();
duke@435 154
duke@435 155 LocationValidType lv = _location_valid[0];
duke@435 156 LocationValidType lv0 = lv;
duke@435 157
duke@435 158 lv &= ~R_LIO_mask; // clear %l, %o, %i regs
duke@435 159
duke@435 160 // if we cleared some non-%g locations, we may have to do some shifting
duke@435 161 if (lv != lv0) {
duke@435 162 // copy %i0-%i5 to %o0-%o5, if they have special locations
duke@435 163 // This can happen within stubs which spill argument registers
duke@435 164 // around a dynamic link operation, such as resolve_opt_virtual_call.
duke@435 165 for (int i = 0; i < 8; i++) {
duke@435 166 if (lv0 & (1LL << R_I_nums[i])) {
duke@435 167 _location[R_O_nums[i]] = _location[R_I_nums[i]];
duke@435 168 lv |= (1LL << R_O_nums[i]);
duke@435 169 }
duke@435 170 }
duke@435 171 }
duke@435 172
duke@435 173 _location_valid[0] = lv;
duke@435 174 check_location_valid();
duke@435 175 }
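// Illustrative aside, not part of the original file: the %i -> %o shift above
// expressed on the location-valid bits alone, reusing the slot values from the
// tables earlier in this file. Excluded from compilation.
#if 0
#include <cstdint>
#include <cstdio>

int main() {
  const int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};
  const int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};

  uint64_t lv0 = 1ULL << R_I_nums[0];   // pretend only %i0 had a special location
  uint64_t lv  = 0;                     // after clearing all %l/%i/%o bits
  for (int i = 0; i < 8; i++) {
    if (lv0 & (1ULL << R_I_nums[i])) {
      lv |= 1ULL << R_O_nums[i];        // the value reappears as the sender's %o(i)
    }
  }
  printf("lv0 = 0x%llx -> lv = 0x%llx\n",
         (unsigned long long) lv0, (unsigned long long) lv);
  return 0;
}
#endif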
duke@435 176
sgoldman@542 177 bool frame::safe_for_sender(JavaThread *thread) {
duke@435 178
sgoldman@542 179 address _SP = (address) sp();
sgoldman@542 180 address _FP = (address) fp();
sgoldman@542 181 address _UNEXTENDED_SP = (address) unextended_sp();
sgoldman@542 182 // sp must be within the stack
sgoldman@542 183 bool sp_safe = (_SP <= thread->stack_base()) &&
sgoldman@542 184 (_SP >= thread->stack_base() - thread->stack_size());
sgoldman@542 185
sgoldman@542 186 if (!sp_safe) {
sgoldman@542 187 return false;
sgoldman@542 188 }
sgoldman@542 189
sgoldman@542 190 // unextended sp must be within the stack and above or equal to sp
sgoldman@542 191 bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
sgoldman@542 192 (_UNEXTENDED_SP >= _SP);
sgoldman@542 193
sgoldman@542 194 if (!unextended_sp_safe) return false;
sgoldman@542 195
sgoldman@542 196 // an fp must be within the stack and above (but not equal to) sp
sgoldman@542 197 bool fp_safe = (_FP <= thread->stack_base()) &&
sgoldman@542 198 (_FP > _SP);
sgoldman@542 199
sgoldman@542 200 // We know sp/unextended_sp are safe; only fp is questionable here
sgoldman@542 201
sgoldman@542 202 // If the current frame is known to the code cache then we can attempt
sgoldman@542 203 // to construct the sender and do some validation of it. This goes a long way
sgoldman@542 204 // toward eliminating issues when we get into frame construction code
sgoldman@542 205
sgoldman@542 206 if (_cb != NULL ) {
sgoldman@542 207
sgoldman@542 208 // First check if the frame is complete and the tester is reliable.
sgoldman@542 209 // Unfortunately we can only check frame completeness for runtime stubs and nmethods;
sgoldman@542 210 // other generic buffer blobs are more problematic so we just assume they are
sgoldman@542 211 // ok. Adapter blobs never have a complete frame and are never ok.
sgoldman@542 212
sgoldman@542 213 if (!_cb->is_frame_complete_at(_pc)) {
sgoldman@542 214 if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
sgoldman@542 215 return false;
duke@435 216 }
sgoldman@542 217 }
sgoldman@542 218
rbackman@4645 219 // Could just be some random pointer within the codeBlob
rbackman@4645 220 if (!_cb->code_contains(_pc)) {
rbackman@4645 221 return false;
rbackman@4645 222 }
rbackman@4645 223
sgoldman@542 224 // Entry frame checks
sgoldman@542 225 if (is_entry_frame()) {
sgoldman@542 226 // an entry frame must have a valid fp.
sgoldman@542 227
sgoldman@542 228 if (!fp_safe) {
sgoldman@542 229 return false;
sgoldman@542 230 }
sgoldman@542 231
sgoldman@542 232 // Validate the JavaCallWrapper an entry frame must have
sgoldman@542 233
sgoldman@542 234 address jcw = (address)entry_frame_call_wrapper();
sgoldman@542 235
sgoldman@542 236 bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);
sgoldman@542 237
sgoldman@542 238 return jcw_safe;
sgoldman@542 239
sgoldman@542 240 }
sgoldman@542 241
sgoldman@542 242 intptr_t* younger_sp = sp();
sgoldman@542 243 intptr_t* _SENDER_SP = sender_sp(); // sender is actually just _FP
sgoldman@542 244 bool adjusted_stack = is_interpreted_frame();
sgoldman@542 245
sgoldman@542 246 address sender_pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
sgoldman@542 247
sgoldman@542 248
sgoldman@542 249 // We must always be able to find a recognizable pc
sgoldman@542 250 CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
sgoldman@542 251 if (sender_pc == NULL || sender_blob == NULL) {
sgoldman@542 252 return false;
sgoldman@542 253 }
sgoldman@542 254
sla@5237 255 // Could be a zombie method
sla@5237 256 if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
sla@5237 257 return false;
sla@5237 258 }
sla@5237 259
sgoldman@542 260 // It should be safe to construct the sender though it might not be valid
sgoldman@542 261
sgoldman@542 262 frame sender(_SENDER_SP, younger_sp, adjusted_stack);
sgoldman@542 263
sgoldman@542 264 // Do we have a valid fp?
sgoldman@542 265 address sender_fp = (address) sender.fp();
sgoldman@542 266
sgoldman@542 267 // an fp must be within the stack and above (but not equal to) the current frame's _FP
sgoldman@542 268
sgoldman@542 269 bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
sgoldman@542 270 (sender_fp > _FP);
sgoldman@542 271
sgoldman@542 272 if (!sender_fp_safe) {
sgoldman@542 273 return false;
sgoldman@542 274 }
sgoldman@542 275
sgoldman@542 276
sgoldman@542 277 // If the potential sender is the interpreter then we can do some more checking
sgoldman@542 278 if (Interpreter::contains(sender_pc)) {
sgoldman@542 279 return sender.is_interpreted_frame_valid(thread);
sgoldman@542 280 }
sgoldman@542 281
sgoldman@542 282 // Could just be some random pointer within the codeBlob
twisti@2103 283 if (!sender.cb()->code_contains(sender_pc)) {
twisti@2103 284 return false;
twisti@2103 285 }
sgoldman@542 286
sgoldman@542 287 // We should never be able to see an adapter if the current frame is something from the code cache
twisti@2103 288 if (sender_blob->is_adapter_blob()) {
sgoldman@542 289 return false;
sgoldman@542 290 }
sgoldman@542 291
sgoldman@542 292 if( sender.is_entry_frame()) {
sgoldman@542 293 // Validate the JavaCallWrapper an entry frame must have
sgoldman@542 294
sgoldman@542 295 address jcw = (address)sender.entry_frame_call_wrapper();
sgoldman@542 296
sgoldman@542 297 bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);
sgoldman@542 298
sgoldman@542 299 return jcw_safe;
sgoldman@542 300 }
sgoldman@542 301
sla@5237 302 // If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size
sgoldman@542 303 // because you must allocate window space
sgoldman@542 304
sla@5237 305 if (sender_blob->frame_size() <= 0) {
sgoldman@542 306 assert(!sender_blob->is_nmethod(), "should count return address at least");
sgoldman@542 307 return false;
sgoldman@542 308 }
sgoldman@542 309
sgoldman@542 310 // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
sgoldman@542 311 // The cause is that at a save instruction the O7 we get is a leftover from an earlier
drchase@4942 312 // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the
sgoldman@542 313 // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
sgoldman@542 314 // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
sgoldman@542 315 // that initial frame and retrying.
sgoldman@542 316
sgoldman@542 317 if (!sender_blob->is_nmethod()) {
sgoldman@542 318 return false;
sgoldman@542 319 }
sgoldman@542 320
sgoldman@542 321 // Could put some more validation for the potential non-interpreted sender
sgoldman@542 322 // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
sgoldman@542 323
sgoldman@542 324 // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
sgoldman@542 325
sgoldman@542 326 // We've validated the potential sender that would be created
sgoldman@542 327
sgoldman@542 328 return true;
sgoldman@542 329
duke@435 330 }
sgoldman@542 331
sgoldman@542 332 // Must be a native-compiled frame. Since the sender will try to use fp to find
sgoldman@542 333 // linkages it must be safe
sgoldman@542 334
sgoldman@542 335 if (!fp_safe) return false;
sgoldman@542 336
sgoldman@542 337 // could try and do some more potential verification of native frame if we could think of some...
sgoldman@542 338
sgoldman@542 339 return true;
duke@435 340 }
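// Hedged sketch of the typical caller pattern, in the spirit of the native
// stack printing mentioned in the changeset summary above. The helper name
// print_one_frame and the walk limit are made up for illustration; only the
// safe_for_sender()/sender() protocol comes from this file. Excluded from
// compilation.
#if 0
static void walk_native_stack(JavaThread* thread, frame fr, outputStream* st) {
  const int walk_limit = 100;              // cap in case the frame chain is corrupt
  for (int count = 0; count < walk_limit; count++) {
    print_one_frame(st, fr);               // hypothetical printing helper
    if (!fr.safe_for_sender(thread)) {     // bail out before trusting the links
      st->print_cr("...<stack not walkable>");
      return;
    }
    if (fr.is_first_frame()) return;       // reached the entry frame
    RegisterMap map(thread, false /* update_map */);
    fr = fr.sender(&map);
  }
}
#endif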
duke@435 341
duke@435 342 // constructors
duke@435 343
duke@435 344 // Construct an unpatchable, deficient frame
simonis@7553 345 void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
duke@435 346 #ifdef _LP64
duke@435 347 assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
duke@435 348 #endif
duke@435 349 _sp = sp;
duke@435 350 _younger_sp = NULL;
duke@435 351 _pc = pc;
duke@435 352 _cb = cb;
duke@435 353 _sp_adjustment_by_callee = 0;
duke@435 354 assert(pc == NULL && cb == NULL || pc != NULL, "can't have a cb and no pc!");
duke@435 355 if (_cb == NULL && _pc != NULL ) {
duke@435 356 _cb = CodeCache::find_blob(_pc);
duke@435 357 }
duke@435 358 _deopt_state = unknown;
duke@435 359 #ifdef ASSERT
duke@435 360 if ( _cb != NULL && _cb->is_nmethod()) {
duke@435 361 // Without a valid unextended_sp() we can't convert the pc to "original"
duke@435 362 assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
duke@435 363 }
duke@435 364 #endif // ASSERT
duke@435 365 }
duke@435 366
simonis@7553 367 frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
simonis@7553 368 init(sp, pc, cb);
simonis@7553 369 }
simonis@7553 370
twisti@1919 371 frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
twisti@1919 372 _sp(sp),
twisti@1919 373 _younger_sp(younger_sp),
twisti@1919 374 _deopt_state(unknown),
twisti@1919 375 _sp_adjustment_by_callee(0) {
duke@435 376 if (younger_sp == NULL) {
duke@435 377 // make a deficient frame which doesn't know where its PC is
duke@435 378 _pc = NULL;
duke@435 379 _cb = NULL;
duke@435 380 } else {
duke@435 381 _pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
duke@435 382 assert( (intptr_t*)younger_sp[FP->sp_offset_in_saved_window()] == (intptr_t*)((intptr_t)sp - STACK_BIAS), "younger_sp must be valid");
duke@435 383 // Any frame we ever build should always be "safe" therefore we should not have to call
duke@435 384 // find_blob_unsafe
duke@435 385 // In case of native stubs, the pc retrieved here might be
duke@435 386 // wrong. (the _last_native_pc will have the right value)
duke@435 387 // So do not add any asserts on the _pc here.
duke@435 388 }
twisti@1919 389
twisti@1919 390 if (_pc != NULL)
twisti@1919 391 _cb = CodeCache::find_blob(_pc);
twisti@1919 392
twisti@1919 393 // Check for MethodHandle call sites.
twisti@1919 394 if (_cb != NULL) {
twisti@1919 395 nmethod* nm = _cb->as_nmethod_or_null();
twisti@1919 396 if (nm != NULL) {
twisti@1919 397 if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
twisti@1919 398 _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
twisti@1919 399 // The SP is already adjusted by this MH call site, don't
twisti@1919 400 // overwrite this value with the wrong interpreter value.
twisti@1919 401 younger_frame_is_interpreted = false;
twisti@1919 402 }
twisti@1919 403 }
duke@435 404 }
duke@435 405
twisti@1919 406 if (younger_frame_is_interpreted) {
twisti@1919 407 // compute adjustment to this frame's SP made by its interpreted callee
twisti@1919 408 _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
twisti@1919 409 }
duke@435 410
twisti@1919 411 // It is important that the frame is fully constructed when we do
twisti@1919 412 // this lookup as get_deopt_original_pc() needs a correct value for
twisti@1919 413 // unextended_sp() which uses _sp_adjustment_by_callee.
duke@435 414 if (_pc != NULL) {
twisti@1639 415 address original_pc = nmethod::get_deopt_original_pc(this);
twisti@1639 416 if (original_pc != NULL) {
twisti@1639 417 _pc = original_pc;
duke@435 418 _deopt_state = is_deoptimized;
duke@435 419 } else {
duke@435 420 _deopt_state = not_deoptimized;
duke@435 421 }
duke@435 422 }
duke@435 423 }
duke@435 424
simonis@7553 425 #ifndef PRODUCT
simonis@7553 426 // This is a generic constructor which is only used by pns() in debug.cpp.
simonis@7553 427 frame::frame(void* sp, void* fp, void* pc) {
simonis@7553 428 init((intptr_t*)sp, (address)pc, NULL);
simonis@7553 429 }
simonis@7553 430 #endif
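// For context, a hedged sketch of the non-product debug.cpp caller this
// constructor exists for. Only the fact that pns() builds a frame from the raw
// sp/fp/pc values handed in by the debugger comes from this changeset; the
// body shown here is illustrative and excluded from compilation.
#if 0
extern "C" void pns(void* sp, void* fp, void* pc) {   // "print native stack"
  frame fr(sp, fp, pc);     // the generic constructor defined above
  // ... hand fr to the shared native stack printer that now lives in debug.cpp ...
}
#endif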
simonis@7553 431
duke@435 432 bool frame::is_interpreted_frame() const {
duke@435 433 return Interpreter::contains(pc());
duke@435 434 }
duke@435 435
duke@435 436 // sender_sp
duke@435 437
duke@435 438 intptr_t* frame::interpreter_frame_sender_sp() const {
duke@435 439 assert(is_interpreted_frame(), "interpreted frame expected");
duke@435 440 return fp();
duke@435 441 }
duke@435 442
duke@435 443 #ifndef CC_INTERP
duke@435 444 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
duke@435 445 assert(is_interpreted_frame(), "interpreted frame expected");
duke@435 446 Unimplemented();
duke@435 447 }
duke@435 448 #endif // CC_INTERP
duke@435 449
duke@435 450
duke@435 451 #ifdef ASSERT
duke@435 452 // Debugging aid
duke@435 453 static frame nth_sender(int n) {
duke@435 454 frame f = JavaThread::current()->last_frame();
duke@435 455
duke@435 456 for(int i = 0; i < n; ++i)
duke@435 457 f = f.sender((RegisterMap*)NULL);
duke@435 458
duke@435 459 printf("first frame %d\n", f.is_first_frame() ? 1 : 0);
duke@435 460 printf("interpreted frame %d\n", f.is_interpreted_frame() ? 1 : 0);
duke@435 461 printf("java frame %d\n", f.is_java_frame() ? 1 : 0);
duke@435 462 printf("entry frame %d\n", f.is_entry_frame() ? 1 : 0);
duke@435 463 printf("native frame %d\n", f.is_native_frame() ? 1 : 0);
duke@435 464 if (f.is_compiled_frame()) {
duke@435 465 if (f.is_deoptimized_frame())
duke@435 466 printf("deoptimized frame 1\n");
duke@435 467 else
duke@435 468 printf("compiled frame 1\n");
duke@435 469 }
duke@435 470
duke@435 471 return f;
duke@435 472 }
duke@435 473 #endif
duke@435 474
duke@435 475
duke@435 476 frame frame::sender_for_entry_frame(RegisterMap *map) const {
duke@435 477 assert(map != NULL, "map must be set");
duke@435 478 // Java frame called from C; skip all C frames and return top C
duke@435 479 // frame of that chunk as the sender
duke@435 480 JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
duke@435 481 assert(!entry_frame_is_first(), "next Java fp must be non zero");
duke@435 482 assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
duke@435 483 intptr_t* last_Java_sp = jfa->last_Java_sp();
duke@435 484 // Since we are walking the stack now this nested anchor is obviously walkable
duke@435 485 // even if it wasn't when it was stacked.
duke@435 486 if (!jfa->walkable()) {
duke@435 487 // Capture _last_Java_pc (if needed) and mark anchor walkable.
duke@435 488 jfa->capture_last_Java_pc(_sp);
duke@435 489 }
duke@435 490 assert(jfa->last_Java_pc() != NULL, "No captured pc!");
duke@435 491 map->clear();
duke@435 492 map->make_integer_regs_unsaved();
duke@435 493 map->shift_window(last_Java_sp, NULL);
duke@435 494 assert(map->include_argument_oops(), "should be set by clear");
duke@435 495 return frame(last_Java_sp, frame::unpatchable, jfa->last_Java_pc());
duke@435 496 }
duke@435 497
duke@435 498 frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
duke@435 499 ShouldNotCallThis();
duke@435 500 return sender(map);
duke@435 501 }
duke@435 502
duke@435 503 frame frame::sender_for_compiled_frame(RegisterMap *map) const {
duke@435 504 ShouldNotCallThis();
duke@435 505 return sender(map);
duke@435 506 }
duke@435 507
duke@435 508 frame frame::sender(RegisterMap* map) const {
duke@435 509 assert(map != NULL, "map must be set");
duke@435 510
duke@435 511 assert(CodeCache::find_blob_unsafe(_pc) == _cb, "inconsistent");
duke@435 512
duke@435 513 // Default is not to follow arguments; update it accordingly below
duke@435 514 map->set_include_argument_oops(false);
duke@435 515
duke@435 516 if (is_entry_frame()) return sender_for_entry_frame(map);
duke@435 517
twisti@1919 518 intptr_t* younger_sp = sp();
twisti@1919 519 intptr_t* sp = sender_sp();
duke@435 520
duke@435 521 // Note: The version of this operation on any platform with callee-save
duke@435 522 // registers must update the register map (if not null).
duke@435 523 // In order to do this correctly, the various subtypes
duke@435 524 // of frame (interpreted, compiled, glue, native),
duke@435 525 // must be distinguished. There is no need on SPARC for
duke@435 526 // such distinctions, because all callee-save registers are
duke@435 527 // preserved for all frames via SPARC-specific mechanisms.
duke@435 528 //
duke@435 529 // *** HOWEVER, *** if and when we make any floating-point
duke@435 530 // registers callee-saved, then we will have to copy over
duke@435 531 // the RegisterMap update logic from the Intel code.
duke@435 532
duke@435 533 // The constructor of the sender must know whether this frame is interpreted so it can set the
duke@435 534 // sender's _sp_adjustment_by_callee field. An osr adapter frame was originally
duke@435 535 // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
duke@435 536 // explicitly recognized.
duke@435 537
never@2950 538
twisti@1919 539 bool frame_is_interpreted = is_interpreted_frame();
twisti@1919 540 if (frame_is_interpreted) {
duke@435 541 map->make_integer_regs_unsaved();
duke@435 542 map->shift_window(sp, younger_sp);
duke@435 543 } else if (_cb != NULL) {
duke@435 544 // Update the locations of implicitly saved registers to be their
duke@435 545 // addresses in the register save area.
duke@435 546 // For %o registers, the addresses of %i registers in the next younger
duke@435 547 // frame are used.
duke@435 548 map->shift_window(sp, younger_sp);
duke@435 549 if (map->update_map()) {
duke@435 550 // Tell GC to use argument oopmaps for some runtime stubs that need it.
duke@435 551 // For C1, the runtime stub might not have oop maps, so set this flag
duke@435 552 // outside of update_register_map.
duke@435 553 map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
duke@435 554 if (_cb->oop_maps() != NULL) {
duke@435 555 OopMapSet::update_register_map(this, map);
duke@435 556 }
duke@435 557 }
duke@435 558 }
twisti@1919 559 return frame(sp, younger_sp, frame_is_interpreted);
duke@435 560 }
duke@435 561
duke@435 562
duke@435 563 void frame::patch_pc(Thread* thread, address pc) {
duke@435 564 if(thread == Thread::current()) {
duke@435 565 StubRoutines::Sparc::flush_callers_register_windows_func()();
duke@435 566 }
duke@435 567 if (TracePcPatching) {
duke@435 568 // QQQ this assert is invalid (or too strong anyway) since _pc could
duke@435 569 // be original pc and frame could have the deopt pc.
duke@435 570 // assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc");
duke@435 571 tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", O7_addr(), _pc, pc);
duke@435 572 }
duke@435 573 _cb = CodeCache::find_blob(pc);
duke@435 574 *O7_addr() = pc - pc_return_offset;
duke@435 575 _cb = CodeCache::find_blob(_pc);
twisti@1639 576 address original_pc = nmethod::get_deopt_original_pc(this);
twisti@1639 577 if (original_pc != NULL) {
twisti@1639 578 assert(original_pc == _pc, "expected original to be stored before patching");
duke@435 579 _deopt_state = is_deoptimized;
duke@435 580 } else {
duke@435 581 _deopt_state = not_deoptimized;
duke@435 582 }
duke@435 583 }
duke@435 584
duke@435 585
duke@435 586 static bool sp_is_valid(intptr_t* old_sp, intptr_t* young_sp, intptr_t* sp) {
duke@435 587 return (((intptr_t)sp & (2*wordSize-1)) == 0 &&
duke@435 588 sp <= old_sp &&
duke@435 589 sp >= young_sp);
duke@435 590 }
duke@435 591
duke@435 592
duke@435 593 /*
duke@435 594 Find the (biased) sp that is just younger than old_sp starting at sp.
duke@435 595 If not found return NULL. Register windows are assumed to be flushed.
duke@435 596 */
duke@435 597 intptr_t* frame::next_younger_sp_or_null(intptr_t* old_sp, intptr_t* sp) {
duke@435 598
duke@435 599 intptr_t* previous_sp = NULL;
duke@435 600 intptr_t* orig_sp = sp;
duke@435 601
duke@435 602 int max_frames = (old_sp - sp) / 16; // Minimum frame size is 16
duke@435 603 int max_frame2 = max_frames;
duke@435 604 while(sp != old_sp && sp_is_valid(old_sp, orig_sp, sp)) {
duke@435 605 if (max_frames-- <= 0)
duke@435 606 // too many frames have gone by; invalid parameters given to this function
duke@435 607 break;
duke@435 608 previous_sp = sp;
duke@435 609 sp = (intptr_t*)sp[FP->sp_offset_in_saved_window()];
duke@435 610 sp = (intptr_t*)((intptr_t)sp + STACK_BIAS);
duke@435 611 }
duke@435 612
duke@435 613 return (sp == old_sp ? previous_sp : NULL);
duke@435 614 }
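// Illustrative aside, not part of the original file: a toy model of the walk
// above, with a made-up FP slot index and a STACK_BIAS of zero, just to show
// how following the saved-FP chain yields the sp immediately younger than
// old_sp. Excluded from compilation.
#if 0
#include <cstdint>
#include <cstdio>

int main() {
  const int FP_slot = 14;                  // pretend FP is saved in window slot 14
  const intptr_t bias = 0;                 // pretend STACK_BIAS == 0
  intptr_t frames[3][16] = {};             // three fake 16-word register windows

  frames[0][FP_slot] = (intptr_t) frames[1] - bias;  // youngest -> middle
  frames[1][FP_slot] = (intptr_t) frames[2] - bias;  // middle   -> oldest

  intptr_t* sp = frames[0];
  intptr_t* old_sp = frames[2];
  intptr_t* previous_sp = NULL;
  while (sp != old_sp) {
    previous_sp = sp;
    sp = (intptr_t*) (sp[FP_slot] + bias);
  }
  printf("sp just younger than old_sp is frames[1]? %s\n",
         previous_sp == frames[1] ? "yes" : "no");
  return 0;
}
#endif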
duke@435 615
duke@435 616 /*
duke@435 617 Determine if "sp" is a valid stack pointer. "sp" is assumed to be younger than
duke@435 618 "valid_sp". So if "sp" is valid itself then it should be possible to walk frames
duke@435 619 from "sp" to "valid_sp". The assumption is that the register windows for the
duke@435 620 thread stack in question are flushed.
duke@435 621 */
duke@435 622 bool frame::is_valid_stack_pointer(intptr_t* valid_sp, intptr_t* sp) {
duke@435 623 return next_younger_sp_or_null(valid_sp, sp) != NULL;
duke@435 624 }
duke@435 625
duke@435 626
duke@435 627 bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
duke@435 628 assert(is_interpreted_frame(), "must be interpreter frame");
duke@435 629 return this->fp() == fp;
duke@435 630 }
duke@435 631
duke@435 632
duke@435 633 void frame::pd_gc_epilog() {
duke@435 634 if (is_interpreted_frame()) {
duke@435 635 // set constant pool cache entry for interpreter
coleenp@4037 636 Method* m = interpreter_frame_method();
duke@435 637
duke@435 638 *interpreter_frame_cpoolcache_addr() = m->constants()->cache();
duke@435 639 }
duke@435 640 }
duke@435 641
duke@435 642
sgoldman@542 643 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
duke@435 644 #ifdef CC_INTERP
duke@435 645 // Is there anything to do?
duke@435 646 #else
duke@435 647 assert(is_interpreted_frame(), "Not an interpreted frame");
duke@435 648 // These are reasonable sanity checks
duke@435 649 if (fp() == 0 || (intptr_t(fp()) & (2*wordSize-1)) != 0) {
duke@435 650 return false;
duke@435 651 }
duke@435 652 if (sp() == 0 || (intptr_t(sp()) & (2*wordSize-1)) != 0) {
duke@435 653 return false;
duke@435 654 }
sgoldman@542 655
duke@435 656 const intptr_t interpreter_frame_initial_sp_offset = interpreter_frame_vm_local_words;
duke@435 657 if (fp() + interpreter_frame_initial_sp_offset < sp()) {
duke@435 658 return false;
duke@435 659 }
duke@435 660 // These are hacks to keep us out of trouble.
duke@435 661 // The problem with these is that they mask other problems
duke@435 662 if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
duke@435 663 return false;
duke@435 664 }
sgoldman@542 665 // do some validation of frame elements
sgoldman@542 666
sgoldman@542 667 // first the method
sgoldman@542 668
coleenp@4037 669 Method* m = *interpreter_frame_method_addr();
sgoldman@542 670
sgoldman@542 671 // validate the method we'd find in this potential sender
coleenp@4295 672 if (!m->is_valid_method()) return false;
sgoldman@542 673
sgoldman@542 674 // stack frames shouldn't be much larger than max_stack elements
sgoldman@542 675
twisti@1861 676 if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
duke@435 677 return false;
duke@435 678 }
sgoldman@542 679
sgoldman@542 680 // validate bci/bcx
sgoldman@542 681
sgoldman@542 682 intptr_t bcx = interpreter_frame_bcx();
sgoldman@542 683 if (m->validate_bci_from_bcx(bcx) < 0) {
sgoldman@542 684 return false;
sgoldman@542 685 }
sgoldman@542 686
coleenp@4037 687 // validate ConstantPoolCache*
coleenp@4037 688 ConstantPoolCache* cp = *interpreter_frame_cache_addr();
coleenp@5307 689 if (cp == NULL || !cp->is_metaspace_object()) return false;
sgoldman@542 690
sgoldman@542 691 // validate locals
sgoldman@542 692
sgoldman@542 693 address locals = (address) *interpreter_frame_locals_addr();
sgoldman@542 694
sgoldman@542 695 if (locals > thread->stack_base() || locals < (address) fp()) return false;
sgoldman@542 696
sgoldman@542 697 // We'd have to be pretty unlucky to be misled at this point
duke@435 698 #endif /* CC_INTERP */
duke@435 699 return true;
duke@435 700 }
duke@435 701
duke@435 702
duke@435 703 // Windows have been flushed on entry (but not marked). Capture the pc that
duke@435 704 // is the return address to the frame that contains "sp" as its stack pointer.
duke@435 705 // This pc resides in the callee of the frame corresponding to "sp".
duke@435 706 // As a side effect we mark this JavaFrameAnchor as having flushed the windows.
duke@435 707 // This side effect lets us mark stacked JavaFrameAnchors (stacked in the
duke@435 708 // call_helper) as flushed when we have flushed the windows for the most
duke@435 709 // recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
duke@435 710 // and lets us find the pc just once rather than multiple times as it did
duke@435 711 // in the bad old _post_Java_state days.
duke@435 712 //
duke@435 713 void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
duke@435 714 if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
duke@435 715 // try and find the sp just younger than _last_Java_sp
duke@435 716 intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
duke@435 717 // Really this should never fail; otherwise the VM call must have non-standard
duke@435 718 // frame linkage (bad) or the stack is not properly flushed (worse).
duke@435 719 guarantee(_post_Java_sp != NULL, "bad stack!");
duke@435 720 _last_Java_pc = (address) _post_Java_sp[ I7->sp_offset_in_saved_window()] + frame::pc_return_offset;
duke@435 721
duke@435 722 }
duke@435 723 set_window_flushed();
duke@435 724 }
duke@435 725
duke@435 726 void JavaFrameAnchor::make_walkable(JavaThread* thread) {
duke@435 727 if (walkable()) return;
duke@435 728 // Eventually make an assert
duke@435 729 guarantee(Thread::current() == (Thread*)thread, "only current thread can flush its registers");
duke@435 730 // We always flush in case the profiler wants it but we won't mark
duke@435 731 // the windows as flushed unless we have a last_Java_frame
duke@435 732 intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
duke@435 733 if (last_Java_sp() != NULL ) {
duke@435 734 capture_last_Java_pc(sp);
duke@435 735 }
duke@435 736 }
duke@435 737
duke@435 738 intptr_t* frame::entry_frame_argument_at(int offset) const {
duke@435 739 // convert offset to index to deal with tsi
duke@435 740 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
duke@435 741
duke@435 742 intptr_t* LSP = (intptr_t*) sp()[Lentry_args->sp_offset_in_saved_window()];
duke@435 743 return &LSP[index+1];
duke@435 744 }
duke@435 745
duke@435 746
duke@435 747 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
duke@435 748 assert(is_interpreted_frame(), "interpreted frame expected");
coleenp@4037 749 Method* method = interpreter_frame_method();
duke@435 750 BasicType type = method->result_type();
duke@435 751
duke@435 752 if (method->is_native()) {
duke@435 753 // Prior to notifying the runtime of the method_exit the possible result
duke@435 754 // value is saved to l_scratch and d_scratch.
duke@435 755
duke@435 756 #ifdef CC_INTERP
duke@435 757 interpreterState istate = get_interpreterState();
duke@435 758 intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
duke@435 759 intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
duke@435 760 #else /* CC_INTERP */
duke@435 761 intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
duke@435 762 intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
duke@435 763 #endif /* CC_INTERP */
duke@435 764
duke@435 765 address l_addr = (address)l_scratch;
duke@435 766 #ifdef _LP64
duke@435 767 // On 64-bit the result for 1/8/16/32-bit result types is in the other
duke@435 768 // word half
duke@435 769 l_addr += wordSize/2;
duke@435 770 #endif
duke@435 771
duke@435 772 switch (type) {
duke@435 773 case T_OBJECT:
duke@435 774 case T_ARRAY: {
duke@435 775 #ifdef CC_INTERP
duke@435 776 *oop_result = istate->_oop_temp;
duke@435 777 #else
hseigel@5784 778 oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
duke@435 779 assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
duke@435 780 *oop_result = obj;
duke@435 781 #endif // CC_INTERP
duke@435 782 break;
duke@435 783 }
duke@435 784
duke@435 785 case T_BOOLEAN : { jint* p = (jint*)l_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
duke@435 786 case T_BYTE : { jint* p = (jint*)l_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
duke@435 787 case T_CHAR : { jint* p = (jint*)l_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
duke@435 788 case T_SHORT : { jint* p = (jint*)l_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
duke@435 789 case T_INT : value_result->i = *(jint*)l_addr; break;
duke@435 790 case T_LONG : value_result->j = *(jlong*)l_scratch; break;
duke@435 791 case T_FLOAT : value_result->f = *(jfloat*)d_scratch; break;
duke@435 792 case T_DOUBLE : value_result->d = *(jdouble*)d_scratch; break;
duke@435 793 case T_VOID : /* Nothing to do */ break;
duke@435 794 default : ShouldNotReachHere();
duke@435 795 }
duke@435 796 } else {
duke@435 797 intptr_t* tos_addr = interpreter_frame_tos_address();
duke@435 798
duke@435 799 switch(type) {
duke@435 800 case T_OBJECT:
duke@435 801 case T_ARRAY: {
hseigel@5784 802 oop obj = cast_to_oop(*tos_addr);
duke@435 803 assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
duke@435 804 *oop_result = obj;
duke@435 805 break;
duke@435 806 }
duke@435 807 case T_BOOLEAN : { jint* p = (jint*)tos_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
duke@435 808 case T_BYTE : { jint* p = (jint*)tos_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
duke@435 809 case T_CHAR : { jint* p = (jint*)tos_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
duke@435 810 case T_SHORT : { jint* p = (jint*)tos_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
duke@435 811 case T_INT : value_result->i = *(jint*)tos_addr; break;
duke@435 812 case T_LONG : value_result->j = *(jlong*)tos_addr; break;
duke@435 813 case T_FLOAT : value_result->f = *(jfloat*)tos_addr; break;
duke@435 814 case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break;
duke@435 815 case T_VOID : /* Nothing to do */ break;
duke@435 816 default : ShouldNotReachHere();
duke@435 817 }
duke@435 818 };
duke@435 819
duke@435 820 return type;
duke@435 821 }
duke@435 822
duke@435 823 // Lesp pointer is one word lower than the top item on the stack.
duke@435 824 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
duke@435 825 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
duke@435 826 return &interpreter_frame_tos_address()[index];
duke@435 827 }
never@2868 828
never@2868 829
bdelsart@3451 830 #ifndef PRODUCT
never@2868 831
never@2868 832 #define DESCRIBE_FP_OFFSET(name) \
never@2897 833 values.describe(frame_no, fp() + frame::name##_offset, #name)
never@2868 834
never@2868 835 void frame::describe_pd(FrameValues& values, int frame_no) {
never@2868 836 for (int w = 0; w < frame::register_save_words; w++) {
never@2868 837 values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
never@2868 838 }
never@2868 839
twisti@3969 840 if (is_interpreted_frame()) {
never@2868 841 DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
never@2868 842 DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
never@2868 843 DESCRIBE_FP_OFFSET(interpreter_frame_padding);
never@2868 844 DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
bdelsart@3445 845
bdelsart@3445 846 // esp, according to Lesp (i.e. not depending on bci), if it seems valid
bdelsart@3445 847 intptr_t* esp = *interpreter_frame_esp_addr();
bdelsart@3445 848 if ((esp >= sp()) && (esp < fp())) {
bdelsart@3445 849 values.describe(-1, esp, "*Lesp");
bdelsart@3445 850 }
never@2868 851 }
never@2868 852
never@2868 853 if (!is_compiled_frame()) {
never@2868 854 if (frame::callee_aggregate_return_pointer_words != 0) {
never@2868 855 values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word");
never@2868 856 }
never@2868 857 for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) {
never@2868 858 values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w,
never@2868 859 err_msg("callee_register_argument_save_area_words %d", w));
never@2868 860 }
never@2868 861 }
never@2868 862 }
never@2868 863
never@2868 864 #endif
bdelsart@3130 865
bdelsart@3130 866 intptr_t *frame::initial_deoptimization_info() {
bdelsart@3130 867 // unused... but returns fp() to minimize changes introduced by 7087445
bdelsart@3130 868 return fp();
bdelsart@3130 869 }
