src/cpu/sparc/vm/frame_sparc.cpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2314:f95d63e2154a
child        2868:2e038ad0c1d0
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
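
For orientation, the sketch below illustrates the shape of the barrier the summary describes: a read of Reference.referent that, while concurrent marking is active, also logs a non-null referent into an SATB buffer. This is a conceptual sketch only, not code from this file or from HotSpot; the names (Object, Reference, SATBBuffer, reference_get, marking_active) are hypothetical stand-ins for illustration.

#include <cstddef>
#include <vector>

struct Object {};                              // stand-in for an oop
struct Reference { Object* referent; };        // stand-in for java.lang.ref.Reference

// Per-thread SATB queue: anything enqueued here is treated as live by the
// concurrent marker, even if it becomes unreachable later in the cycle.
struct SATBBuffer {
  std::vector<Object*> entries;
  void enqueue(Object* obj) { entries.push_back(obj); }
};

// Shape of the intrinsified Reference.get(): read the referent and, if
// concurrent marking is active and the value is non-null, log it so that a
// referent re-attached to the strongly reachable graph is still marked live.
Object* reference_get(Reference* ref, SATBBuffer* satb, bool marking_active) {
  Object* referent = ref->referent;
  if (marking_active && referent != NULL) {
    satb->enqueue(referent);                   // SATB read barrier on the referent field
  }
  return referent;
}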

duke@435 1 /*
trims@1907 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "interpreter/interpreter.hpp"
stefank@2314 27 #include "memory/resourceArea.hpp"
stefank@2314 28 #include "oops/markOop.hpp"
stefank@2314 29 #include "oops/methodOop.hpp"
stefank@2314 30 #include "oops/oop.inline.hpp"
stefank@2314 31 #include "runtime/frame.inline.hpp"
stefank@2314 32 #include "runtime/handles.inline.hpp"
stefank@2314 33 #include "runtime/javaCalls.hpp"
stefank@2314 34 #include "runtime/monitorChunk.hpp"
stefank@2314 35 #include "runtime/signature.hpp"
stefank@2314 36 #include "runtime/stubCodeGenerator.hpp"
stefank@2314 37 #include "runtime/stubRoutines.hpp"
stefank@2314 38 #include "vmreg_sparc.inline.hpp"
stefank@2314 39 #ifdef COMPILER1
stefank@2314 40 #include "c1/c1_Runtime1.hpp"
stefank@2314 41 #include "runtime/vframeArray.hpp"
stefank@2314 42 #endif
duke@435 43
duke@435 44 void RegisterMap::pd_clear() {
duke@435 45 if (_thread->has_last_Java_frame()) {
duke@435 46 frame fr = _thread->last_frame();
duke@435 47 _window = fr.sp();
duke@435 48 } else {
duke@435 49 _window = NULL;
duke@435 50 }
duke@435 51 _younger_window = NULL;
duke@435 52 }
duke@435 53
duke@435 54
duke@435 55 // Unified register numbering scheme: each 32 bits counts as a register
duke@435 56 // number, so all the V9 registers take 2 slots.
duke@435 57 const static int R_L_nums[] = {0+040,2+040,4+040,6+040,8+040,10+040,12+040,14+040};
duke@435 58 const static int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};
duke@435 59 const static int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};
duke@435 60 const static int R_G_nums[] = {0+000,2+000,4+000,6+000,8+000,10+000,12+000,14+000};
duke@435 61 static RegisterMap::LocationValidType bad_mask = 0;
duke@435 62 static RegisterMap::LocationValidType R_LIO_mask = 0;
duke@435 63 static bool register_map_inited = false;
duke@435 64
duke@435 65 static void register_map_init() {
duke@435 66 if (!register_map_inited) {
duke@435 67 register_map_inited = true;
duke@435 68 int i;
duke@435 69 for (i = 0; i < 8; i++) {
duke@435 70 assert(R_L_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 71 assert(R_I_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 72 assert(R_O_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 73 assert(R_G_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
duke@435 74 }
duke@435 75
duke@435 76 bad_mask |= (1LL << R_O_nums[6]); // SP
duke@435 77 bad_mask |= (1LL << R_O_nums[7]); // cPC
duke@435 78 bad_mask |= (1LL << R_I_nums[6]); // FP
duke@435 79 bad_mask |= (1LL << R_I_nums[7]); // rPC
duke@435 80 bad_mask |= (1LL << R_G_nums[2]); // TLS
duke@435 81 bad_mask |= (1LL << R_G_nums[7]); // reserved by libthread
duke@435 82
duke@435 83 for (i = 0; i < 8; i++) {
duke@435 84 R_LIO_mask |= (1LL << R_L_nums[i]);
duke@435 85 R_LIO_mask |= (1LL << R_I_nums[i]);
duke@435 86 R_LIO_mask |= (1LL << R_O_nums[i]);
duke@435 87 }
duke@435 88 }
duke@435 89 }
duke@435 90
duke@435 91
duke@435 92 address RegisterMap::pd_location(VMReg regname) const {
duke@435 93 register_map_init();
duke@435 94
duke@435 95 assert(regname->is_reg(), "sanity check");
duke@435 96 // Only the GPRs get handled this way
duke@435 97 if( !regname->is_Register())
duke@435 98 return NULL;
duke@435 99
duke@435 100 // don't talk about bad registers
duke@435 101 if ((bad_mask & ((LocationValidType)1 << regname->value())) != 0) {
duke@435 102 return NULL;
duke@435 103 }
duke@435 104
duke@435 105 // Convert to a GPR
duke@435 106 Register reg;
duke@435 107 int second_word = 0;
duke@435 108 // 32-bit registers for in, out and local
duke@435 109 if (!regname->is_concrete()) {
duke@435 110 // HMM ought to return NULL for any non-concrete (odd) vmreg
duke@435 111 // this is all tied up in the fact that we put out double oopMaps for
duke@435 112 // register locations. When that is fixed we will return NULL
duke@435 113 // (or assert here).
duke@435 114 reg = regname->prev()->as_Register();
duke@435 115 #ifdef _LP64
duke@435 116 second_word = sizeof(jint);
duke@435 117 #else
duke@435 118 return NULL;
duke@435 119 #endif // _LP64
duke@435 120 } else {
duke@435 121 reg = regname->as_Register();
duke@435 122 }
duke@435 123 if (reg->is_out()) {
duke@435 124 assert(_younger_window != NULL, "Younger window should be available");
duke@435 125 return second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
duke@435 126 }
duke@435 127 if (reg->is_local() || reg->is_in()) {
duke@435 128 assert(_window != NULL, "Window should be available");
duke@435 129 return second_word + (address)&_window[reg->sp_offset_in_saved_window()];
duke@435 130 }
duke@435 131 // Only the window'd GPRs get handled this way; not the globals.
duke@435 132 return NULL;
duke@435 133 }
duke@435 134
duke@435 135
duke@435 136 #ifdef ASSERT
duke@435 137 void RegisterMap::check_location_valid() {
duke@435 138 register_map_init();
duke@435 139 assert((_location_valid[0] & bad_mask) == 0, "cannot have special locations for SP,FP,TLS,etc.");
duke@435 140 }
duke@435 141 #endif
duke@435 142
duke@435 143 // We are shifting windows. That means we are moving all %i to %o,
duke@435 144 // getting rid of all current %l, and keeping all %g. This is only
duke@435 145 // complicated if any of the location pointers for these are valid.
duke@435 146 // The normal case is that everything is in its standard register window
duke@435 147 // home, and _location_valid[0] is zero. In that case, this routine
duke@435 148 // does exactly nothing.
duke@435 149 void RegisterMap::shift_individual_registers() {
duke@435 150 if (!update_map()) return; // this only applies to maps with locations
duke@435 151 register_map_init();
duke@435 152 check_location_valid();
duke@435 153
duke@435 154 LocationValidType lv = _location_valid[0];
duke@435 155 LocationValidType lv0 = lv;
duke@435 156
duke@435 157 lv &= ~R_LIO_mask; // clear %l, %o, %i regs
duke@435 158
duke@435 159 // if we cleared some non-%g locations, we may have to do some shifting
duke@435 160 if (lv != lv0) {
duke@435 161 // copy %i0-%i5 to %o0-%o5, if they have special locations
duke@435 162 // This can happen in within stubs which spill argument registers
duke@435 163 // around a dynamic link operation, such as resolve_opt_virtual_call.
duke@435 164 for (int i = 0; i < 8; i++) {
duke@435 165 if (lv0 & (1LL << R_I_nums[i])) {
duke@435 166 _location[R_O_nums[i]] = _location[R_I_nums[i]];
duke@435 167 lv |= (1LL << R_O_nums[i]);
duke@435 168 }
duke@435 169 }
duke@435 170 }
duke@435 171
duke@435 172 _location_valid[0] = lv;
duke@435 173 check_location_valid();
duke@435 174 }
duke@435 175
sgoldman@542 176 bool frame::safe_for_sender(JavaThread *thread) {
duke@435 177
sgoldman@542 178 address _SP = (address) sp();
sgoldman@542 179 address _FP = (address) fp();
sgoldman@542 180 address _UNEXTENDED_SP = (address) unextended_sp();
sgoldman@542 181 // sp must be within the stack
sgoldman@542 182 bool sp_safe = (_SP <= thread->stack_base()) &&
sgoldman@542 183 (_SP >= thread->stack_base() - thread->stack_size());
sgoldman@542 184
sgoldman@542 185 if (!sp_safe) {
sgoldman@542 186 return false;
sgoldman@542 187 }
sgoldman@542 188
sgoldman@542 189 // unextended sp must be within the stack and above or equal to sp
sgoldman@542 190 bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
sgoldman@542 191 (_UNEXTENDED_SP >= _SP);
sgoldman@542 192
sgoldman@542 193 if (!unextended_sp_safe) return false;
sgoldman@542 194
sgoldman@542 195 // an fp must be within the stack and above (but not equal to) sp
sgoldman@542 196 bool fp_safe = (_FP <= thread->stack_base()) &&
sgoldman@542 197 (_FP > _SP);
sgoldman@542 198
sgoldman@542 199 // We know sp/unextended_sp are safe; only fp is questionable here
sgoldman@542 200
sgoldman@542 201 // If the current frame is known to the code cache then we can attempt
sgoldman@542 202 // to construct the sender and do some validation of it. This goes a long way
sgoldman@542 203 // toward eliminating issues when we get into frame construction code
sgoldman@542 204
sgoldman@542 205 if (_cb != NULL ) {
sgoldman@542 206
sgoldman@542 207 // First check if the frame is complete and the tester is reliable.
sgoldman@542 208 // Unfortunately we can only check frame completeness for runtime stubs and nmethods;
sgoldman@542 209 // other generic buffer blobs are more problematic so we just assume they are
sgoldman@542 210 // ok. Adapter blobs never have a complete frame and are never ok.
sgoldman@542 211
sgoldman@542 212 if (!_cb->is_frame_complete_at(_pc)) {
sgoldman@542 213 if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
sgoldman@542 214 return false;
duke@435 215 }
sgoldman@542 216 }
sgoldman@542 217
sgoldman@542 218 // Entry frame checks
sgoldman@542 219 if (is_entry_frame()) {
sgoldman@542 220 // an entry frame must have a valid fp.
sgoldman@542 221
sgoldman@542 222 if (!fp_safe) {
sgoldman@542 223 return false;
sgoldman@542 224 }
sgoldman@542 225
sgoldman@542 226 // Validate the JavaCallWrapper an entry frame must have
sgoldman@542 227
sgoldman@542 228 address jcw = (address)entry_frame_call_wrapper();
sgoldman@542 229
sgoldman@542 230 bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);
sgoldman@542 231
sgoldman@542 232 return jcw_safe;
sgoldman@542 233
sgoldman@542 234 }
sgoldman@542 235
sgoldman@542 236 intptr_t* younger_sp = sp();
sgoldman@542 237 intptr_t* _SENDER_SP = sender_sp(); // sender is actually just _FP
sgoldman@542 238 bool adjusted_stack = is_interpreted_frame();
sgoldman@542 239
sgoldman@542 240 address sender_pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
sgoldman@542 241
sgoldman@542 242
sgoldman@542 243 // We must always be able to find a recognizable pc
sgoldman@542 244 CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
sgoldman@542 245 if (sender_pc == NULL || sender_blob == NULL) {
sgoldman@542 246 return false;
sgoldman@542 247 }
sgoldman@542 248
sgoldman@542 249 // It should be safe to construct the sender though it might not be valid
sgoldman@542 250
sgoldman@542 251 frame sender(_SENDER_SP, younger_sp, adjusted_stack);
sgoldman@542 252
sgoldman@542 253 // Do we have a valid fp?
sgoldman@542 254 address sender_fp = (address) sender.fp();
sgoldman@542 255
sgoldman@542 256 // an fp must be within the stack and above (but not equal to) the current frame's _FP
sgoldman@542 257
sgoldman@542 258 bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
sgoldman@542 259 (sender_fp > _FP);
sgoldman@542 260
sgoldman@542 261 if (!sender_fp_safe) {
sgoldman@542 262 return false;
sgoldman@542 263 }
sgoldman@542 264
sgoldman@542 265
sgoldman@542 266 // If the potential sender is the interpreter then we can do some more checking
sgoldman@542 267 if (Interpreter::contains(sender_pc)) {
sgoldman@542 268 return sender.is_interpreted_frame_valid(thread);
sgoldman@542 269 }
sgoldman@542 270
sgoldman@542 271 // Could just be some random pointer within the codeBlob
twisti@2103 272 if (!sender.cb()->code_contains(sender_pc)) {
twisti@2103 273 return false;
twisti@2103 274 }
sgoldman@542 275
sgoldman@542 276 // We should never be able to see an adapter if the current frame is something from the code cache
twisti@2103 277 if (sender_blob->is_adapter_blob()) {
sgoldman@542 278 return false;
sgoldman@542 279 }
sgoldman@542 280
sgoldman@542 281 if( sender.is_entry_frame()) {
sgoldman@542 282 // Validate the JavaCallWrapper an entry frame must have
sgoldman@542 283
sgoldman@542 284 address jcw = (address)sender.entry_frame_call_wrapper();
sgoldman@542 285
sgoldman@542 286 bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);
sgoldman@542 287
sgoldman@542 288 return jcw_safe;
sgoldman@542 289 }
sgoldman@542 290
sgoldman@542 291 // If the frame size is 0 something is bad; every nmethod has a non-zero frame size
sgoldman@542 292 // because you must allocate window space
sgoldman@542 293
sgoldman@542 294 if (sender_blob->frame_size() == 0) {
sgoldman@542 295 assert(!sender_blob->is_nmethod(), "should count return address at least");
sgoldman@542 296 return false;
sgoldman@542 297 }
sgoldman@542 298
sgoldman@542 299 // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
sgoldman@542 300 // The cause of this is that at a save instruction the O7 we get is a leftover from an earlier
sgoldman@542 301 // window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the
sgoldman@542 302 // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
sgoldman@542 303 // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
sgoldman@542 304 // that initial frame and retrying.
sgoldman@542 305
sgoldman@542 306 if (!sender_blob->is_nmethod()) {
sgoldman@542 307 return false;
sgoldman@542 308 }
sgoldman@542 309
sgoldman@542 310 // Could put some more validation for the potential non-interpreted sender
sgoldman@542 311 // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
sgoldman@542 312
sgoldman@542 313 // One idea is seeing if the sender_pc we have is one that we'd expect to call into the current cb
sgoldman@542 314
sgoldman@542 315 // We've validated the potential sender that would be created
sgoldman@542 316
sgoldman@542 317 return true;
sgoldman@542 318
duke@435 319 }
sgoldman@542 320
sgoldman@542 321 // Must be a native-compiled frame. Since the sender will try to use fp to find
sgoldman@542 322 // linkages, it must be safe.
sgoldman@542 323
sgoldman@542 324 if (!fp_safe) return false;
sgoldman@542 325
sgoldman@542 326 // could try and do some more potential verification of native frame if we could think of some...
sgoldman@542 327
sgoldman@542 328 return true;
duke@435 329 }
duke@435 330
duke@435 331 // constructors
duke@435 332
duke@435 333 // Construct an unpatchable, deficient frame
duke@435 334 frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
duke@435 335 #ifdef _LP64
duke@435 336 assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
duke@435 337 #endif
duke@435 338 _sp = sp;
duke@435 339 _younger_sp = NULL;
duke@435 340 _pc = pc;
duke@435 341 _cb = cb;
duke@435 342 _sp_adjustment_by_callee = 0;
duke@435 343 assert(pc == NULL && cb == NULL || pc != NULL, "can't have a cb and no pc!");
duke@435 344 if (_cb == NULL && _pc != NULL ) {
duke@435 345 _cb = CodeCache::find_blob(_pc);
duke@435 346 }
duke@435 347 _deopt_state = unknown;
duke@435 348 #ifdef ASSERT
duke@435 349 if ( _cb != NULL && _cb->is_nmethod()) {
duke@435 350 // Without a valid unextended_sp() we can't convert the pc to "original"
duke@435 351 assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
duke@435 352 }
duke@435 353 #endif // ASSERT
duke@435 354 }
duke@435 355
twisti@1919 356 frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
twisti@1919 357 _sp(sp),
twisti@1919 358 _younger_sp(younger_sp),
twisti@1919 359 _deopt_state(unknown),
twisti@1919 360 _sp_adjustment_by_callee(0) {
duke@435 361 if (younger_sp == NULL) {
duke@435 362 // make a deficient frame which doesn't know where its PC is
duke@435 363 _pc = NULL;
duke@435 364 _cb = NULL;
duke@435 365 } else {
duke@435 366 _pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
duke@435 367 assert( (intptr_t*)younger_sp[FP->sp_offset_in_saved_window()] == (intptr_t*)((intptr_t)sp - STACK_BIAS), "younger_sp must be valid");
duke@435 368 // Any frame we ever build should always be "safe", therefore we should not have to call
duke@435 369 // find_blob_unsafe
duke@435 370 // In case of native stubs, the pc retrieved here might be
duke@435 371 // wrong. (the _last_native_pc will have the right value)
duke@435 372 // So do not add any asserts on the _pc here.
duke@435 373 }
twisti@1919 374
twisti@1919 375 if (_pc != NULL)
twisti@1919 376 _cb = CodeCache::find_blob(_pc);
twisti@1919 377
twisti@1919 378 // Check for MethodHandle call sites.
twisti@1919 379 if (_cb != NULL) {
twisti@1919 380 nmethod* nm = _cb->as_nmethod_or_null();
twisti@1919 381 if (nm != NULL) {
twisti@1919 382 if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
twisti@1919 383 _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
twisti@1919 384 // The SP is already adjusted by this MH call site, don't
twisti@1919 385 // overwrite this value with the wrong interpreter value.
twisti@1919 386 younger_frame_is_interpreted = false;
twisti@1919 387 }
twisti@1919 388 }
duke@435 389 }
duke@435 390
twisti@1919 391 if (younger_frame_is_interpreted) {
twisti@1919 392 // compute adjustment to this frame's SP made by its interpreted callee
twisti@1919 393 _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
twisti@1919 394 }
duke@435 395
twisti@1919 396 // It is important that the frame is fully constructed when we do
twisti@1919 397 // this lookup as get_deopt_original_pc() needs a correct value for
twisti@1919 398 // unextended_sp() which uses _sp_adjustment_by_callee.
duke@435 399 if (_pc != NULL) {
twisti@1639 400 address original_pc = nmethod::get_deopt_original_pc(this);
twisti@1639 401 if (original_pc != NULL) {
twisti@1639 402 _pc = original_pc;
duke@435 403 _deopt_state = is_deoptimized;
duke@435 404 } else {
duke@435 405 _deopt_state = not_deoptimized;
duke@435 406 }
duke@435 407 }
duke@435 408 }
duke@435 409
duke@435 410 bool frame::is_interpreted_frame() const {
duke@435 411 return Interpreter::contains(pc());
duke@435 412 }
duke@435 413
duke@435 414 // sender_sp
duke@435 415
duke@435 416 intptr_t* frame::interpreter_frame_sender_sp() const {
duke@435 417 assert(is_interpreted_frame(), "interpreted frame expected");
duke@435 418 return fp();
duke@435 419 }
duke@435 420
duke@435 421 #ifndef CC_INTERP
duke@435 422 void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
duke@435 423 assert(is_interpreted_frame(), "interpreted frame expected");
duke@435 424 Unimplemented();
duke@435 425 }
duke@435 426 #endif // CC_INTERP
duke@435 427
duke@435 428
duke@435 429 #ifdef ASSERT
duke@435 430 // Debugging aid
duke@435 431 static frame nth_sender(int n) {
duke@435 432 frame f = JavaThread::current()->last_frame();
duke@435 433
duke@435 434 for(int i = 0; i < n; ++i)
duke@435 435 f = f.sender((RegisterMap*)NULL);
duke@435 436
duke@435 437 printf("first frame %d\n", f.is_first_frame() ? 1 : 0);
duke@435 438 printf("interpreted frame %d\n", f.is_interpreted_frame() ? 1 : 0);
duke@435 439 printf("java frame %d\n", f.is_java_frame() ? 1 : 0);
duke@435 440 printf("entry frame %d\n", f.is_entry_frame() ? 1 : 0);
duke@435 441 printf("native frame %d\n", f.is_native_frame() ? 1 : 0);
duke@435 442 if (f.is_compiled_frame()) {
duke@435 443 if (f.is_deoptimized_frame())
duke@435 444 printf("deoptimized frame 1\n");
duke@435 445 else
duke@435 446 printf("compiled frame 1\n");
duke@435 447 }
duke@435 448
duke@435 449 return f;
duke@435 450 }
duke@435 451 #endif
duke@435 452
duke@435 453
duke@435 454 frame frame::sender_for_entry_frame(RegisterMap *map) const {
duke@435 455 assert(map != NULL, "map must be set");
duke@435 456 // Java frame called from C; skip all C frames and return top C
duke@435 457 // frame of that chunk as the sender
duke@435 458 JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
duke@435 459 assert(!entry_frame_is_first(), "next Java fp must be non zero");
duke@435 460 assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
duke@435 461 intptr_t* last_Java_sp = jfa->last_Java_sp();
duke@435 462 // Since we are walking the stack now this nested anchor is obviously walkable
duke@435 463 // even if it wasn't when it was stacked.
duke@435 464 if (!jfa->walkable()) {
duke@435 465 // Capture _last_Java_pc (if needed) and mark anchor walkable.
duke@435 466 jfa->capture_last_Java_pc(_sp);
duke@435 467 }
duke@435 468 assert(jfa->last_Java_pc() != NULL, "No captured pc!");
duke@435 469 map->clear();
duke@435 470 map->make_integer_regs_unsaved();
duke@435 471 map->shift_window(last_Java_sp, NULL);
duke@435 472 assert(map->include_argument_oops(), "should be set by clear");
duke@435 473 return frame(last_Java_sp, frame::unpatchable, jfa->last_Java_pc());
duke@435 474 }
duke@435 475
duke@435 476 frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
duke@435 477 ShouldNotCallThis();
duke@435 478 return sender(map);
duke@435 479 }
duke@435 480
duke@435 481 frame frame::sender_for_compiled_frame(RegisterMap *map) const {
duke@435 482 ShouldNotCallThis();
duke@435 483 return sender(map);
duke@435 484 }
duke@435 485
duke@435 486 frame frame::sender(RegisterMap* map) const {
duke@435 487 assert(map != NULL, "map must be set");
duke@435 488
duke@435 489 assert(CodeCache::find_blob_unsafe(_pc) == _cb, "inconsistent");
duke@435 490
duke@435 491 // Default is not to follow arguments; update it accordingly below
duke@435 492 map->set_include_argument_oops(false);
duke@435 493
duke@435 494 if (is_entry_frame()) return sender_for_entry_frame(map);
duke@435 495
twisti@1919 496 intptr_t* younger_sp = sp();
twisti@1919 497 intptr_t* sp = sender_sp();
duke@435 498
duke@435 499 // Note: The version of this operation on any platform with callee-save
duke@435 500 // registers must update the register map (if not null).
duke@435 501 // In order to do this correctly, the various subtypes of
duke@435 502 // of frame (interpreted, compiled, glue, native),
duke@435 503 // must be distinguished. There is no need on SPARC for
duke@435 504 // such distinctions, because all callee-save registers are
duke@435 505 // preserved for all frames via SPARC-specific mechanisms.
duke@435 506 //
duke@435 507 // *** HOWEVER, *** if and when we make any floating-point
duke@435 508 // registers callee-saved, then we will have to copy over
duke@435 509 // the RegisterMap update logic from the Intel code.
duke@435 510
duke@435 511 // The constructor of the sender must know whether this frame is interpreted so it can set the
duke@435 512 // sender's _sp_adjustment_by_callee field. An osr adapter frame was originally
duke@435 513 // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
duke@435 514 // explicitly recognized.
duke@435 515
twisti@1919 516 bool frame_is_interpreted = is_interpreted_frame();
twisti@1919 517 if (frame_is_interpreted) {
duke@435 518 map->make_integer_regs_unsaved();
duke@435 519 map->shift_window(sp, younger_sp);
duke@435 520 } else if (_cb != NULL) {
duke@435 521 // Update the locations of implicitly saved registers to be their
duke@435 522 // addresses in the register save area.
duke@435 523 // For %o registers, the addresses of %i registers in the next younger
duke@435 524 // frame are used.
duke@435 525 map->shift_window(sp, younger_sp);
duke@435 526 if (map->update_map()) {
duke@435 527 // Tell GC to use argument oopmaps for some runtime stubs that need it.
duke@435 528 // For C1, the runtime stub might not have oop maps, so set this flag
duke@435 529 // outside of update_register_map.
duke@435 530 map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
duke@435 531 if (_cb->oop_maps() != NULL) {
duke@435 532 OopMapSet::update_register_map(this, map);
duke@435 533 }
duke@435 534 }
duke@435 535 }
twisti@1919 536 return frame(sp, younger_sp, frame_is_interpreted);
duke@435 537 }
duke@435 538
duke@435 539
duke@435 540 void frame::patch_pc(Thread* thread, address pc) {
duke@435 541 if(thread == Thread::current()) {
duke@435 542 StubRoutines::Sparc::flush_callers_register_windows_func()();
duke@435 543 }
duke@435 544 if (TracePcPatching) {
duke@435 545 // QQQ this assert is invalid (or too strong anyway) since _pc could
duke@435 546 // be original pc and frame could have the deopt pc.
duke@435 547 // assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc");
duke@435 548 tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", O7_addr(), _pc, pc);
duke@435 549 }
duke@435 550 _cb = CodeCache::find_blob(pc);
duke@435 551 *O7_addr() = pc - pc_return_offset;
duke@435 552 _cb = CodeCache::find_blob(_pc);
twisti@1639 553 address original_pc = nmethod::get_deopt_original_pc(this);
twisti@1639 554 if (original_pc != NULL) {
twisti@1639 555 assert(original_pc == _pc, "expected original to be stored before patching");
duke@435 556 _deopt_state = is_deoptimized;
duke@435 557 } else {
duke@435 558 _deopt_state = not_deoptimized;
duke@435 559 }
duke@435 560 }
duke@435 561
duke@435 562
duke@435 563 static bool sp_is_valid(intptr_t* old_sp, intptr_t* young_sp, intptr_t* sp) {
duke@435 564 return (((intptr_t)sp & (2*wordSize-1)) == 0 &&
duke@435 565 sp <= old_sp &&
duke@435 566 sp >= young_sp);
duke@435 567 }
duke@435 568
duke@435 569
duke@435 570 /*
duke@435 571 Find the (biased) sp that is just younger than old_sp starting at sp.
duke@435 572 If not found return NULL. Register windows are assumed to be flushed.
duke@435 573 */
duke@435 574 intptr_t* frame::next_younger_sp_or_null(intptr_t* old_sp, intptr_t* sp) {
duke@435 575
duke@435 576 intptr_t* previous_sp = NULL;
duke@435 577 intptr_t* orig_sp = sp;
duke@435 578
duke@435 579 int max_frames = (old_sp - sp) / 16; // Minimum frame size is 16
duke@435 580 int max_frame2 = max_frames;
duke@435 581 while(sp != old_sp && sp_is_valid(old_sp, orig_sp, sp)) {
duke@435 582 if (max_frames-- <= 0)
duke@435 583 // too many frames have gone by; invalid parameters given to this function
duke@435 584 break;
duke@435 585 previous_sp = sp;
duke@435 586 sp = (intptr_t*)sp[FP->sp_offset_in_saved_window()];
duke@435 587 sp = (intptr_t*)((intptr_t)sp + STACK_BIAS);
duke@435 588 }
duke@435 589
duke@435 590 return (sp == old_sp ? previous_sp : NULL);
duke@435 591 }
duke@435 592
duke@435 593 /*
duke@435 594 Determine if "sp" is a valid stack pointer. "sp" is assumed to be younger than
duke@435 595 "valid_sp". So if "sp" is valid itself then it should be possible to walk frames
duke@435 596 from "sp" to "valid_sp". The assumption is that the registers windows for the
duke@435 597 thread stack in question are flushed.
duke@435 598 */
duke@435 599 bool frame::is_valid_stack_pointer(intptr_t* valid_sp, intptr_t* sp) {
duke@435 600 return next_younger_sp_or_null(valid_sp, sp) != NULL;
duke@435 601 }
duke@435 602
duke@435 603
duke@435 604 bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
duke@435 605 assert(is_interpreted_frame(), "must be interpreter frame");
duke@435 606 return this->fp() == fp;
duke@435 607 }
duke@435 608
duke@435 609
duke@435 610 void frame::pd_gc_epilog() {
duke@435 611 if (is_interpreted_frame()) {
duke@435 612 // set constant pool cache entry for interpreter
duke@435 613 methodOop m = interpreter_frame_method();
duke@435 614
duke@435 615 *interpreter_frame_cpoolcache_addr() = m->constants()->cache();
duke@435 616 }
duke@435 617 }
duke@435 618
duke@435 619
sgoldman@542 620 bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
duke@435 621 #ifdef CC_INTERP
duke@435 622 // Is there anything to do?
duke@435 623 #else
duke@435 624 assert(is_interpreted_frame(), "Not an interpreted frame");
duke@435 625 // These are reasonable sanity checks
duke@435 626 if (fp() == 0 || (intptr_t(fp()) & (2*wordSize-1)) != 0) {
duke@435 627 return false;
duke@435 628 }
duke@435 629 if (sp() == 0 || (intptr_t(sp()) & (2*wordSize-1)) != 0) {
duke@435 630 return false;
duke@435 631 }
sgoldman@542 632
duke@435 633 const intptr_t interpreter_frame_initial_sp_offset = interpreter_frame_vm_local_words;
duke@435 634 if (fp() + interpreter_frame_initial_sp_offset < sp()) {
duke@435 635 return false;
duke@435 636 }
duke@435 637 // These are hacks to keep us out of trouble.
duke@435 638 // The problem with these is that they mask other problems
duke@435 639 if (fp() <= sp()) { // this attempts to deal with unsigned comparison above
duke@435 640 return false;
duke@435 641 }
sgoldman@542 642 // do some validation of frame elements
sgoldman@542 643
sgoldman@542 644 // first the method
sgoldman@542 645
sgoldman@542 646 methodOop m = *interpreter_frame_method_addr();
sgoldman@542 647
sgoldman@542 648 // validate the method we'd find in this potential sender
sgoldman@542 649 if (!Universe::heap()->is_valid_method(m)) return false;
sgoldman@542 650
sgoldman@542 651 // stack frames shouldn't be much larger than max_stack elements
sgoldman@542 652
twisti@1861 653 if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
duke@435 654 return false;
duke@435 655 }
sgoldman@542 656
sgoldman@542 657 // validate bci/bcx
sgoldman@542 658
sgoldman@542 659 intptr_t bcx = interpreter_frame_bcx();
sgoldman@542 660 if (m->validate_bci_from_bcx(bcx) < 0) {
sgoldman@542 661 return false;
sgoldman@542 662 }
sgoldman@542 663
sgoldman@542 664 // validate constantPoolCacheOop
sgoldman@542 665
sgoldman@542 666 constantPoolCacheOop cp = *interpreter_frame_cache_addr();
sgoldman@542 667
sgoldman@542 668 if (cp == NULL ||
sgoldman@542 669 !Space::is_aligned(cp) ||
sgoldman@542 670 !Universe::heap()->is_permanent((void*)cp)) return false;
sgoldman@542 671
sgoldman@542 672 // validate locals
sgoldman@542 673
sgoldman@542 674 address locals = (address) *interpreter_frame_locals_addr();
sgoldman@542 675
sgoldman@542 676 if (locals > thread->stack_base() || locals < (address) fp()) return false;
sgoldman@542 677
sgoldman@542 678 // We'd have to be pretty unlucky to be misled at this point
duke@435 679 #endif /* CC_INTERP */
duke@435 680 return true;
duke@435 681 }
duke@435 682
duke@435 683
duke@435 684 // Windows have been flushed on entry (but not marked). Capture the pc that
duke@435 685 // is the return address to the frame that contains "sp" as its stack pointer.
duke@435 686 // This pc resides in the callee of the frame corresponding to "sp".
duke@435 687 // As a side effect we mark this JavaFrameAnchor as having flushed the windows.
duke@435 688 // This side effect lets us mark stacked JavaFrameAnchors (stacked in the
duke@435 689 // call_helper) as flushed when we have flushed the windows for the most
duke@435 690 // recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
duke@435 691 // and lets us find the pc just once rather than multiple times as it did
duke@435 692 // in the bad old _post_Java_state days.
duke@435 693 //
duke@435 694 void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
duke@435 695 if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
duke@435 696 // try and find the sp just younger than _last_Java_sp
duke@435 697 intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
duke@435 698 // Really this should never fail; otherwise the VM call must have non-standard
duke@435 699 // frame linkage (bad) or the stack is not properly flushed (worse).
duke@435 700 guarantee(_post_Java_sp != NULL, "bad stack!");
duke@435 701 _last_Java_pc = (address) _post_Java_sp[ I7->sp_offset_in_saved_window()] + frame::pc_return_offset;
duke@435 702
duke@435 703 }
duke@435 704 set_window_flushed();
duke@435 705 }
duke@435 706
duke@435 707 void JavaFrameAnchor::make_walkable(JavaThread* thread) {
duke@435 708 if (walkable()) return;
duke@435 709 // Eventually make an assert
duke@435 710 guarantee(Thread::current() == (Thread*)thread, "only current thread can flush its registers");
duke@435 711 // We always flush in case the profiler wants it but we won't mark
duke@435 712 // the windows as flushed unless we have a last_Java_frame
duke@435 713 intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
duke@435 714 if (last_Java_sp() != NULL ) {
duke@435 715 capture_last_Java_pc(sp);
duke@435 716 }
duke@435 717 }
duke@435 718
duke@435 719 intptr_t* frame::entry_frame_argument_at(int offset) const {
duke@435 720 // convert offset to index to deal with tsi
duke@435 721 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
duke@435 722
duke@435 723 intptr_t* LSP = (intptr_t*) sp()[Lentry_args->sp_offset_in_saved_window()];
duke@435 724 return &LSP[index+1];
duke@435 725 }
duke@435 726
duke@435 727
duke@435 728 BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
duke@435 729 assert(is_interpreted_frame(), "interpreted frame expected");
duke@435 730 methodOop method = interpreter_frame_method();
duke@435 731 BasicType type = method->result_type();
duke@435 732
duke@435 733 if (method->is_native()) {
duke@435 734 // Prior to notifying the runtime of the method_exit the possible result
duke@435 735 // value is saved to l_scratch and d_scratch.
duke@435 736
duke@435 737 #ifdef CC_INTERP
duke@435 738 interpreterState istate = get_interpreterState();
duke@435 739 intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
duke@435 740 intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
duke@435 741 #else /* CC_INTERP */
duke@435 742 intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
duke@435 743 intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
duke@435 744 #endif /* CC_INTERP */
duke@435 745
duke@435 746 address l_addr = (address)l_scratch;
duke@435 747 #ifdef _LP64
duke@435 748 // On 64-bit the result for 1/8/16/32-bit result types is in the other
duke@435 749 // word half
duke@435 750 l_addr += wordSize/2;
duke@435 751 #endif
duke@435 752
duke@435 753 switch (type) {
duke@435 754 case T_OBJECT:
duke@435 755 case T_ARRAY: {
duke@435 756 #ifdef CC_INTERP
duke@435 757 *oop_result = istate->_oop_temp;
duke@435 758 #else
duke@435 759 oop obj = (oop) at(interpreter_frame_oop_temp_offset);
duke@435 760 assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
duke@435 761 *oop_result = obj;
duke@435 762 #endif // CC_INTERP
duke@435 763 break;
duke@435 764 }
duke@435 765
duke@435 766 case T_BOOLEAN : { jint* p = (jint*)l_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
duke@435 767 case T_BYTE : { jint* p = (jint*)l_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
duke@435 768 case T_CHAR : { jint* p = (jint*)l_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
duke@435 769 case T_SHORT : { jint* p = (jint*)l_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
duke@435 770 case T_INT : value_result->i = *(jint*)l_addr; break;
duke@435 771 case T_LONG : value_result->j = *(jlong*)l_scratch; break;
duke@435 772 case T_FLOAT : value_result->f = *(jfloat*)d_scratch; break;
duke@435 773 case T_DOUBLE : value_result->d = *(jdouble*)d_scratch; break;
duke@435 774 case T_VOID : /* Nothing to do */ break;
duke@435 775 default : ShouldNotReachHere();
duke@435 776 }
duke@435 777 } else {
duke@435 778 intptr_t* tos_addr = interpreter_frame_tos_address();
duke@435 779
duke@435 780 switch(type) {
duke@435 781 case T_OBJECT:
duke@435 782 case T_ARRAY: {
duke@435 783 oop obj = (oop)*tos_addr;
duke@435 784 assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
duke@435 785 *oop_result = obj;
duke@435 786 break;
duke@435 787 }
duke@435 788 case T_BOOLEAN : { jint* p = (jint*)tos_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
duke@435 789 case T_BYTE : { jint* p = (jint*)tos_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
duke@435 790 case T_CHAR : { jint* p = (jint*)tos_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
duke@435 791 case T_SHORT : { jint* p = (jint*)tos_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
duke@435 792 case T_INT : value_result->i = *(jint*)tos_addr; break;
duke@435 793 case T_LONG : value_result->j = *(jlong*)tos_addr; break;
duke@435 794 case T_FLOAT : value_result->f = *(jfloat*)tos_addr; break;
duke@435 795 case T_DOUBLE : value_result->d = *(jdouble*)tos_addr; break;
duke@435 796 case T_VOID : /* Nothing to do */ break;
duke@435 797 default : ShouldNotReachHere();
duke@435 798 }
duke@435 799 };
duke@435 800
duke@435 801 return type;
duke@435 802 }
duke@435 803
duke@435 804 // Lesp pointer is one word lower than the top item on the stack.
duke@435 805 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
duke@435 806 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
duke@435 807 return &interpreter_frame_tos_address()[index];
duke@435 808 }
