src/cpu/x86/vm/frame_x86.cpp

Thu, 26 Sep 2013 10:25:02 -0400

author
hseigel
date
Thu, 26 Sep 2013 10:25:02 -0400
changeset 5784
190899198332
parent 5307
e0c9a1d29eb4
child 6163
c586f8a7322f
permissions
-rw-r--r--

7195622: CheckUnhandledOops has limited usefulness now
Summary: Enable CHECK_UNHANDLED_OOPS in fastdebug builds across all supported platforms.
Reviewed-by: coleenp, hseigel, dholmes, stefank, twisti, ihse, rdurbin
Contributed-by: lois.foltan@oracle.com

duke@435 1 /*
sla@5237 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "interpreter/interpreter.hpp"
stefank@2314 27 #include "memory/resourceArea.hpp"
stefank@2314 28 #include "oops/markOop.hpp"
coleenp@4037 29 #include "oops/method.hpp"
stefank@2314 30 #include "oops/oop.inline.hpp"
iveresov@3495 31 #include "prims/methodHandles.hpp"
stefank@2314 32 #include "runtime/frame.inline.hpp"
stefank@2314 33 #include "runtime/handles.inline.hpp"
stefank@2314 34 #include "runtime/javaCalls.hpp"
stefank@2314 35 #include "runtime/monitorChunk.hpp"
sla@5237 36 #include "runtime/os.hpp"
stefank@2314 37 #include "runtime/signature.hpp"
stefank@2314 38 #include "runtime/stubCodeGenerator.hpp"
stefank@2314 39 #include "runtime/stubRoutines.hpp"
stefank@2314 40 #include "vmreg_x86.inline.hpp"
stefank@2314 41 #ifdef COMPILER1
stefank@2314 42 #include "c1/c1_Runtime1.hpp"
stefank@2314 43 #include "runtime/vframeArray.hpp"
stefank@2314 44 #endif
duke@435 45
#ifdef ASSERT
// Debug-only hook: on x86 there is no extra per-location validation to do,
// so the body is intentionally empty.
void RegisterMap::check_location_valid() {
}
#endif
duke@435 50
duke@435 51
duke@435 52 // Profiling/safepoint support
duke@435 53
// Decides whether it is safe to construct this frame's sender, e.g. when an
// asynchronous profiler or safepoint samples a thread at an arbitrary point.
// Every pointer involved (sp, unextended_sp, fp, saved fp, JavaCallWrapper)
// is range-checked against the thread's stack; the function returns false
// conservatively on any failed check.
bool frame::safe_for_sender(JavaThread *thread) {
  address   sp = (address)_sp;
  address   fp = (address)_fp;
  address   unextended_sp = (address)_unextended_sp;

  // consider stack guards when trying to determine "safe" stack pointers
  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
  size_t usable_stack_size = thread->stack_size() - stack_guard_size;

  // sp must be within the usable part of the stack (not in guards)
  bool sp_safe = (sp < thread->stack_base()) &&
                 (sp >= thread->stack_base() - usable_stack_size);


  if (!sp_safe) {
    return false;
  }

  // unextended sp must be within the stack and above or equal sp
  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
                            (unextended_sp >= sp);

  if (!unextended_sp_safe) {
    return false;
  }

  // an fp must be within the stack and above (but not equal) sp
  // second evaluation on fp+ is added to handle situation where fp is -1
  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));

  // We know sp/unextended_sp are safe only fp is questionable here

  // If the current frame is known to the code cache then we can attempt to
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code

  if (_cb != NULL ) {

    // First check if frame is complete and tester is reliable
    // Unfortunately we can only check frame complete for runtime stubs and nmethod
    // other generic buffer blobs are more problematic so we just assume they are
    // ok. adapter blobs never have a frame complete and are never ok.

    // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc

    if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
      //assert(0, "Invalid frame_size");
      return false;
    }

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Could just be some random pointer within the codeBlob
    if (!_cb->code_contains(_pc)) {
      return false;
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.

      if (!fp_safe) return false;

      // Validate the JavaCallWrapper an entry frame must have

      address jcw = (address)entry_frame_call_wrapper();

      // The wrapper lives above fp on the stack, below the stack base.
      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);

      return jcw_safe;

    }

    intptr_t* sender_sp = NULL;
    address   sender_pc = NULL;

    if (is_interpreted_frame()) {
      // fp must be safe
      if (!fp_safe) {
        return false;
      }

      // For interpreted frames the sender's pc/sp sit at fixed fp-relative slots.
      sender_pc = (address) this->fp()[return_addr_offset];
      sender_sp = (intptr_t*) addr_at(sender_sp_offset);

    } else {
      // must be some sort of compiled/runtime frame
      // fp does not have to be safe (although it could be check for c1?)

      sender_sp = _unextended_sp + _cb->frame_size();
      // On Intel the return_address is always the word on the stack
      sender_pc = (address) *(sender_sp-1);
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {

      // ebp is always saved in a recognizable place in any code we generate. However
      // only if the sender is interpreted/call_stub (c1 too?) are we certain that the saved ebp
      // is really a frame pointer.

      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, saved_fp, sender_pc);

      return sender.is_interpreted_frame_valid(thread);

    }

    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL || sender_blob == NULL) {
      return false;
    }

    // Could be a zombie method
    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
      return false;
    }

    // Could just be some random pointer within the codeBlob
    if (!sender_blob->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    // Could be the call_stub
    if (StubRoutines::returns_to_call_stub(sender_pc)) {
      intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);

      if (!saved_fp_safe) {
        return false;
      }

      // construct the potential sender

      frame sender(sender_sp, saved_fp, sender_pc);

      // Validate the JavaCallWrapper an entry frame must have
      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());

      return jcw_safe;
    }

    // A sender pc sitting at an nmethod's deopt entry means the stack is in
    // the middle of being rewritten for deoptimization -- not walkable.
    if (sender_blob->is_nmethod()) {
      nmethod* nm = sender_blob->as_nmethod_or_null();
      if (nm != NULL) {
        if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
          return false;
        }
      }
    }

    // If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
    // because the return address counts against the callee's frame.

    if (sender_blob->frame_size() <= 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // We should never be able to see anything here except an nmethod. If something in the
    // code cache (current frame) is called by an entity within the code cache that entity
    // should not be anything but the call stub (already covered), the interpreter (already covered)
    // or an nmethod.

    if (!sender_blob->is_nmethod()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb

    // We've validated the potential sender that would be created
    return true;
  }

  // Must be native-compiled frame. Since sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) {
    return false;
  }

  // Will the pc we fetch be non-zero (which we'll find at the oldest frame)

  if ( (address) this->fp()[return_addr_offset] == NULL) return false;


  // could try and do some more potential verification of native frame if we could think of some...

  return true;

}
duke@435 269
duke@435 270
// Overwrites the return-address slot just below sp() with 'pc', then
// re-derives the cached CodeBlob and the deoptimization state for the new PC.
void frame::patch_pc(Thread* thread, address pc) {
  // On x86 the return address is the word immediately below sp().
  address* pc_addr = &(((address*) sp())[-1]);
  if (TracePcPatching) {
    tty->print_cr("patch_pc at address " INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "]",
                  pc_addr, *pc_addr, pc);
  }
  // Either the return address is the original one or we are going to
  // patch in the same address that's already there.
  assert(_pc == *pc_addr || pc == *pc_addr, "must be");
  *pc_addr = pc;
  _cb = CodeCache::find_blob(pc);
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    // We patched in a deopt entry: the original PC must already have been
    // saved in the frame before patching, and _pc keeps pointing at it.
    assert(original_pc == _pc, "expected original PC to be stored before patching");
    _deopt_state = is_deoptimized;
    // leave _pc as is
  } else {
    _deopt_state = not_deoptimized;
    _pc = pc;
  }
}
duke@435 292
duke@435 293 bool frame::is_interpreted_frame() const {
duke@435 294 return Interpreter::contains(pc());
duke@435 295 }
duke@435 296
cfang@1228 297 int frame::frame_size(RegisterMap* map) const {
cfang@1228 298 frame sender = this->sender(map);
duke@435 299 return sender.sp() - sp();
duke@435 300 }
duke@435 301
duke@435 302 intptr_t* frame::entry_frame_argument_at(int offset) const {
duke@435 303 // convert offset to index to deal with tsi
duke@435 304 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
duke@435 305 // Entry frame's arguments are always in relation to unextended_sp()
duke@435 306 return &unextended_sp()[index];
duke@435 307 }
duke@435 308
// sender_sp
#ifdef CC_INTERP
// C++-interpreter build: frame layout is described by the interpreterState
// object rather than by fixed fp-relative offsets.
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  // QQQ why does this specialize method exist if frame::sender_sp() does same thing?
  // seems odd and if we always know interpreted vs. non then sender_sp() is really
  // doing too much work.
  return get_interpreterState()->sender_sp();
}

// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return get_interpreterState()->monitor_base();
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  return (BasicObjectLock*) get_interpreterState()->stack_base();
}

#else // CC_INTERP

// Template-interpreter build: all interpreter-frame fields live at fixed
// fp-relative slots (the *_offset constants).
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return (intptr_t*) at(interpreter_frame_sender_sp_offset);
}

void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  ptr_at_put(interpreter_frame_sender_sp_offset, (intptr_t) sender_sp);
}


// monitor elements

BasicObjectLock* frame::interpreter_frame_monitor_begin() const {
  return (BasicObjectLock*) addr_at(interpreter_frame_monitor_block_bottom_offset);
}

BasicObjectLock* frame::interpreter_frame_monitor_end() const {
  BasicObjectLock* result = (BasicObjectLock*) *addr_at(interpreter_frame_monitor_block_top_offset);
  // make sure the pointer points inside the frame
  assert(sp() <= (intptr_t*) result, "monitor end should be above the stack pointer");
  assert((intptr_t*) result < fp(), "monitor end should be strictly below the frame pointer");
  return result;
}

void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  *((BasicObjectLock**)addr_at(interpreter_frame_monitor_block_top_offset)) = value;
}

// Used by template based interpreter deoptimization
void frame::interpreter_frame_set_last_sp(intptr_t* sp) {
  *((intptr_t**)addr_at(interpreter_frame_last_sp_offset)) = sp;
}
#endif // CC_INTERP
duke@435 365
// Sender of an entry frame: the last Java frame recorded in the entry
// frame's JavaFrameAnchor; all intervening C frames are skipped.
frame frame::sender_for_entry_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > sp(), "must be above this frame on stack");
  map->clear();
  assert(map->include_argument_oops(), "should be set by clear");
  if (jfa->last_Java_pc() != NULL ) {
    // The anchor recorded an explicit PC; use it instead of re-deriving one.
    frame fr(jfa->last_Java_sp(), jfa->last_Java_fp(), jfa->last_Java_pc());
    return fr;
  }
  frame fr(jfa->last_Java_sp(), jfa->last_Java_fp());
  return fr;
}
duke@435 382
//------------------------------------------------------------------------------
// frame::verify_deopt_original_pc
//
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP. The unextended SP might also be the saved SP
// for MethodHandle call sites.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp, bool is_method_handle_return) {
  frame fr;

  // This is ugly but it's better than to change {get,set}_original_pc
  // to take an SP value as argument. And it's only a debugging
  // method anyway.
  fr._unextended_sp = unextended_sp;

  address original_pc = nm->get_original_pc(&fr);
  assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
  assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be");
}
#endif
twisti@1639 403
//------------------------------------------------------------------------------
// frame::adjust_unextended_sp
//
// Corrects _unextended_sp for frames that are returning to a compiled
// MethodHandle call site (or to a deopt entry for one), where the saved
// "fp" slot actually holds the unextended SP.
void frame::adjust_unextended_sp() {
  // If we are returning to a compiled MethodHandle call site, the
  // saved_fp will in fact be a saved value of the unextended SP. The
  // simplest way to tell whether we are returning to such a call site
  // is as follows:

  nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
  if (sender_nm != NULL) {
    // If the sender PC is a deoptimization point, get the original
    // PC. For MethodHandle call site the unextended_sp is stored in
    // saved_fp.
    if (sender_nm->is_deopt_mh_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_mh_original_pc(sender_nm, _fp));
      _unextended_sp = _fp;
    }
    else if (sender_nm->is_deopt_entry(_pc)) {
      DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
    }
    else if (sender_nm->is_method_handle_return(_pc)) {
      _unextended_sp = _fp;
    }
  }
}
never@2895 429
//------------------------------------------------------------------------------
// frame::update_map_with_saved_link
//
// Records in the RegisterMap where the caller's EBP/RBP was saved so GC
// and deoptimization can locate a live frame-pointer register value.
void frame::update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr) {
  // The interpreter and compiler(s) always save EBP/RBP in a known
  // location on entry. We must record where that location is
  // so this if EBP/RBP was live on callout from c2 we can find
  // the saved copy no matter what it called.

  // Since the interpreter always saves EBP/RBP if we record where it is then
  // we don't have to always save EBP/RBP on entry and exit to c2 compiled
  // code, on entry will be enough.
  map->set_location(rbp->as_VMReg(), (address) link_addr);
#ifdef AMD64
  // this is weird "H" ought to be at a higher address however the
  // oopMaps seems to have the "H" regs at the same address and the
  // vanilla register.
  // XXXX make this go away
  if (true) {
    map->set_location(rbp->as_VMReg()->next(), (address) link_addr);
  }
#endif // AMD64
}
never@2895 452
twisti@1639 453
//------------------------------------------------------------------------------
// frame::sender_for_interpreter_frame
//
// Builds the caller's frame from the fixed fp-relative slots an
// interpreter frame always maintains.
frame frame::sender_for_interpreter_frame(RegisterMap* map) const {
  // SP is the raw SP from the sender after adapter or interpreter
  // extension.
  intptr_t* sender_sp = this->sender_sp();

  // This is the sp before any possible extension (adapter/locals).
  intptr_t* unextended_sp = interpreter_frame_sender_sp();

#ifdef COMPILER2
  if (map->update_map()) {
    // Record where the caller's EBP/RBP was saved (see update_map_with_saved_link).
    update_map_with_saved_link(map, (intptr_t**) addr_at(link_offset));
  }
#endif // COMPILER2

  return frame(sender_sp, unextended_sp, link(), sender_pc());
}
duke@435 472
duke@435 473
//------------------------------------------------------------------------------
// frame::sender_for_compiled_frame
//
// Builds the caller's frame for a frame owned by compiled code, deriving
// the sender's sp from the CodeBlob's fixed frame size and reading the
// return address from the word below it.
frame frame::sender_for_compiled_frame(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  // frame owned by optimizing compiler
  assert(_cb->frame_size() >= 0, "must have non-zero frame size");
  intptr_t* sender_sp = unextended_sp() + _cb->frame_size();
  intptr_t* unextended_sp = sender_sp;

  // On Intel the return_address is always the word on the stack
  address sender_pc = (address) *(sender_sp-1);

  // This is the saved value of EBP which may or may not really be an FP.
  // It is only an FP if the sender is an interpreter frame (or C1?).
  intptr_t** saved_fp_addr = (intptr_t**) (sender_sp - frame::sender_sp_offset);

  if (map->update_map()) {
    // Tell GC to use argument oopmaps for some runtime stubs that need it.
    // For C1, the runtime stub might not have oop maps, so set this flag
    // outside of update_register_map.
    map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
    if (_cb->oop_maps() != NULL) {
      OopMapSet::update_register_map(this, map);
    }

    // Since the prolog does the save and restore of EBP there is no oopmap
    // for it so we must fill in its location as if there was an oopmap entry
    // since if our caller was compiled code there could be live jvm state in it.
    update_map_with_saved_link(map, saved_fp_addr);
  }

  assert(sender_sp != sp(), "must have changed");
  return frame(sender_sp, unextended_sp, *saved_fp_addr, sender_pc);
}
duke@435 509
twisti@1639 510
//------------------------------------------------------------------------------
// frame::sender
//
// Dispatches to the appropriate sender_for_* routine based on what kind
// of frame this is (entry, interpreted, compiled, or native).
frame frame::sender(RegisterMap* map) const {
  // Default is we don't have to follow them. The sender_for_xxx will
  // update it accordingly
  map->set_include_argument_oops(false);

  if (is_entry_frame())       return sender_for_entry_frame(map);
  if (is_interpreted_frame()) return sender_for_interpreter_frame(map);
  assert(_cb == CodeCache::find_blob(pc()),"Must be the same");

  if (_cb != NULL) {
    return sender_for_compiled_frame(map);
  }
  // Must be native-compiled frame, i.e. the marshaling code for native
  // methods that exists in the core system.
  return frame(sender_sp(), link(), sender_pc());
}
duke@435 529
duke@435 530
// Checks whether this interpreter frame's fp matches 'fp' after accounting
// for the locals-minus-parameters adjustment applied when an optimized
// frame is unpacked during deoptimization.
bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
  assert(is_interpreted_frame(), "must be interpreter frame");
  Method* method = interpreter_frame_method();
  // When unpacking an optimized frame the frame pointer is
  // adjusted with:
  int diff = (method->max_locals() - method->size_of_parameters()) *
             Interpreter::stackElementWords;
  return _fp == (fp - diff);
}
duke@435 540
// Platform-dependent GC epilog: no per-frame cleanup is required on x86.
void frame::pd_gc_epilog() {
  // nothing done here now
}
duke@435 544
// Heuristic validation of a potential interpreter frame, used by
// safe_for_sender. Checks pointer alignment and ordering, then samples
// the Method*, bci/bcx, constant-pool cache, and locals for plausibility.
// A false return means "don't trust this frame", not "corrupt VM".
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
// QQQ
#ifdef CC_INTERP
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (wordSize-1)) != 0) {
    return false;
  }
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }

  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!m->is_valid_method()) return false;

  // stack frames shouldn't be much larger than max_stack elements

  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  intptr_t bcx = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }

  // validate ConstantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (cp == NULL || !cp->is_metaspace_object()) return false;

  // validate locals

  address locals = (address) *interpreter_frame_locals_addr();

  // locals must lie between fp and the stack base
  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be mislead at this point

#endif // CC_INTERP
  return true;
}
duke@435 603
// Extracts the method's return value from this interpreter frame and
// stores it into *oop_result (for object results) or *value_result (for
// primitives). Returns the result's BasicType. Used by JVMTI method-exit
// reporting, where the value has already been pushed on the stack.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
#ifdef CC_INTERP
  // Needed for JVMTI. The result should always be in the
  // interpreterState object
  interpreterState istate = get_interpreterState();
#endif // CC_INTERP
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  intptr_t* tos_addr;
  if (method->is_native()) {
    // Prior to calling into the runtime to report the method_exit the possible
    // return value is pushed to the native stack. If the result is a jfloat/jdouble
    // then ST0 is saved before EAX/EDX. See the note in generate_native_result
    tos_addr = (intptr_t*)sp();
    if (type == T_FLOAT || type == T_DOUBLE) {
    // QQQ seems like this code is equivalent on the two platforms
#ifdef AMD64
      // This is times two because we do a push(ltos) after pushing XMM0
      // and that takes two interpreter stack slots.
      tos_addr += 2 * Interpreter::stackElementWords;
#else
      tos_addr += 2;
#endif // AMD64
    }
  } else {
    tos_addr = (intptr_t*)interpreter_frame_tos_address();
  }

  switch (type) {
    case T_OBJECT  :
    case T_ARRAY   : {
      oop obj;
      if (method->is_native()) {
#ifdef CC_INTERP
        obj = istate->_oop_temp;
#else
        // Native methods keep their oop result in a dedicated frame slot.
        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
#endif // CC_INTERP
      } else {
        oop* obj_p = (oop*)tos_addr;
        obj = (obj_p == NULL) ? (oop)NULL : *obj_p;
      }
      assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
      *oop_result = obj;
      break;
    }
    case T_BOOLEAN : value_result->z = *(jboolean*)tos_addr; break;
    case T_BYTE    : value_result->b = *(jbyte*)tos_addr; break;
    case T_CHAR    : value_result->c = *(jchar*)tos_addr; break;
    case T_SHORT   : value_result->s = *(jshort*)tos_addr; break;
    case T_INT     : value_result->i = *(jint*)tos_addr; break;
    case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
    case T_FLOAT   : {
#ifdef AMD64
        value_result->f = *(jfloat*)tos_addr;
#else
      if (method->is_native()) {
        jdouble d = *(jdouble*)tos_addr;  // Result was in ST0 so need to convert to jfloat
        value_result->f = (jfloat)d;
      } else {
        value_result->f = *(jfloat*)tos_addr;
      }
#endif // AMD64
      break;
    }
    case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
    case T_VOID    : /* Nothing to do */ break;
    default        : ShouldNotReachHere();
  }

  return type;
}
duke@435 678
duke@435 679
duke@435 680 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
duke@435 681 int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
duke@435 682 return &interpreter_frame_tos_address()[index];
duke@435 683 }
never@2868 684
#ifndef PRODUCT

// Emits one FrameValues entry for the fp-relative slot named 'name'.
#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

// Platform-dependent part of frame description (debugging aid): lists the
// fixed fp-relative slots of an interpreter frame.
void frame::describe_pd(FrameValues& values, int frame_no) {
  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_sender_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_last_sp);
    DESCRIBE_FP_OFFSET(interpreter_frame_method);
    DESCRIBE_FP_OFFSET(interpreter_frame_mdx);
    DESCRIBE_FP_OFFSET(interpreter_frame_cache);
    DESCRIBE_FP_OFFSET(interpreter_frame_locals);
    DESCRIBE_FP_OFFSET(interpreter_frame_bcx);
    DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
  }
}
#endif
bdelsart@3130 703
// Value deoptimization uses to reset the saved FP: on x86 it is simply fp().
intptr_t *frame::initial_deoptimization_info() {
  // used to reset the saved FP
  return fp();
}
bdelsart@3433 708
// Best-effort "real" frame pointer: for code-cache frames with a known
// frame size it is computed from unextended_sp(); otherwise falls back to
// the saved fp() (which must then be trustworthy -- see assert).
intptr_t* frame::real_fp() const {
  if (_cb != NULL) {
    // use the frame size if valid
    int size = _cb->frame_size();
    if (size > 0) {
      return unextended_sp() + size;
    }
  }
  // else rely on fp()
  assert(! is_compiled_frame(), "unknown compiled frame size");
  return fp();
}

mercurial