src/share/vm/interpreter/bytecodeInterpreter.cpp

Tue, 19 Nov 2013 11:53:58 -0800

author
simonis
date
Tue, 19 Nov 2013 11:53:58 -0800
changeset 6483
018b357638aa
parent 6470
abe03600372a
child 6496
b4e19a1e459f
permissions
-rw-r--r--

8028514: PPC64: Fix C++ Interpreter after '7195622: CheckUnhandledOops has limited usefulness now'
Summary: fix CPP-interpreter after CheckUnhandledOops was re-enabled in the fastdebug build
Reviewed-by: kvn, dholmes, lfoltan

duke@435 1 /*
coleenp@4037 2 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 // no precompiled headers
stefank@2314 26 #include "classfile/vmSymbols.hpp"
stefank@2314 27 #include "gc_interface/collectedHeap.hpp"
stefank@2314 28 #include "interpreter/bytecodeHistogram.hpp"
stefank@2314 29 #include "interpreter/bytecodeInterpreter.hpp"
stefank@2314 30 #include "interpreter/bytecodeInterpreter.inline.hpp"
goetz@6470 31 #include "interpreter/bytecodeInterpreterProfiling.hpp"
stefank@2314 32 #include "interpreter/interpreter.hpp"
stefank@2314 33 #include "interpreter/interpreterRuntime.hpp"
stefank@2314 34 #include "memory/resourceArea.hpp"
jiangli@5065 35 #include "oops/methodCounters.hpp"
stefank@2314 36 #include "oops/objArrayKlass.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "prims/jvmtiExport.hpp"
goetz@6450 39 #include "prims/jvmtiThreadState.hpp"
goetz@6445 40 #include "runtime/biasedLocking.hpp"
stefank@2314 41 #include "runtime/frame.inline.hpp"
stefank@2314 42 #include "runtime/handles.inline.hpp"
stefank@2314 43 #include "runtime/interfaceSupport.hpp"
stefank@2314 44 #include "runtime/sharedRuntime.hpp"
stefank@2314 45 #include "runtime/threadCritical.hpp"
stefank@2314 46 #include "utilities/exceptions.hpp"
stefank@2314 47 #ifdef TARGET_OS_ARCH_linux_x86
stefank@2314 48 # include "orderAccess_linux_x86.inline.hpp"
stefank@2314 49 #endif
stefank@2314 50 #ifdef TARGET_OS_ARCH_linux_sparc
stefank@2314 51 # include "orderAccess_linux_sparc.inline.hpp"
stefank@2314 52 #endif
stefank@2314 53 #ifdef TARGET_OS_ARCH_linux_zero
stefank@2314 54 # include "orderAccess_linux_zero.inline.hpp"
stefank@2314 55 #endif
stefank@2314 56 #ifdef TARGET_OS_ARCH_solaris_x86
stefank@2314 57 # include "orderAccess_solaris_x86.inline.hpp"
stefank@2314 58 #endif
stefank@2314 59 #ifdef TARGET_OS_ARCH_solaris_sparc
stefank@2314 60 # include "orderAccess_solaris_sparc.inline.hpp"
stefank@2314 61 #endif
stefank@2314 62 #ifdef TARGET_OS_ARCH_windows_x86
stefank@2314 63 # include "orderAccess_windows_x86.inline.hpp"
stefank@2314 64 #endif
bobv@2508 65 #ifdef TARGET_OS_ARCH_linux_arm
bobv@2508 66 # include "orderAccess_linux_arm.inline.hpp"
bobv@2508 67 #endif
bobv@2508 68 #ifdef TARGET_OS_ARCH_linux_ppc
bobv@2508 69 # include "orderAccess_linux_ppc.inline.hpp"
bobv@2508 70 #endif
goetz@6461 71 #ifdef TARGET_OS_ARCH_aix_ppc
goetz@6461 72 # include "orderAccess_aix_ppc.inline.hpp"
goetz@6461 73 #endif
never@3156 74 #ifdef TARGET_OS_ARCH_bsd_x86
never@3156 75 # include "orderAccess_bsd_x86.inline.hpp"
never@3156 76 #endif
never@3156 77 #ifdef TARGET_OS_ARCH_bsd_zero
never@3156 78 # include "orderAccess_bsd_zero.inline.hpp"
never@3156 79 #endif
stefank@2314 80
stefank@2314 81
stefank@2314 82 // no precompiled headers
duke@435 83 #ifdef CC_INTERP
duke@435 84
duke@435 85 /*
duke@435 86 * USELABELS - If using GCC, then use labels for the opcode dispatching
duke@435 87 * rather than a switch statement. This improves performance because it
duke@435 88 * gives us the opportunity to have the instructions that calculate the
duke@435 89 * next opcode to jump to be intermixed with the rest of the instructions
duke@435 90 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
duke@435 91 */
duke@435 92 #undef USELABELS
duke@435 93 #ifdef __GNUC__
duke@435 94 /*
duke@435 95 ASSERT signifies debugging. It is much easier to step thru bytecodes if we
duke@435 96 don't use the computed goto approach.
duke@435 97 */
duke@435 98 #ifndef ASSERT
duke@435 99 #define USELABELS
duke@435 100 #endif
duke@435 101 #endif
duke@435 102
duke@435 103 #undef CASE
duke@435 104 #ifdef USELABELS
duke@435 105 #define CASE(opcode) opc ## opcode
duke@435 106 #define DEFAULT opc_default
duke@435 107 #else
duke@435 108 #define CASE(opcode) case Bytecodes:: opcode
duke@435 109 #define DEFAULT default
duke@435 110 #endif
duke@435 111
duke@435 112 /*
duke@435 113 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
duke@435 114 * opcode before going back to the top of the while loop, rather than having
duke@435 115 * the top of the while loop handle it. This provides a better opportunity
duke@435 116 * for instruction scheduling. Some compilers just do this prefetch
duke@435 117 * automatically. Some actually end up with worse performance if you
duke@435 118 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
duke@435 119 */
duke@435 120 #undef PREFETCH_OPCCODE
duke@435 121 #define PREFETCH_OPCCODE
duke@435 122
duke@435 123 /*
duke@435 124 Interpreter safepoint: it is expected that the interpreter will have no live
duke@435 125 handles of its own creation live at an interpreter safepoint. Therefore we
duke@435 126 run a HandleMarkCleaner and trash all handles allocated in the call chain
duke@435 127 since the JavaCalls::call_helper invocation that initiated the chain.
duke@435 128 There really shouldn't be any handles remaining to trash but this is cheap
duke@435 129 in relation to a safepoint.
duke@435 130 */
duke@435 131 #define SAFEPOINT \
duke@435 132 if ( SafepointSynchronize::is_synchronizing()) { \
duke@435 133 { \
duke@435 134 /* zap freed handles rather than GC'ing them */ \
duke@435 135 HandleMarkCleaner __hmc(THREAD); \
duke@435 136 } \
duke@435 137 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
duke@435 138 }
duke@435 139
duke@435 140 /*
duke@435 141 * VM_JAVA_ERROR - Macro for throwing a java exception from
duke@435 142 * the interpreter loop. Should really be a CALL_VM but there
duke@435 143 * is no entry point to do the transition to vm so we just
duke@435 144 * do it by hand here.
duke@435 145 */
goetz@6470 146 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
duke@435 147 DECACHE_STATE(); \
duke@435 148 SET_LAST_JAVA_FRAME(); \
duke@435 149 { \
goetz@6470 150 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
duke@435 151 ThreadInVMfromJava trans(THREAD); \
duke@435 152 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
duke@435 153 } \
duke@435 154 RESET_LAST_JAVA_FRAME(); \
duke@435 155 CACHE_STATE();
duke@435 156
goetz@6470 157 // Normal throw of a java error.
goetz@6470 158 #define VM_JAVA_ERROR(name, msg, note_a_trap) \
goetz@6470 159 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
duke@435 160 goto handle_exception;
duke@435 161
duke@435 162 #ifdef PRODUCT
duke@435 163 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
duke@435 164 #else
duke@435 165 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
duke@435 166 { \
duke@435 167 BytecodeCounter::_counter_value++; \
duke@435 168 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
duke@435 169 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
duke@435 170 if (TraceBytecodes) { \
duke@435 171 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
duke@435 172 topOfStack[Interpreter::expr_index_at(1)], \
duke@435 173 topOfStack[Interpreter::expr_index_at(2)]), \
duke@435 174 handle_exception); \
duke@435 175 } \
duke@435 176 }
duke@435 177 #endif
duke@435 178
duke@435 179 #undef DEBUGGER_SINGLE_STEP_NOTIFY
duke@435 180 #ifdef VM_JVMTI
duke@435 181 /* NOTE: (kbr) This macro must be called AFTER the PC has been
duke@435 182 incremented. JvmtiExport::at_single_stepping_point() may cause a
duke@435 183 breakpoint opcode to get inserted at the current PC to allow the
duke@435 184 debugger to coalesce single-step events.
duke@435 185
duke@435 186 As a result if we call at_single_stepping_point() we refetch opcode
duke@435 187 to get the current opcode. This will override any other prefetching
duke@435 188 that might have occurred.
duke@435 189 */
duke@435 190 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
duke@435 191 { \
duke@435 192 if (_jvmti_interp_events) { \
duke@435 193 if (JvmtiExport::should_post_single_step()) { \
duke@435 194 DECACHE_STATE(); \
duke@435 195 SET_LAST_JAVA_FRAME(); \
duke@435 196 ThreadInVMfromJava trans(THREAD); \
duke@435 197 JvmtiExport::at_single_stepping_point(THREAD, \
duke@435 198 istate->method(), \
duke@435 199 pc); \
duke@435 200 RESET_LAST_JAVA_FRAME(); \
duke@435 201 CACHE_STATE(); \
duke@435 202 if (THREAD->pop_frame_pending() && \
duke@435 203 !THREAD->pop_frame_in_process()) { \
duke@435 204 goto handle_Pop_Frame; \
duke@435 205 } \
goetz@6450 206 if (THREAD->jvmti_thread_state() && \
goetz@6450 207 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
goetz@6450 208 goto handle_Early_Return; \
goetz@6450 209 } \
duke@435 210 opcode = *pc; \
duke@435 211 } \
duke@435 212 } \
duke@435 213 }
duke@435 214 #else
duke@435 215 #define DEBUGGER_SINGLE_STEP_NOTIFY()
duke@435 216 #endif
duke@435 217
duke@435 218 /*
duke@435 219 * CONTINUE - Macro for executing the next opcode.
duke@435 220 */
duke@435 221 #undef CONTINUE
duke@435 222 #ifdef USELABELS
duke@435 223 // Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
duke@435 224 // initialization (which is the initialization of the table pointer...)
coleenp@955 225 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
duke@435 226 #define CONTINUE { \
duke@435 227 opcode = *pc; \
duke@435 228 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 229 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 230 DISPATCH(opcode); \
duke@435 231 }
duke@435 232 #else
duke@435 233 #ifdef PREFETCH_OPCCODE
duke@435 234 #define CONTINUE { \
duke@435 235 opcode = *pc; \
duke@435 236 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 237 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 238 continue; \
duke@435 239 }
duke@435 240 #else
duke@435 241 #define CONTINUE { \
duke@435 242 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 243 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 244 continue; \
duke@435 245 }
duke@435 246 #endif
duke@435 247 #endif
duke@435 248
duke@435 249
duke@435 250 #define UPDATE_PC(opsize) {pc += opsize; }
duke@435 251 /*
duke@435 252 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
duke@435 253 */
duke@435 254 #undef UPDATE_PC_AND_TOS
duke@435 255 #define UPDATE_PC_AND_TOS(opsize, stack) \
duke@435 256 {pc += opsize; MORE_STACK(stack); }
duke@435 257
duke@435 258 /*
duke@435 259 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
duke@435 260 * and executing the next opcode. It's somewhat similar to the combination
duke@435 261 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
duke@435 262 */
duke@435 263 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
duke@435 264 #ifdef USELABELS
duke@435 265 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
duke@435 266 pc += opsize; opcode = *pc; MORE_STACK(stack); \
duke@435 267 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 268 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 269 DISPATCH(opcode); \
duke@435 270 }
duke@435 271
duke@435 272 #define UPDATE_PC_AND_CONTINUE(opsize) { \
duke@435 273 pc += opsize; opcode = *pc; \
duke@435 274 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 275 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 276 DISPATCH(opcode); \
duke@435 277 }
duke@435 278 #else
duke@435 279 #ifdef PREFETCH_OPCCODE
duke@435 280 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
duke@435 281 pc += opsize; opcode = *pc; MORE_STACK(stack); \
duke@435 282 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 283 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 284 goto do_continue; \
duke@435 285 }
duke@435 286
duke@435 287 #define UPDATE_PC_AND_CONTINUE(opsize) { \
duke@435 288 pc += opsize; opcode = *pc; \
duke@435 289 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 290 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 291 goto do_continue; \
duke@435 292 }
duke@435 293 #else
duke@435 294 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
duke@435 295 pc += opsize; MORE_STACK(stack); \
duke@435 296 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 297 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 298 goto do_continue; \
duke@435 299 }
duke@435 300
duke@435 301 #define UPDATE_PC_AND_CONTINUE(opsize) { \
duke@435 302 pc += opsize; \
duke@435 303 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 304 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 305 goto do_continue; \
duke@435 306 }
duke@435 307 #endif /* PREFETCH_OPCCODE */
duke@435 308 #endif /* USELABELS */
duke@435 309
duke@435 310 // About to call a new method, update the save the adjusted pc and return to frame manager
duke@435 311 #define UPDATE_PC_AND_RETURN(opsize) \
duke@435 312 DECACHE_TOS(); \
duke@435 313 istate->set_bcp(pc+opsize); \
duke@435 314 return;
duke@435 315
duke@435 316
duke@435 317 #define METHOD istate->method()
jiangli@5065 318 #define GET_METHOD_COUNTERS(res) \
jiangli@5065 319 res = METHOD->method_counters(); \
jiangli@5065 320 if (res == NULL) { \
jiangli@5065 321 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
jiangli@5065 322 }
jiangli@5065 323
duke@435 324 #define OSR_REQUEST(res, branch_pc) \
duke@435 325 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
duke@435 326 /*
duke@435 327 * For those opcodes that need to have a GC point on a backwards branch
duke@435 328 */
duke@435 329
duke@435 330 // Backedge counting is kind of strange. The asm interpreter will increment
duke@435 331 // the backedge counter as a separate counter but it does it's comparisons
duke@435 332 // to the sum (scaled) of invocation counter and backedge count to make
duke@435 333 // a decision. Seems kind of odd to sum them together like that
duke@435 334
duke@435 335 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
duke@435 336
duke@435 337
duke@435 338 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
duke@435 339 if ((skip) <= 0) { \
jiangli@5065 340 MethodCounters* mcs; \
jiangli@5065 341 GET_METHOD_COUNTERS(mcs); \
twisti@1513 342 if (UseLoopCounter) { \
duke@435 343 bool do_OSR = UseOnStackReplacement; \
jiangli@5065 344 mcs->backedge_counter()->increment(); \
goetz@6470 345 if (ProfileInterpreter) { \
goetz@6470 346 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
goetz@6470 347 /* Check for overflow against MDO count. */ \
goetz@6470 348 do_OSR = do_OSR \
goetz@6470 349 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
goetz@6470 350 /* When ProfileInterpreter is on, the backedge_count comes */ \
goetz@6470 351 /* from the methodDataOop, which value does not get reset on */ \
goetz@6470 352 /* the call to frequency_counter_overflow(). To avoid */ \
goetz@6470 353 /* excessive calls to the overflow routine while the method is */ \
goetz@6470 354 /* being compiled, add a second test to make sure the overflow */ \
goetz@6470 355 /* function is called only once every overflow_frequency. */ \
goetz@6470 356 && (!(mdo_last_branch_taken_count & 1023)); \
goetz@6470 357 } else { \
goetz@6470 358 /* check for overflow of backedge counter */ \
goetz@6470 359 do_OSR = do_OSR \
goetz@6470 360 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
goetz@6470 361 } \
duke@435 362 if (do_OSR) { \
goetz@6470 363 nmethod* osr_nmethod; \
duke@435 364 OSR_REQUEST(osr_nmethod, branch_pc); \
duke@435 365 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
goetz@6467 366 intptr_t* buf; \
goetz@6467 367 /* Call OSR migration with last java frame only, no checks. */ \
goetz@6467 368 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
duke@435 369 istate->set_msg(do_osr); \
duke@435 370 istate->set_osr_buf((address)buf); \
duke@435 371 istate->set_osr_entry(osr_nmethod->osr_entry()); \
duke@435 372 return; \
duke@435 373 } \
duke@435 374 } \
duke@435 375 } /* UseCompiler ... */ \
duke@435 376 SAFEPOINT; \
duke@435 377 }
duke@435 378
duke@435 379 /*
duke@435 380 * For those opcodes that need to have a GC point on a backwards branch
duke@435 381 */
duke@435 382
duke@435 383 /*
duke@435 384 * Macros for caching and flushing the interpreter state. Some local
duke@435 385 * variables need to be flushed out to the frame before we do certain
duke@435 386 * things (like pushing frames or becoming gc safe) and some need to
duke@435 387 * be recached later (like after popping a frame). We could use one
duke@435 388 * macro to cache or decache everything, but this would be less than
duke@435 389 * optimal because we don't always need to cache or decache everything
duke@435 390 * because some things we know are already cached or decached.
duke@435 391 */
duke@435 392 #undef DECACHE_TOS
duke@435 393 #undef CACHE_TOS
duke@435 394 #undef CACHE_PREV_TOS
duke@435 395 #define DECACHE_TOS() istate->set_stack(topOfStack);
duke@435 396
duke@435 397 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
duke@435 398
duke@435 399 #undef DECACHE_PC
duke@435 400 #undef CACHE_PC
duke@435 401 #define DECACHE_PC() istate->set_bcp(pc);
duke@435 402 #define CACHE_PC() pc = istate->bcp();
duke@435 403 #define CACHE_CP() cp = istate->constants();
duke@435 404 #define CACHE_LOCALS() locals = istate->locals();
duke@435 405 #undef CACHE_FRAME
duke@435 406 #define CACHE_FRAME()
duke@435 407
goetz@6470 408 // BCI() returns the current bytecode-index.
goetz@6470 409 #undef BCI
goetz@6470 410 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
goetz@6470 411
duke@435 412 /*
duke@435 413 * CHECK_NULL - Macro for throwing a NullPointerException if the object
duke@435 414 * passed is a null ref.
duke@435 415 * On some architectures/platforms it should be possible to do this implicitly
duke@435 416 */
duke@435 417 #undef CHECK_NULL
goetz@6470 418 #define CHECK_NULL(obj_) \
goetz@6470 419 if ((obj_) == NULL) { \
goetz@6470 420 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); \
goetz@6470 421 } \
goetz@6470 422 VERIFY_OOP(obj_)
duke@435 423
duke@435 424 #define VMdoubleConstZero() 0.0
duke@435 425 #define VMdoubleConstOne() 1.0
duke@435 426 #define VMlongConstZero() (max_jlong-max_jlong)
duke@435 427 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
duke@435 428
duke@435 429 /*
duke@435 430 * Alignment
duke@435 431 */
duke@435 432 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
duke@435 433
duke@435 434 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
duke@435 435 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
duke@435 436
duke@435 437 // Reload interpreter state after calling the VM or a possible GC
duke@435 438 #define CACHE_STATE() \
duke@435 439 CACHE_TOS(); \
duke@435 440 CACHE_PC(); \
duke@435 441 CACHE_CP(); \
duke@435 442 CACHE_LOCALS();
duke@435 443
goetz@6467 444 // Call the VM with last java frame only.
goetz@6467 445 #define CALL_VM_NAKED_LJF(func) \
goetz@6450 446 DECACHE_STATE(); \
goetz@6450 447 SET_LAST_JAVA_FRAME(); \
goetz@6450 448 func; \
goetz@6450 449 RESET_LAST_JAVA_FRAME(); \
goetz@6467 450 CACHE_STATE();
goetz@6467 451
goetz@6467 452 // Call the VM. Don't check for pending exceptions.
goetz@6467 453 #define CALL_VM_NOCHECK(func) \
goetz@6467 454 CALL_VM_NAKED_LJF(func) \
goetz@6450 455 if (THREAD->pop_frame_pending() && \
goetz@6450 456 !THREAD->pop_frame_in_process()) { \
goetz@6450 457 goto handle_Pop_Frame; \
goetz@6450 458 } \
goetz@6450 459 if (THREAD->jvmti_thread_state() && \
goetz@6450 460 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
goetz@6450 461 goto handle_Early_Return; \
goetz@6450 462 }
duke@435 463
duke@435 464 // Call the VM and check for pending exceptions
goetz@6450 465 #define CALL_VM(func, label) { \
goetz@6450 466 CALL_VM_NOCHECK(func); \
goetz@6450 467 if (THREAD->has_pending_exception()) goto label; \
duke@435 468 }
duke@435 469
duke@435 470 /*
duke@435 471 * BytecodeInterpreter::run(interpreterState istate)
duke@435 472 * BytecodeInterpreter::runWithChecks(interpreterState istate)
duke@435 473 *
duke@435 474 * The real deal. This is where byte codes actually get interpreted.
duke@435 475 * Basically it's a big while loop that iterates until we return from
duke@435 476 * the method passed in.
duke@435 477 *
duke@435 478 * The runWithChecks is used if JVMTI is enabled.
duke@435 479 *
duke@435 480 */
duke@435 481 #if defined(VM_JVMTI)
duke@435 482 void
duke@435 483 BytecodeInterpreter::runWithChecks(interpreterState istate) {
duke@435 484 #else
duke@435 485 void
duke@435 486 BytecodeInterpreter::run(interpreterState istate) {
duke@435 487 #endif
duke@435 488
duke@435 489 // In order to simplify some tests based on switches set at runtime
duke@435 490 // we invoke the interpreter a single time after switches are enabled
duke@435 491 // and set simpler to test variables rather than method calls or complex
duke@435 492 // boolean expressions.
duke@435 493
duke@435 494 static int initialized = 0;
duke@435 495 static int checkit = 0;
duke@435 496 static intptr_t* c_addr = NULL;
duke@435 497 static intptr_t c_value;
duke@435 498
duke@435 499 if (checkit && *c_addr != c_value) {
duke@435 500 os::breakpoint();
duke@435 501 }
duke@435 502 #ifdef VM_JVMTI
duke@435 503 static bool _jvmti_interp_events = 0;
duke@435 504 #endif
duke@435 505
duke@435 506 static int _compiling; // (UseCompiler || CountCompiledCalls)
duke@435 507
duke@435 508 #ifdef ASSERT
duke@435 509 if (istate->_msg != initialize) {
roland@5225 510 // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
roland@5225 511 // because in that case, EnableInvokeDynamic is true by default but will be later switched off
roland@5225 512 // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
roland@5225 513 // for the old JSR292 implementation.
roland@5225 514 // This leads to a situation where 'istate->_stack_limit' always accounts for
roland@5225 515 // methodOopDesc::extra_stack_entries() because it is computed in
roland@5225 516 // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
roland@5225 517 // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
roland@5225 518 // account for extra_stack_entries() anymore because at the time when it is called
roland@5225 519 // EnableInvokeDynamic was already set to false.
roland@5225 520 // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
roland@5225 521 // switched off because of the wrong classes.
roland@5225 522 if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
goetz@5319 523 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
roland@5225 524 } else {
goetz@5319 525 const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
roland@5225 526 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
roland@5225 527 + 1), "bad stack limit");
roland@5225 528 }
twisti@2084 529 #ifndef SHARK
twisti@2084 530 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
twisti@2084 531 #endif // !SHARK
duke@435 532 }
duke@435 533 // Verify linkages.
duke@435 534 interpreterState l = istate;
duke@435 535 do {
duke@435 536 assert(l == l->_self_link, "bad link");
duke@435 537 l = l->_prev_link;
duke@435 538 } while (l != NULL);
duke@435 539 // Screwups with stack management usually cause us to overwrite istate
duke@435 540 // save a copy so we can verify it.
duke@435 541 interpreterState orig = istate;
duke@435 542 #endif
duke@435 543
duke@435 544 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
duke@435 545 register address pc = istate->bcp();
duke@435 546 register jubyte opcode;
duke@435 547 register intptr_t* locals = istate->locals();
coleenp@4037 548 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
duke@435 549 #ifdef LOTS_OF_REGS
duke@435 550 register JavaThread* THREAD = istate->thread();
duke@435 551 #else
duke@435 552 #undef THREAD
duke@435 553 #define THREAD istate->thread()
duke@435 554 #endif
duke@435 555
duke@435 556 #ifdef USELABELS
duke@435 557 const static void* const opclabels_data[256] = {
duke@435 558 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
duke@435 559 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
duke@435 560 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
duke@435 561 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
duke@435 562
duke@435 563 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
duke@435 564 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
duke@435 565 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1,
duke@435 566 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
duke@435 567
duke@435 568 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
duke@435 569 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
duke@435 570 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
duke@435 571 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
duke@435 572
duke@435 573 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
duke@435 574 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
duke@435 575 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
duke@435 576 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
duke@435 577
duke@435 578 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
duke@435 579 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
duke@435 580 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
duke@435 581 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
duke@435 582
duke@435 583 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
duke@435 584 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
duke@435 585 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
duke@435 586 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
duke@435 587
duke@435 588 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
duke@435 589 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
duke@435 590 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
duke@435 591 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
duke@435 592
duke@435 593 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
duke@435 594 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
duke@435 595 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
duke@435 596 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
duke@435 597
duke@435 598 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
duke@435 599 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
duke@435 600 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
duke@435 601 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
duke@435 602
duke@435 603 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
duke@435 604 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
duke@435 605 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
duke@435 606 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
duke@435 607
duke@435 608 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
duke@435 609 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
duke@435 610 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch,
duke@435 611 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
duke@435 612
duke@435 613 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
duke@435 614 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
twisti@2762 615 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
duke@435 616 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
duke@435 617
duke@435 618 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
duke@435 619 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
sgoldman@558 620 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default,
sgoldman@558 621 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
sgoldman@558 622
sgoldman@558 623 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 624 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 625 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 626 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 627
duke@435 628 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
twisti@2762 629 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
twisti@4237 630 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
duke@435 631 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 632
duke@435 633 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 634 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 635 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 636 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
duke@435 637 };
duke@435 638 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
duke@435 639 #endif /* USELABELS */
duke@435 640
duke@435 641 #ifdef ASSERT
duke@435 642 // this will trigger a VERIFY_OOP on entry
duke@435 643 if (istate->msg() != initialize && ! METHOD->is_static()) {
duke@435 644 oop rcvr = LOCALS_OBJECT(0);
bobv@2036 645 VERIFY_OOP(rcvr);
duke@435 646 }
duke@435 647 #endif
duke@435 648 // #define HACK
duke@435 649 #ifdef HACK
duke@435 650 bool interesting = false;
duke@435 651 #endif // HACK
duke@435 652
duke@435 653 /* QQQ this should be a stack method so we don't know actual direction */
bobv@2036 654 guarantee(istate->msg() == initialize ||
duke@435 655 topOfStack >= istate->stack_limit() &&
duke@435 656 topOfStack < istate->stack_base(),
duke@435 657 "Stack top out of range");
duke@435 658
goetz@6470 659 #ifdef CC_INTERP_PROFILE
goetz@6470 660 // MethodData's last branch taken count.
goetz@6470 661 uint mdo_last_branch_taken_count = 0;
goetz@6470 662 #else
goetz@6470 663 const uint mdo_last_branch_taken_count = 0;
goetz@6470 664 #endif
goetz@6470 665
duke@435 666 switch (istate->msg()) {
duke@435 667 case initialize: {
goetz@6470 668 if (initialized++) ShouldNotReachHere(); // Only one initialize call.
duke@435 669 _compiling = (UseCompiler || CountCompiledCalls);
duke@435 670 #ifdef VM_JVMTI
duke@435 671 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
duke@435 672 #endif
duke@435 673 return;
duke@435 674 }
duke@435 675 break;
duke@435 676 case method_entry: {
duke@435 677 THREAD->set_do_not_unlock();
duke@435 678 // count invocations
duke@435 679 assert(initialized, "Interpreter not initialized");
duke@435 680 if (_compiling) {
jiangli@5065 681 MethodCounters* mcs;
jiangli@5065 682 GET_METHOD_COUNTERS(mcs);
duke@435 683 if (ProfileInterpreter) {
jiangli@5065 684 METHOD->increment_interpreter_invocation_count(THREAD);
duke@435 685 }
jiangli@5065 686 mcs->invocation_counter()->increment();
goetz@6470 687 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
goetz@6470 688 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
goetz@6470 689 // We no longer retry on a counter overflow.
duke@435 690 }
goetz@6470 691 // Get or create profile data. Check for pending (async) exceptions.
goetz@6470 692 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
duke@435 693 SAFEPOINT;
duke@435 694 }
duke@435 695
duke@435 696 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
duke@435 697 // initialize
duke@435 698 os::breakpoint();
duke@435 699 }
duke@435 700
duke@435 701 #ifdef HACK
duke@435 702 {
duke@435 703 ResourceMark rm;
duke@435 704 char *method_name = istate->method()->name_and_sig_as_C_string();
duke@435 705 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
duke@435 706 tty->print_cr("entering: depth %d bci: %d",
duke@435 707 (istate->_stack_base - istate->_stack),
duke@435 708 istate->_bcp - istate->_method->code_base());
duke@435 709 interesting = true;
duke@435 710 }
duke@435 711 }
duke@435 712 #endif // HACK
duke@435 713
goetz@6470 714 // Lock method if synchronized.
duke@435 715 if (METHOD->is_synchronized()) {
goetz@6445 716 // oop rcvr = locals[0].j.r;
goetz@6445 717 oop rcvr;
goetz@6445 718 if (METHOD->is_static()) {
goetz@6445 719 rcvr = METHOD->constants()->pool_holder()->java_mirror();
goetz@6445 720 } else {
goetz@6445 721 rcvr = LOCALS_OBJECT(0);
goetz@6445 722 VERIFY_OOP(rcvr);
goetz@6445 723 }
goetz@6470 724 // The initial monitor is ours for the taking.
goetz@6445 725 // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
goetz@6445 726 BasicObjectLock* mon = &istate->monitor_base()[-1];
goetz@6445 727 mon->set_obj(rcvr);
goetz@6445 728 bool success = false;
goetz@6445 729 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
goetz@6445 730 markOop mark = rcvr->mark();
goetz@6445 731 intptr_t hash = (intptr_t) markOopDesc::no_hash;
goetz@6445 732 // Implies UseBiasedLocking.
goetz@6445 733 if (mark->has_bias_pattern()) {
goetz@6445 734 uintptr_t thread_ident;
goetz@6445 735 uintptr_t anticipated_bias_locking_value;
goetz@6445 736 thread_ident = (uintptr_t)istate->thread();
goetz@6445 737 anticipated_bias_locking_value =
goetz@6445 738 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
goetz@6445 739 ~((uintptr_t) markOopDesc::age_mask_in_place);
goetz@6445 740
goetz@6445 741 if (anticipated_bias_locking_value == 0) {
goetz@6445 742 // Already biased towards this thread, nothing to do.
goetz@6445 743 if (PrintBiasedLockingStatistics) {
goetz@6445 744 (* BiasedLocking::biased_lock_entry_count_addr())++;
goetz@6445 745 }
goetz@6445 746 success = true;
goetz@6445 747 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
goetz@6445 748 // Try to revoke bias.
goetz@6445 749 markOop header = rcvr->klass()->prototype_header();
goetz@6445 750 if (hash != markOopDesc::no_hash) {
goetz@6445 751 header = header->copy_set_hash(hash);
goetz@6445 752 }
goetz@6445 753 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
goetz@6445 754 if (PrintBiasedLockingStatistics)
goetz@6445 755 (*BiasedLocking::revoked_lock_entry_count_addr())++;
goetz@6445 756 }
goetz@6445 757 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
goetz@6445 758 // Try to rebias.
goetz@6445 759 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
goetz@6445 760 if (hash != markOopDesc::no_hash) {
goetz@6445 761 new_header = new_header->copy_set_hash(hash);
goetz@6445 762 }
goetz@6445 763 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
goetz@6445 764 if (PrintBiasedLockingStatistics) {
goetz@6445 765 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
duke@435 766 }
duke@435 767 } else {
goetz@6445 768 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
goetz@6445 769 }
goetz@6445 770 success = true;
goetz@6445 771 } else {
goetz@6445 772 // Try to bias towards thread in case object is anonymously biased.
goetz@6445 773 markOop header = (markOop) ((uintptr_t) mark &
goetz@6445 774 ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
goetz@6445 775 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
goetz@6445 776 if (hash != markOopDesc::no_hash) {
goetz@6445 777 header = header->copy_set_hash(hash);
goetz@6445 778 }
goetz@6445 779 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
goetz@6445 780 // Debugging hint.
goetz@6445 781 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
goetz@6445 782 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
goetz@6445 783 if (PrintBiasedLockingStatistics) {
goetz@6445 784 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
goetz@6445 785 }
goetz@6445 786 } else {
goetz@6445 787 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
goetz@6445 788 }
goetz@6445 789 success = true;
goetz@6445 790 }
goetz@6445 791 }
goetz@6445 792
goetz@6445 793 // Traditional lightweight locking.
goetz@6445 794 if (!success) {
goetz@6445 795 markOop displaced = rcvr->mark()->set_unlocked();
goetz@6445 796 mon->lock()->set_displaced_header(displaced);
goetz@6445 797 bool call_vm = UseHeavyMonitors;
goetz@6445 798 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
goetz@6445 799 // Is it simple recursive case?
goetz@6445 800 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
goetz@6445 801 mon->lock()->set_displaced_header(NULL);
goetz@6445 802 } else {
goetz@6445 803 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
duke@435 804 }
duke@435 805 }
goetz@6445 806 }
duke@435 807 }
duke@435 808 THREAD->clr_do_not_unlock();
duke@435 809
duke@435 810 // Notify jvmti
duke@435 811 #ifdef VM_JVMTI
duke@435 812 if (_jvmti_interp_events) {
duke@435 813 // Whenever JVMTI puts a thread in interp_only_mode, method
duke@435 814 // entry/exit events are sent for that thread to track stack depth.
duke@435 815 if (THREAD->is_interp_only_mode()) {
duke@435 816 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
duke@435 817 handle_exception);
duke@435 818 }
duke@435 819 }
duke@435 820 #endif /* VM_JVMTI */
duke@435 821
duke@435 822 goto run;
duke@435 823 }
duke@435 824
duke@435 825 case popping_frame: {
duke@435 826 // returned from a java call to pop the frame, restart the call
duke@435 827 // clear the message so we don't confuse ourselves later
duke@435 828 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
duke@435 829 istate->set_msg(no_request);
goetz@6470 830 if (_compiling) {
goetz@6470 831 // Set MDX back to the ProfileData of the invoke bytecode that will be
goetz@6470 832 // restarted.
goetz@6470 833 SET_MDX(NULL);
goetz@6470 834 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
goetz@6470 835 }
duke@435 836 THREAD->clr_pop_frame_in_process();
duke@435 837 goto run;
duke@435 838 }
duke@435 839
duke@435 840 case method_resume: {
duke@435 841 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
duke@435 842 // resume
duke@435 843 os::breakpoint();
duke@435 844 }
duke@435 845 #ifdef HACK
duke@435 846 {
duke@435 847 ResourceMark rm;
duke@435 848 char *method_name = istate->method()->name_and_sig_as_C_string();
duke@435 849 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
duke@435 850 tty->print_cr("resume: depth %d bci: %d",
duke@435 851 (istate->_stack_base - istate->_stack) ,
duke@435 852 istate->_bcp - istate->_method->code_base());
duke@435 853 interesting = true;
duke@435 854 }
duke@435 855 }
duke@435 856 #endif // HACK
duke@435 857 // returned from a java call, continue executing.
duke@435 858 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
duke@435 859 goto handle_Pop_Frame;
duke@435 860 }
goetz@6450 861 if (THREAD->jvmti_thread_state() &&
goetz@6450 862 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
goetz@6450 863 goto handle_Early_Return;
goetz@6450 864 }
duke@435 865
duke@435 866 if (THREAD->has_pending_exception()) goto handle_exception;
duke@435 867 // Update the pc by the saved amount of the invoke bytecode size
duke@435 868 UPDATE_PC(istate->bcp_advance());
goetz@6470 869
goetz@6470 870 if (_compiling) {
goetz@6470 871 // Get or create profile data. Check for pending (async) exceptions.
goetz@6470 872 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
goetz@6470 873 }
duke@435 874 goto run;
duke@435 875 }
duke@435 876
duke@435 877 case deopt_resume2: {
duke@435 878 // Returned from an opcode that will reexecute. Deopt was
duke@435 879 // a result of a PopFrame request.
duke@435 880 //
goetz@6470 881
goetz@6470 882 if (_compiling) {
goetz@6470 883 // Get or create profile data. Check for pending (async) exceptions.
goetz@6470 884 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
goetz@6470 885 }
duke@435 886 goto run;
duke@435 887 }
duke@435 888
duke@435 889 case deopt_resume: {
duke@435 890 // Returned from an opcode that has completed. The stack has
duke@435 891 // the result all we need to do is skip across the bytecode
duke@435 892 // and continue (assuming there is no exception pending)
duke@435 893 //
duke@435 894 // compute continuation length
duke@435 895 //
duke@435 896 // Note: it is possible to deopt at a return_register_finalizer opcode
duke@435 897 // because this requires entering the vm to do the registering. While the
duke@435 898 // opcode is complete we can't advance because there are no more opcodes
duke@435 899 // much like trying to deopt at a poll return. In that has we simply
duke@435 900 // get out of here
duke@435 901 //
never@2462 902 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
duke@435 903 // this will do the right thing even if an exception is pending.
duke@435 904 goto handle_return;
duke@435 905 }
never@2462 906 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
duke@435 907 if (THREAD->has_pending_exception()) goto handle_exception;
goetz@6470 908
goetz@6470 909 if (_compiling) {
goetz@6470 910 // Get or create profile data. Check for pending (async) exceptions.
goetz@6470 911 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
goetz@6470 912 }
duke@435 913 goto run;
duke@435 914 }
duke@435 915 case got_monitors: {
duke@435 916 // continue locking now that we have a monitor to use
duke@435 917 // we expect to find newly allocated monitor at the "top" of the monitor stack.
duke@435 918 oop lockee = STACK_OBJECT(-1);
bobv@2036 919 VERIFY_OOP(lockee);
duke@435 920 // derefing's lockee ought to provoke implicit null check
duke@435 921 // find a free monitor
duke@435 922 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
duke@435 923 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
duke@435 924 entry->set_obj(lockee);
goetz@6445 925 bool success = false;
goetz@6445 926 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
goetz@6445 927
goetz@6445 928 markOop mark = lockee->mark();
goetz@6445 929 intptr_t hash = (intptr_t) markOopDesc::no_hash;
goetz@6445 930 // implies UseBiasedLocking
goetz@6445 931 if (mark->has_bias_pattern()) {
goetz@6445 932 uintptr_t thread_ident;
goetz@6445 933 uintptr_t anticipated_bias_locking_value;
goetz@6445 934 thread_ident = (uintptr_t)istate->thread();
goetz@6445 935 anticipated_bias_locking_value =
goetz@6445 936 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
goetz@6445 937 ~((uintptr_t) markOopDesc::age_mask_in_place);
goetz@6445 938
goetz@6445 939 if (anticipated_bias_locking_value == 0) {
goetz@6445 940 // already biased towards this thread, nothing to do
goetz@6445 941 if (PrintBiasedLockingStatistics) {
goetz@6445 942 (* BiasedLocking::biased_lock_entry_count_addr())++;
goetz@6445 943 }
goetz@6445 944 success = true;
goetz@6445 945 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
goetz@6445 946 // try revoke bias
goetz@6445 947 markOop header = lockee->klass()->prototype_header();
goetz@6445 948 if (hash != markOopDesc::no_hash) {
goetz@6445 949 header = header->copy_set_hash(hash);
goetz@6445 950 }
goetz@6445 951 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
goetz@6445 952 if (PrintBiasedLockingStatistics) {
goetz@6445 953 (*BiasedLocking::revoked_lock_entry_count_addr())++;
goetz@6445 954 }
goetz@6445 955 }
goetz@6445 956 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
goetz@6445 957 // try rebias
goetz@6445 958 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
goetz@6445 959 if (hash != markOopDesc::no_hash) {
goetz@6445 960 new_header = new_header->copy_set_hash(hash);
goetz@6445 961 }
goetz@6445 962 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
goetz@6445 963 if (PrintBiasedLockingStatistics) {
goetz@6445 964 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
goetz@6445 965 }
goetz@6445 966 } else {
goetz@6445 967 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 968 }
goetz@6445 969 success = true;
duke@435 970 } else {
goetz@6445 971 // try to bias towards thread in case object is anonymously biased
goetz@6445 972 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
goetz@6445 973 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
goetz@6445 974 if (hash != markOopDesc::no_hash) {
goetz@6445 975 header = header->copy_set_hash(hash);
goetz@6445 976 }
goetz@6445 977 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
goetz@6445 978 // debugging hint
goetz@6445 979 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
goetz@6445 980 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
goetz@6445 981 if (PrintBiasedLockingStatistics) {
goetz@6445 982 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
goetz@6445 983 }
goetz@6445 984 } else {
goetz@6445 985 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 986 }
goetz@6445 987 success = true;
goetz@6445 988 }
goetz@6445 989 }
goetz@6445 990
goetz@6445 991 // traditional lightweight locking
goetz@6445 992 if (!success) {
goetz@6445 993 markOop displaced = lockee->mark()->set_unlocked();
goetz@6445 994 entry->lock()->set_displaced_header(displaced);
goetz@6445 995 bool call_vm = UseHeavyMonitors;
goetz@6445 996 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
goetz@6445 997 // Is it simple recursive case?
goetz@6445 998 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
goetz@6445 999 entry->lock()->set_displaced_header(NULL);
goetz@6445 1000 } else {
goetz@6445 1001 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1002 }
duke@435 1003 }
duke@435 1004 }
duke@435 1005 UPDATE_PC_AND_TOS(1, -1);
duke@435 1006 goto run;
duke@435 1007 }
duke@435 1008 default: {
duke@435 1009 fatal("Unexpected message from frame manager");
duke@435 1010 }
duke@435 1011 }
duke@435 1012
duke@435 1013 run:
duke@435 1014
duke@435 1015 DO_UPDATE_INSTRUCTION_COUNT(*pc)
duke@435 1016 DEBUGGER_SINGLE_STEP_NOTIFY();
duke@435 1017 #ifdef PREFETCH_OPCCODE
duke@435 1018 opcode = *pc; /* prefetch first opcode */
duke@435 1019 #endif
duke@435 1020
duke@435 1021 #ifndef USELABELS
duke@435 1022 while (1)
duke@435 1023 #endif
duke@435 1024 {
duke@435 1025 #ifndef PREFETCH_OPCCODE
duke@435 1026 opcode = *pc;
duke@435 1027 #endif
duke@435 1028 // Seems like this happens twice per opcode. At worst this is only
duke@435 1029 // need at entry to the loop.
duke@435 1030 // DEBUGGER_SINGLE_STEP_NOTIFY();
duke@435 1031 /* Using this labels avoids double breakpoints when quickening and
duke@435 1032 * when returing from transition frames.
duke@435 1033 */
duke@435 1034 opcode_switch:
duke@435 1035 assert(istate == orig, "Corrupted istate");
duke@435 1036 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
duke@435 1037 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
duke@435 1038 assert(topOfStack < istate->stack_base(), "Stack underrun");
duke@435 1039
duke@435 1040 #ifdef USELABELS
duke@435 1041 DISPATCH(opcode);
duke@435 1042 #else
duke@435 1043 switch (opcode)
duke@435 1044 #endif
duke@435 1045 {
duke@435 1046 CASE(_nop):
duke@435 1047 UPDATE_PC_AND_CONTINUE(1);
duke@435 1048
duke@435 1049 /* Push miscellaneous constants onto the stack. */
duke@435 1050
duke@435 1051 CASE(_aconst_null):
duke@435 1052 SET_STACK_OBJECT(NULL, 0);
duke@435 1053 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1054
duke@435 1055 #undef OPC_CONST_n
duke@435 1056 #define OPC_CONST_n(opcode, const_type, value) \
duke@435 1057 CASE(opcode): \
duke@435 1058 SET_STACK_ ## const_type(value, 0); \
duke@435 1059 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1060
duke@435 1061 OPC_CONST_n(_iconst_m1, INT, -1);
duke@435 1062 OPC_CONST_n(_iconst_0, INT, 0);
duke@435 1063 OPC_CONST_n(_iconst_1, INT, 1);
duke@435 1064 OPC_CONST_n(_iconst_2, INT, 2);
duke@435 1065 OPC_CONST_n(_iconst_3, INT, 3);
duke@435 1066 OPC_CONST_n(_iconst_4, INT, 4);
duke@435 1067 OPC_CONST_n(_iconst_5, INT, 5);
duke@435 1068 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
duke@435 1069 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
duke@435 1070 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
duke@435 1071
duke@435 1072 #undef OPC_CONST2_n
duke@435 1073 #define OPC_CONST2_n(opcname, value, key, kind) \
duke@435 1074 CASE(_##opcname): \
duke@435 1075 { \
duke@435 1076 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
duke@435 1077 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
duke@435 1078 }
duke@435 1079 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
duke@435 1080 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
duke@435 1081 OPC_CONST2_n(lconst_0, Zero, long, LONG);
duke@435 1082 OPC_CONST2_n(lconst_1, One, long, LONG);
duke@435 1083
duke@435 1084 /* Load constant from constant pool: */
duke@435 1085
duke@435 1086 /* Push a 1-byte signed integer value onto the stack. */
duke@435 1087 CASE(_bipush):
duke@435 1088 SET_STACK_INT((jbyte)(pc[1]), 0);
duke@435 1089 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
duke@435 1090
duke@435 1091 /* Push a 2-byte signed integer constant onto the stack. */
duke@435 1092 CASE(_sipush):
duke@435 1093 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
duke@435 1094 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
duke@435 1095
duke@435 1096 /* load from local variable */
duke@435 1097
duke@435 1098 CASE(_aload):
bobv@2036 1099 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
duke@435 1100 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
duke@435 1101 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
duke@435 1102
duke@435 1103 CASE(_iload):
duke@435 1104 CASE(_fload):
duke@435 1105 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
duke@435 1106 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
duke@435 1107
duke@435 1108 CASE(_lload):
duke@435 1109 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
duke@435 1110 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
duke@435 1111
duke@435 1112 CASE(_dload):
duke@435 1113 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
duke@435 1114 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
duke@435 1115
duke@435 1116 #undef OPC_LOAD_n
duke@435 1117 #define OPC_LOAD_n(num) \
duke@435 1118 CASE(_aload_##num): \
bobv@2036 1119 VERIFY_OOP(LOCALS_OBJECT(num)); \
duke@435 1120 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
duke@435 1121 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
duke@435 1122 \
duke@435 1123 CASE(_iload_##num): \
duke@435 1124 CASE(_fload_##num): \
duke@435 1125 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
duke@435 1126 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
duke@435 1127 \
duke@435 1128 CASE(_lload_##num): \
duke@435 1129 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
duke@435 1130 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
duke@435 1131 CASE(_dload_##num): \
duke@435 1132 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
duke@435 1133 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1134
duke@435 1135 OPC_LOAD_n(0);
duke@435 1136 OPC_LOAD_n(1);
duke@435 1137 OPC_LOAD_n(2);
duke@435 1138 OPC_LOAD_n(3);
duke@435 1139
duke@435 1140 /* store to a local variable */
duke@435 1141
duke@435 1142 CASE(_astore):
duke@435 1143 astore(topOfStack, -1, locals, pc[1]);
duke@435 1144 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
duke@435 1145
duke@435 1146 CASE(_istore):
duke@435 1147 CASE(_fstore):
duke@435 1148 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
duke@435 1149 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
duke@435 1150
duke@435 1151 CASE(_lstore):
duke@435 1152 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
duke@435 1153 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
duke@435 1154
duke@435 1155 CASE(_dstore):
duke@435 1156 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
duke@435 1157 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
duke@435 1158
duke@435 1159 CASE(_wide): {
duke@435 1160 uint16_t reg = Bytes::get_Java_u2(pc + 2);
duke@435 1161
duke@435 1162 opcode = pc[1];
goetz@6470 1163
goetz@6470 1164 // Wide and it's sub-bytecode are counted as separate instructions. If we
goetz@6470 1165 // don't account for this here, the bytecode trace skips the next bytecode.
goetz@6470 1166 DO_UPDATE_INSTRUCTION_COUNT(opcode);
goetz@6470 1167
duke@435 1168 switch(opcode) {
duke@435 1169 case Bytecodes::_aload:
bobv@2036 1170 VERIFY_OOP(LOCALS_OBJECT(reg));
duke@435 1171 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
duke@435 1172 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
duke@435 1173
duke@435 1174 case Bytecodes::_iload:
duke@435 1175 case Bytecodes::_fload:
duke@435 1176 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
duke@435 1177 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
duke@435 1178
duke@435 1179 case Bytecodes::_lload:
duke@435 1180 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
duke@435 1181 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
duke@435 1182
duke@435 1183 case Bytecodes::_dload:
duke@435 1184 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
duke@435 1185 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
duke@435 1186
duke@435 1187 case Bytecodes::_astore:
duke@435 1188 astore(topOfStack, -1, locals, reg);
duke@435 1189 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
duke@435 1190
duke@435 1191 case Bytecodes::_istore:
duke@435 1192 case Bytecodes::_fstore:
duke@435 1193 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
duke@435 1194 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
duke@435 1195
duke@435 1196 case Bytecodes::_lstore:
duke@435 1197 SET_LOCALS_LONG(STACK_LONG(-1), reg);
duke@435 1198 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
duke@435 1199
duke@435 1200 case Bytecodes::_dstore:
duke@435 1201 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
duke@435 1202 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
duke@435 1203
duke@435 1204 case Bytecodes::_iinc: {
duke@435 1205 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
duke@435 1206 // Be nice to see what this generates.... QQQ
duke@435 1207 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
duke@435 1208 UPDATE_PC_AND_CONTINUE(6);
duke@435 1209 }
duke@435 1210 case Bytecodes::_ret:
goetz@6470 1211 // Profile ret.
goetz@6470 1212 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
goetz@6470 1213 // Now, update the pc.
duke@435 1214 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
duke@435 1215 UPDATE_PC_AND_CONTINUE(0);
duke@435 1216 default:
goetz@6470 1217 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
duke@435 1218 }
duke@435 1219 }
duke@435 1220
duke@435 1221
duke@435 1222 #undef OPC_STORE_n
duke@435 1223 #define OPC_STORE_n(num) \
duke@435 1224 CASE(_astore_##num): \
duke@435 1225 astore(topOfStack, -1, locals, num); \
duke@435 1226 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
duke@435 1227 CASE(_istore_##num): \
duke@435 1228 CASE(_fstore_##num): \
duke@435 1229 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
duke@435 1230 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1231
duke@435 1232 OPC_STORE_n(0);
duke@435 1233 OPC_STORE_n(1);
duke@435 1234 OPC_STORE_n(2);
duke@435 1235 OPC_STORE_n(3);
duke@435 1236
duke@435 1237 #undef OPC_DSTORE_n
duke@435 1238 #define OPC_DSTORE_n(num) \
duke@435 1239 CASE(_dstore_##num): \
duke@435 1240 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
duke@435 1241 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
duke@435 1242 CASE(_lstore_##num): \
duke@435 1243 SET_LOCALS_LONG(STACK_LONG(-1), num); \
duke@435 1244 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
duke@435 1245
duke@435 1246 OPC_DSTORE_n(0);
duke@435 1247 OPC_DSTORE_n(1);
duke@435 1248 OPC_DSTORE_n(2);
duke@435 1249 OPC_DSTORE_n(3);
duke@435 1250
duke@435 1251 /* stack pop, dup, and insert opcodes */
duke@435 1252
duke@435 1253
duke@435 1254 CASE(_pop): /* Discard the top item on the stack */
duke@435 1255 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1256
duke@435 1257
duke@435 1258 CASE(_pop2): /* Discard the top 2 items on the stack */
duke@435 1259 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
duke@435 1260
duke@435 1261
duke@435 1262 CASE(_dup): /* Duplicate the top item on the stack */
duke@435 1263 dup(topOfStack);
duke@435 1264 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1265
duke@435 1266 CASE(_dup2): /* Duplicate the top 2 items on the stack */
duke@435 1267 dup2(topOfStack);
duke@435 1268 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1269
duke@435 1270 CASE(_dup_x1): /* insert top word two down */
duke@435 1271 dup_x1(topOfStack);
duke@435 1272 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1273
duke@435 1274 CASE(_dup_x2): /* insert top word three down */
duke@435 1275 dup_x2(topOfStack);
duke@435 1276 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1277
duke@435 1278 CASE(_dup2_x1): /* insert top 2 slots three down */
duke@435 1279 dup2_x1(topOfStack);
duke@435 1280 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1281
duke@435 1282 CASE(_dup2_x2): /* insert top 2 slots four down */
duke@435 1283 dup2_x2(topOfStack);
duke@435 1284 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1285
duke@435 1286 CASE(_swap): { /* swap top two elements on the stack */
duke@435 1287 swap(topOfStack);
duke@435 1288 UPDATE_PC_AND_CONTINUE(1);
duke@435 1289 }
duke@435 1290
duke@435 1291 /* Perform various binary integer operations */
duke@435 1292
duke@435 1293 #undef OPC_INT_BINARY
duke@435 1294 #define OPC_INT_BINARY(opcname, opname, test) \
duke@435 1295 CASE(_i##opcname): \
duke@435 1296 if (test && (STACK_INT(-1) == 0)) { \
duke@435 1297 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
goetz@6470 1298 "/ by zero", note_div0Check_trap); \
duke@435 1299 } \
duke@435 1300 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
duke@435 1301 STACK_INT(-1)), \
duke@435 1302 -2); \
duke@435 1303 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
duke@435 1304 CASE(_l##opcname): \
duke@435 1305 { \
duke@435 1306 if (test) { \
duke@435 1307 jlong l1 = STACK_LONG(-1); \
duke@435 1308 if (VMlongEqz(l1)) { \
duke@435 1309 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
goetz@6470 1310 "/ by long zero", note_div0Check_trap); \
duke@435 1311 } \
duke@435 1312 } \
duke@435 1313 /* First long at (-1,-2) next long at (-3,-4) */ \
duke@435 1314 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
duke@435 1315 STACK_LONG(-1)), \
duke@435 1316 -3); \
duke@435 1317 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
duke@435 1318 }
duke@435 1319
duke@435 1320 OPC_INT_BINARY(add, Add, 0);
duke@435 1321 OPC_INT_BINARY(sub, Sub, 0);
duke@435 1322 OPC_INT_BINARY(mul, Mul, 0);
duke@435 1323 OPC_INT_BINARY(and, And, 0);
duke@435 1324 OPC_INT_BINARY(or, Or, 0);
duke@435 1325 OPC_INT_BINARY(xor, Xor, 0);
duke@435 1326 OPC_INT_BINARY(div, Div, 1);
duke@435 1327 OPC_INT_BINARY(rem, Rem, 1);
duke@435 1328
duke@435 1329
duke@435 1330 /* Perform various binary floating number operations */
duke@435 1331 /* On some machine/platforms/compilers div zero check can be implicit */
duke@435 1332
duke@435 1333 #undef OPC_FLOAT_BINARY
duke@435 1334 #define OPC_FLOAT_BINARY(opcname, opname) \
duke@435 1335 CASE(_d##opcname): { \
duke@435 1336 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
duke@435 1337 STACK_DOUBLE(-1)), \
duke@435 1338 -3); \
duke@435 1339 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
duke@435 1340 } \
duke@435 1341 CASE(_f##opcname): \
duke@435 1342 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
duke@435 1343 STACK_FLOAT(-1)), \
duke@435 1344 -2); \
duke@435 1345 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1346
duke@435 1347
duke@435 1348 OPC_FLOAT_BINARY(add, Add);
duke@435 1349 OPC_FLOAT_BINARY(sub, Sub);
duke@435 1350 OPC_FLOAT_BINARY(mul, Mul);
duke@435 1351 OPC_FLOAT_BINARY(div, Div);
duke@435 1352 OPC_FLOAT_BINARY(rem, Rem);
duke@435 1353
duke@435 1354 /* Shift operations
duke@435 1355 * Shift left int and long: ishl, lshl
duke@435 1356 * Logical shift right int and long w/zero extension: iushr, lushr
duke@435 1357 * Arithmetic shift right int and long w/sign extension: ishr, lshr
duke@435 1358 */
duke@435 1359
duke@435 1360 #undef OPC_SHIFT_BINARY
duke@435 1361 #define OPC_SHIFT_BINARY(opcname, opname) \
duke@435 1362 CASE(_i##opcname): \
duke@435 1363 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
duke@435 1364 STACK_INT(-1)), \
duke@435 1365 -2); \
duke@435 1366 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
duke@435 1367 CASE(_l##opcname): \
duke@435 1368 { \
duke@435 1369 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
duke@435 1370 STACK_INT(-1)), \
duke@435 1371 -2); \
duke@435 1372 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
duke@435 1373 }
duke@435 1374
duke@435 1375 OPC_SHIFT_BINARY(shl, Shl);
duke@435 1376 OPC_SHIFT_BINARY(shr, Shr);
duke@435 1377 OPC_SHIFT_BINARY(ushr, Ushr);
duke@435 1378
duke@435 1379 /* Increment local variable by constant */
duke@435 1380 CASE(_iinc):
duke@435 1381 {
duke@435 1382 // locals[pc[1]].j.i += (jbyte)(pc[2]);
duke@435 1383 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
duke@435 1384 UPDATE_PC_AND_CONTINUE(3);
duke@435 1385 }
duke@435 1386
duke@435 1387 /* negate the value on the top of the stack */
duke@435 1388
duke@435 1389 CASE(_ineg):
duke@435 1390 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
duke@435 1391 UPDATE_PC_AND_CONTINUE(1);
duke@435 1392
duke@435 1393 CASE(_fneg):
duke@435 1394 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
duke@435 1395 UPDATE_PC_AND_CONTINUE(1);
duke@435 1396
duke@435 1397 CASE(_lneg):
duke@435 1398 {
duke@435 1399 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
duke@435 1400 UPDATE_PC_AND_CONTINUE(1);
duke@435 1401 }
duke@435 1402
duke@435 1403 CASE(_dneg):
duke@435 1404 {
duke@435 1405 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
duke@435 1406 UPDATE_PC_AND_CONTINUE(1);
duke@435 1407 }
duke@435 1408
duke@435 1409 /* Conversion operations */
duke@435 1410
duke@435 1411 CASE(_i2f): /* convert top of stack int to float */
duke@435 1412 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
duke@435 1413 UPDATE_PC_AND_CONTINUE(1);
duke@435 1414
duke@435 1415 CASE(_i2l): /* convert top of stack int to long */
duke@435 1416 {
duke@435 1417 // this is ugly QQQ
duke@435 1418 jlong r = VMint2Long(STACK_INT(-1));
duke@435 1419 MORE_STACK(-1); // Pop
duke@435 1420 SET_STACK_LONG(r, 1);
duke@435 1421
duke@435 1422 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1423 }
duke@435 1424
duke@435 1425 CASE(_i2d): /* convert top of stack int to double */
duke@435 1426 {
duke@435 1427 // this is ugly QQQ (why cast to jlong?? )
duke@435 1428 jdouble r = (jlong)STACK_INT(-1);
duke@435 1429 MORE_STACK(-1); // Pop
duke@435 1430 SET_STACK_DOUBLE(r, 1);
duke@435 1431
duke@435 1432 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1433 }
duke@435 1434
duke@435 1435 CASE(_l2i): /* convert top of stack long to int */
duke@435 1436 {
duke@435 1437 jint r = VMlong2Int(STACK_LONG(-1));
duke@435 1438 MORE_STACK(-2); // Pop
duke@435 1439 SET_STACK_INT(r, 0);
duke@435 1440 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1441 }
duke@435 1442
duke@435 1443 CASE(_l2f): /* convert top of stack long to float */
duke@435 1444 {
duke@435 1445 jlong r = STACK_LONG(-1);
duke@435 1446 MORE_STACK(-2); // Pop
duke@435 1447 SET_STACK_FLOAT(VMlong2Float(r), 0);
duke@435 1448 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1449 }
duke@435 1450
duke@435 1451 CASE(_l2d): /* convert top of stack long to double */
duke@435 1452 {
duke@435 1453 jlong r = STACK_LONG(-1);
duke@435 1454 MORE_STACK(-2); // Pop
duke@435 1455 SET_STACK_DOUBLE(VMlong2Double(r), 1);
duke@435 1456 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1457 }
duke@435 1458
duke@435 1459 CASE(_f2i): /* Convert top of stack float to int */
duke@435 1460 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
duke@435 1461 UPDATE_PC_AND_CONTINUE(1);
duke@435 1462
duke@435 1463 CASE(_f2l): /* convert top of stack float to long */
duke@435 1464 {
duke@435 1465 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
duke@435 1466 MORE_STACK(-1); // POP
duke@435 1467 SET_STACK_LONG(r, 1);
duke@435 1468 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1469 }
duke@435 1470
duke@435 1471 CASE(_f2d): /* convert top of stack float to double */
duke@435 1472 {
duke@435 1473 jfloat f;
duke@435 1474 jdouble r;
duke@435 1475 f = STACK_FLOAT(-1);
duke@435 1476 r = (jdouble) f;
duke@435 1477 MORE_STACK(-1); // POP
duke@435 1478 SET_STACK_DOUBLE(r, 1);
duke@435 1479 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1480 }
duke@435 1481
duke@435 1482 CASE(_d2i): /* convert top of stack double to int */
duke@435 1483 {
duke@435 1484 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
duke@435 1485 MORE_STACK(-2);
duke@435 1486 SET_STACK_INT(r1, 0);
duke@435 1487 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1488 }
duke@435 1489
duke@435 1490 CASE(_d2f): /* convert top of stack double to float */
duke@435 1491 {
duke@435 1492 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
duke@435 1493 MORE_STACK(-2);
duke@435 1494 SET_STACK_FLOAT(r1, 0);
duke@435 1495 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1496 }
duke@435 1497
duke@435 1498 CASE(_d2l): /* convert top of stack double to long */
duke@435 1499 {
duke@435 1500 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
duke@435 1501 MORE_STACK(-2);
duke@435 1502 SET_STACK_LONG(r1, 1);
duke@435 1503 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
duke@435 1504 }
duke@435 1505
duke@435 1506 CASE(_i2b):
duke@435 1507 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
duke@435 1508 UPDATE_PC_AND_CONTINUE(1);
duke@435 1509
duke@435 1510 CASE(_i2c):
duke@435 1511 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
duke@435 1512 UPDATE_PC_AND_CONTINUE(1);
duke@435 1513
duke@435 1514 CASE(_i2s):
duke@435 1515 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
duke@435 1516 UPDATE_PC_AND_CONTINUE(1);
duke@435 1517
duke@435 1518 /* comparison operators */
duke@435 1519
duke@435 1520
// COMPARISON_OP(name, comparison): expands into two conditional-branch
// bytecode cases:
//   _if_icmp<name> — compares the two ints at stack offsets -2 and -1,
//                    popping both (TOS adjustment -2);
//   _if<name>      — compares the int at TOS against 0, popping it (-1).
// When the comparison holds, 'skip' is the signed 16-bit branch offset
// read from the two bytes following the opcode; otherwise it is 3, the
// length of the branch instruction (i.e. fall through).  The taken/
// not-taken outcome is recorded in the branch profile, and
// DO_BACKEDGE_CHECKS gives backward branches their safepoint/OSR
// opportunity using the pre-update pc saved in branch_pc.
duke@435 1521 #define COMPARISON_OP(name, comparison) \
duke@435 1522 CASE(_if_icmp##name): { \
goetz@6470 1523 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
goetz@6470 1524 int skip = cmp \
duke@435 1525 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1526 address branch_pc = pc; \
goetz@6470 1527 /* Profile branch. */ \
goetz@6470 1528 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
duke@435 1529 UPDATE_PC_AND_TOS(skip, -2); \
duke@435 1530 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1531 CONTINUE; \
duke@435 1532 } \
duke@435 1533 CASE(_if##name): { \
goetz@6470 1534 const bool cmp = (STACK_INT(-1) comparison 0); \
goetz@6470 1535 int skip = cmp \
duke@435 1536 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1537 address branch_pc = pc; \
goetz@6470 1538 /* Profile branch. */ \
goetz@6470 1539 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
duke@435 1540 UPDATE_PC_AND_TOS(skip, -1); \
duke@435 1541 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1542 CONTINUE; \
duke@435 1543 }
duke@435 1544
// COMPARISON_OP2(name, comparison): everything COMPARISON_OP generates,
// plus the reference-comparison case _if_acmp<name>, which compares the
// two object references at -2/-1 (identity comparison via the given
// operator, expected to be == or !=) and pops both.  Branch offset,
// profiling and backedge handling are identical to COMPARISON_OP.
duke@435 1545 #define COMPARISON_OP2(name, comparison) \
duke@435 1546 COMPARISON_OP(name, comparison) \
duke@435 1547 CASE(_if_acmp##name): { \
goetz@6470 1548 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
goetz@6470 1549 int skip = cmp \
duke@435 1550 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1551 address branch_pc = pc; \
goetz@6470 1552 /* Profile branch. */ \
goetz@6470 1553 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
duke@435 1554 UPDATE_PC_AND_TOS(skip, -2); \
duke@435 1555 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1556 CONTINUE; \
duke@435 1557 }
duke@435 1558
// NULL_COMPARISON_NOT_OP(name): expands into the _if<name> case that
// branches when the reference at TOS is NOT null (used for ifnonnull
// below).  Pops one slot; branch offset / profiling / backedge handling
// mirror COMPARISON_OP.
duke@435 1559 #define NULL_COMPARISON_NOT_OP(name) \
duke@435 1560 CASE(_if##name): { \
goetz@6470 1561 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
goetz@6470 1562 int skip = cmp \
duke@435 1563 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1564 address branch_pc = pc; \
goetz@6470 1565 /* Profile branch. */ \
goetz@6470 1566 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
duke@435 1567 UPDATE_PC_AND_TOS(skip, -1); \
duke@435 1568 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1569 CONTINUE; \
duke@435 1570 }
duke@435 1571
// NULL_COMPARISON_OP(name): expands into the _if<name> case that branches
// when the reference at TOS IS null (used for ifnull below).  Pops one
// slot; branch offset / profiling / backedge handling mirror COMPARISON_OP.
duke@435 1572 #define NULL_COMPARISON_OP(name) \
duke@435 1573 CASE(_if##name): { \
goetz@6470 1574 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
goetz@6470 1575 int skip = cmp \
duke@435 1576 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1577 address branch_pc = pc; \
goetz@6470 1578 /* Profile branch. */ \
goetz@6470 1579 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
duke@435 1580 UPDATE_PC_AND_TOS(skip, -1); \
duke@435 1581 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1582 CONTINUE; \
duke@435 1583 }
duke@435 1584 COMPARISON_OP(lt, <);
duke@435 1585 COMPARISON_OP(gt, >);
duke@435 1586 COMPARISON_OP(le, <=);
duke@435 1587 COMPARISON_OP(ge, >=);
duke@435 1588 COMPARISON_OP2(eq, ==); /* include ref comparison */
duke@435 1589 COMPARISON_OP2(ne, !=); /* include ref comparison */
duke@435 1590 NULL_COMPARISON_OP(null);
duke@435 1591 NULL_COMPARISON_NOT_OP(nonnull);
duke@435 1592
duke@435 1593 /* Goto pc at specified offset in switch table. */
duke@435 1594
duke@435 1595 CASE(_tableswitch): {
duke@435 1596 jint* lpc = (jint*)VMalignWordUp(pc+1);
duke@435 1597 int32_t key = STACK_INT(-1);
duke@435 1598 int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
duke@435 1599 int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
duke@435 1600 int32_t skip;
duke@435 1601 key -= low;
goetz@6470 1602 if (((uint32_t) key > (uint32_t)(high - low))) {
goetz@6470 1603 key = -1;
goetz@6470 1604 skip = Bytes::get_Java_u4((address)&lpc[0]);
goetz@6470 1605 } else {
goetz@6470 1606 skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
goetz@6470 1607 }
goetz@6470 1608 // Profile switch.
goetz@6470 1609 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
goetz@6470 1610 // Does this really need a full backedge check (osr)?
duke@435 1611 address branch_pc = pc;
duke@435 1612 UPDATE_PC_AND_TOS(skip, -1);
duke@435 1613 DO_BACKEDGE_CHECKS(skip, branch_pc);
duke@435 1614 CONTINUE;
duke@435 1615 }
duke@435 1616
goetz@6470 1617 /* Goto pc whose table entry matches specified key. */
duke@435 1618
duke@435 1619 CASE(_lookupswitch): {
duke@435 1620 jint* lpc = (jint*)VMalignWordUp(pc+1);
duke@435 1621 int32_t key = STACK_INT(-1);
duke@435 1622 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
goetz@6470 1623 // Remember index.
goetz@6470 1624 int index = -1;
goetz@6470 1625 int newindex = 0;
duke@435 1626 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
duke@435 1627 while (--npairs >= 0) {
goetz@6470 1628 lpc += 2;
goetz@6470 1629 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
goetz@6470 1630 skip = Bytes::get_Java_u4((address)&lpc[1]);
goetz@6470 1631 index = newindex;
goetz@6470 1632 break;
goetz@6470 1633 }
goetz@6470 1634 newindex += 1;
duke@435 1635 }
goetz@6470 1636 // Profile switch.
goetz@6470 1637 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
duke@435 1638 address branch_pc = pc;
duke@435 1639 UPDATE_PC_AND_TOS(skip, -1);
duke@435 1640 DO_BACKEDGE_CHECKS(skip, branch_pc);
duke@435 1641 CONTINUE;
duke@435 1642 }
duke@435 1643
duke@435 1644 CASE(_fcmpl):
duke@435 1645 CASE(_fcmpg):
duke@435 1646 {
duke@435 1647 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
duke@435 1648 STACK_FLOAT(-1),
duke@435 1649 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
duke@435 1650 -2);
duke@435 1651 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1652 }
duke@435 1653
duke@435 1654 CASE(_dcmpl):
duke@435 1655 CASE(_dcmpg):
duke@435 1656 {
duke@435 1657 int r = VMdoubleCompare(STACK_DOUBLE(-3),
duke@435 1658 STACK_DOUBLE(-1),
duke@435 1659 (opcode == Bytecodes::_dcmpl ? -1 : 1));
duke@435 1660 MORE_STACK(-4); // Pop
duke@435 1661 SET_STACK_INT(r, 0);
duke@435 1662 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1663 }
duke@435 1664
duke@435 1665 CASE(_lcmp):
duke@435 1666 {
duke@435 1667 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
duke@435 1668 MORE_STACK(-4);
duke@435 1669 SET_STACK_INT(r, 0);
duke@435 1670 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1671 }
duke@435 1672
duke@435 1673
duke@435 1674 /* Return from a method */
duke@435 1675
duke@435 1676 CASE(_areturn):
duke@435 1677 CASE(_ireturn):
duke@435 1678 CASE(_freturn):
duke@435 1679 {
duke@435 1680 // Allow a safepoint before returning to frame manager.
duke@435 1681 SAFEPOINT;
duke@435 1682
duke@435 1683 goto handle_return;
duke@435 1684 }
duke@435 1685
duke@435 1686 CASE(_lreturn):
duke@435 1687 CASE(_dreturn):
duke@435 1688 {
duke@435 1689 // Allow a safepoint before returning to frame manager.
duke@435 1690 SAFEPOINT;
duke@435 1691 goto handle_return;
duke@435 1692 }
duke@435 1693
duke@435 1694 CASE(_return_register_finalizer): {
duke@435 1695
duke@435 1696 oop rcvr = LOCALS_OBJECT(0);
bobv@2036 1697 VERIFY_OOP(rcvr);
coleenp@4037 1698 if (rcvr->klass()->has_finalizer()) {
duke@435 1699 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
duke@435 1700 }
duke@435 1701 goto handle_return;
duke@435 1702 }
duke@435 1703 CASE(_return): {
duke@435 1704
duke@435 1705 // Allow a safepoint before returning to frame manager.
duke@435 1706 SAFEPOINT;
duke@435 1707 goto handle_return;
duke@435 1708 }
duke@435 1709
duke@435 1710 /* Array access byte-codes */
duke@435 1711
duke@435 1712 /* Every array access byte-code starts out like this */
duke@435 1713 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
// ARRAY_INTRO(arrayOff): common prologue for every array bytecode.
// Declares (into the enclosing case's scope) 'arrObj', the array oop at
// stack offset arrayOff, and 'index', the int one slot above it.  Null-
// checks the array, then performs a single unsigned bounds check (the
// uint32_t casts make a negative index compare as huge, so one test
// covers both index < 0 and index >= length).  On failure the offending
// index is formatted into 'message' and an
// ArrayIndexOutOfBoundsException is thrown, recording a range-check
// trap reason for the profiler.
duke@435 1714 #define ARRAY_INTRO(arrayOff) \
duke@435 1715 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
duke@435 1716 jint index = STACK_INT(arrayOff + 1); \
duke@435 1717 char message[jintAsStringSize]; \
duke@435 1718 CHECK_NULL(arrObj); \
duke@435 1719 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
duke@435 1720 sprintf(message, "%d", index); \
duke@435 1721 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
goetz@6470 1722 message, note_rangeCheck_trap); \
duke@435 1723 }
duke@435 1724
duke@435 1725 /* 32-bit loads. These handle conversion from < 32-bit types */
// ARRAY_LOADTO32(T, T2, format, stackRes, extra): body for the 32-bit
// (and narrower) array load bytecodes.  After ARRAY_INTRO's null/bounds
// checks, reads the element of C type T2 at 'index' from the array's
// base for BasicType T and stores it via SET_##stackRes over the array
// slot, widening sub-int types as a side effect of the stack setter.
// Pops array+index and pushes the value: net TOS change -1.
// 'extra' is unused and cast to void to silence unused-value warnings;
// 'format' is not referenced in this macro (kept for parameter symmetry
// with ARRAY_STOREFROM32).
duke@435 1726 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
duke@435 1727 { \
duke@435 1728 ARRAY_INTRO(-2); \
simonis@5350 1729 (void)extra; \
duke@435 1730 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
duke@435 1731 -2); \
duke@435 1732 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
duke@435 1733 }
duke@435 1734
duke@435 1735 /* 64-bit loads */
// ARRAY_LOADTO64(T, T2, stackRes, extra): body for the 64-bit array load
// bytecodes (laload/daload).  After ARRAY_INTRO's checks, reads the
// two-word element and writes it with SET_##stackRes at offset -1.  The
// array+index pair (2 slots) is replaced in place by the 2-slot long/
// double value, so only the pc advances — TOS is unchanged, hence
// UPDATE_PC_AND_CONTINUE rather than the _AND_TOS variant.
// 'extra' is unused and cast to void to silence unused-value warnings.
duke@435 1736 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
duke@435 1737 { \
duke@435 1738 ARRAY_INTRO(-2); \
duke@435 1739 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
simonis@5350 1740 (void)extra; \
simonis@5350 1741 UPDATE_PC_AND_CONTINUE(1); \
duke@435 1742 }
duke@435 1743
duke@435 1744 CASE(_iaload):
duke@435 1745 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
duke@435 1746 CASE(_faload):
duke@435 1747 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
goetz@6449 1748 CASE(_aaload): {
goetz@6449 1749 ARRAY_INTRO(-2);
goetz@6449 1750 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
goetz@6449 1751 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
goetz@6449 1752 }
duke@435 1753 CASE(_baload):
duke@435 1754 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
duke@435 1755 CASE(_caload):
duke@435 1756 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
duke@435 1757 CASE(_saload):
duke@435 1758 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
duke@435 1759 CASE(_laload):
duke@435 1760 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
duke@435 1761 CASE(_daload):
duke@435 1762 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
duke@435 1763
duke@435 1764 /* 32-bit stores. These handle conversion to < 32-bit types */
// ARRAY_STOREFROM32(T, T2, format, stackSrc, extra): body for the 32-bit
// (and narrower) array store bytecodes.  Stack layout is
// [array, index, value], so ARRAY_INTRO runs at offset -3.  The value at
// TOS is read via stackSrc(-1), narrowed to T2 by the assignment, and
// written to the element slot; all three operand slots are then popped.
// 'extra' is unused and cast to void to silence unused-value warnings;
// 'format' is not referenced in this macro.
duke@435 1765 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
duke@435 1766 { \
duke@435 1767 ARRAY_INTRO(-3); \
simonis@5350 1768 (void)extra; \
duke@435 1769 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
duke@435 1770 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
duke@435 1771 }
duke@435 1772
duke@435 1773 /* 64-bit stores */
// ARRAY_STOREFROM64(T, T2, stackSrc, extra): body for the 64-bit array
// store bytecodes (lastore/dastore).  Stack layout is
// [array, index, value-hi, value-lo] (the long/double occupies two
// slots), so ARRAY_INTRO runs at offset -4.  The two-word value is read
// via stackSrc(-1), stored into the element, and all four operand slots
// are popped.  'extra' is unused and cast to void to silence
// unused-value warnings.
duke@435 1774 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
duke@435 1775 { \
duke@435 1776 ARRAY_INTRO(-4); \
simonis@5350 1777 (void)extra; \
duke@435 1778 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
duke@435 1779 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
duke@435 1780 }
duke@435 1781
duke@435 1782 CASE(_iastore):
duke@435 1783 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
duke@435 1784 CASE(_fastore):
duke@435 1785 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
duke@435 1786 /*
duke@435 1787 * This one looks different because of the assignability check
duke@435 1788 */
duke@435 1789 CASE(_aastore): {
duke@435 1790 oop rhsObject = STACK_OBJECT(-1);
bobv@2036 1791 VERIFY_OOP(rhsObject);
duke@435 1792 ARRAY_INTRO( -3);
duke@435 1793 // arrObj, index are set
duke@435 1794 if (rhsObject != NULL) {
duke@435 1795 /* Check assignability of rhsObject into arrObj */
goetz@6470 1796 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
goetz@6470 1797 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
duke@435 1798 //
duke@435 1799 // Check for compatibilty. This check must not GC!!
duke@435 1800 // Seems way more expensive now that we must dispatch
duke@435 1801 //
goetz@6470 1802 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
goetz@6470 1803 // Decrement counter if subtype check failed.
goetz@6470 1804 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
goetz@6470 1805 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
duke@435 1806 }
goetz@6470 1807 // Profile checkcast with null_seen and receiver.
goetz@6470 1808 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
goetz@6470 1809 } else {
goetz@6470 1810 // Profile checkcast with null_seen and receiver.
goetz@6470 1811 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
duke@435 1812 }
simonis@6483 1813 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
duke@435 1814 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
duke@435 1815 }
duke@435 1816 CASE(_bastore):
duke@435 1817 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
duke@435 1818 CASE(_castore):
duke@435 1819 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
duke@435 1820 CASE(_sastore):
duke@435 1821 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
duke@435 1822 CASE(_lastore):
duke@435 1823 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
duke@435 1824 CASE(_dastore):
duke@435 1825 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
duke@435 1826
duke@435 1827 CASE(_arraylength):
duke@435 1828 {
duke@435 1829 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
duke@435 1830 CHECK_NULL(ary);
duke@435 1831 SET_STACK_INT(ary->length(), -1);
duke@435 1832 UPDATE_PC_AND_CONTINUE(1);
duke@435 1833 }
duke@435 1834
duke@435 1835 /* monitorenter and monitorexit for locking/unlocking an object */
duke@435 1836
duke@435 1837 CASE(_monitorenter): {
duke@435 1838 oop lockee = STACK_OBJECT(-1);
duke@435 1839 // derefing's lockee ought to provoke implicit null check
duke@435 1840 CHECK_NULL(lockee);
duke@435 1841 // find a free monitor or one already allocated for this object
duke@435 1842 // if we find a matching object then we need a new monitor
duke@435 1843 // since this is recursive enter
duke@435 1844 BasicObjectLock* limit = istate->monitor_base();
duke@435 1845 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
duke@435 1846 BasicObjectLock* entry = NULL;
duke@435 1847 while (most_recent != limit ) {
duke@435 1848 if (most_recent->obj() == NULL) entry = most_recent;
duke@435 1849 else if (most_recent->obj() == lockee) break;
duke@435 1850 most_recent++;
duke@435 1851 }
duke@435 1852 if (entry != NULL) {
duke@435 1853 entry->set_obj(lockee);
goetz@6445 1854 int success = false;
goetz@6445 1855 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
goetz@6445 1856
goetz@6445 1857 markOop mark = lockee->mark();
goetz@6445 1858 intptr_t hash = (intptr_t) markOopDesc::no_hash;
goetz@6445 1859 // implies UseBiasedLocking
goetz@6445 1860 if (mark->has_bias_pattern()) {
goetz@6445 1861 uintptr_t thread_ident;
goetz@6445 1862 uintptr_t anticipated_bias_locking_value;
goetz@6445 1863 thread_ident = (uintptr_t)istate->thread();
goetz@6445 1864 anticipated_bias_locking_value =
goetz@6445 1865 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
goetz@6445 1866 ~((uintptr_t) markOopDesc::age_mask_in_place);
goetz@6445 1867
goetz@6445 1868 if (anticipated_bias_locking_value == 0) {
goetz@6445 1869 // already biased towards this thread, nothing to do
goetz@6445 1870 if (PrintBiasedLockingStatistics) {
goetz@6445 1871 (* BiasedLocking::biased_lock_entry_count_addr())++;
goetz@6445 1872 }
goetz@6445 1873 success = true;
goetz@6445 1874 }
goetz@6445 1875 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
goetz@6445 1876 // try revoke bias
goetz@6445 1877 markOop header = lockee->klass()->prototype_header();
goetz@6445 1878 if (hash != markOopDesc::no_hash) {
goetz@6445 1879 header = header->copy_set_hash(hash);
goetz@6445 1880 }
goetz@6445 1881 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
goetz@6445 1882 if (PrintBiasedLockingStatistics)
goetz@6445 1883 (*BiasedLocking::revoked_lock_entry_count_addr())++;
goetz@6445 1884 }
goetz@6445 1885 }
goetz@6445 1886 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
goetz@6445 1887 // try rebias
goetz@6445 1888 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
goetz@6445 1889 if (hash != markOopDesc::no_hash) {
goetz@6445 1890 new_header = new_header->copy_set_hash(hash);
goetz@6445 1891 }
goetz@6445 1892 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
goetz@6445 1893 if (PrintBiasedLockingStatistics)
goetz@6445 1894 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
goetz@6445 1895 }
goetz@6445 1896 else {
goetz@6445 1897 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1898 }
goetz@6445 1899 success = true;
goetz@6445 1900 }
goetz@6445 1901 else {
goetz@6445 1902 // try to bias towards thread in case object is anonymously biased
goetz@6445 1903 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
goetz@6445 1904 (uintptr_t)markOopDesc::age_mask_in_place |
goetz@6445 1905 epoch_mask_in_place));
goetz@6445 1906 if (hash != markOopDesc::no_hash) {
goetz@6445 1907 header = header->copy_set_hash(hash);
goetz@6445 1908 }
goetz@6445 1909 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
goetz@6445 1910 // debugging hint
goetz@6445 1911 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
goetz@6445 1912 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
goetz@6445 1913 if (PrintBiasedLockingStatistics)
goetz@6445 1914 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
goetz@6445 1915 }
goetz@6445 1916 else {
goetz@6445 1917 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1918 }
goetz@6445 1919 success = true;
goetz@6445 1920 }
goetz@6445 1921 }
goetz@6445 1922
goetz@6445 1923 // traditional lightweight locking
goetz@6445 1924 if (!success) {
goetz@6445 1925 markOop displaced = lockee->mark()->set_unlocked();
goetz@6445 1926 entry->lock()->set_displaced_header(displaced);
goetz@6445 1927 bool call_vm = UseHeavyMonitors;
goetz@6445 1928 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
goetz@6445 1929 // Is it simple recursive case?
goetz@6445 1930 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
goetz@6445 1931 entry->lock()->set_displaced_header(NULL);
goetz@6445 1932 } else {
goetz@6445 1933 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1934 }
duke@435 1935 }
duke@435 1936 }
duke@435 1937 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1938 } else {
duke@435 1939 istate->set_msg(more_monitors);
duke@435 1940 UPDATE_PC_AND_RETURN(0); // Re-execute
duke@435 1941 }
duke@435 1942 }
duke@435 1943
duke@435 1944 CASE(_monitorexit): {
duke@435 1945 oop lockee = STACK_OBJECT(-1);
duke@435 1946 CHECK_NULL(lockee);
duke@435 1947 // derefing's lockee ought to provoke implicit null check
duke@435 1948 // find our monitor slot
duke@435 1949 BasicObjectLock* limit = istate->monitor_base();
duke@435 1950 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
duke@435 1951 while (most_recent != limit ) {
duke@435 1952 if ((most_recent)->obj() == lockee) {
duke@435 1953 BasicLock* lock = most_recent->lock();
duke@435 1954 markOop header = lock->displaced_header();
duke@435 1955 most_recent->set_obj(NULL);
goetz@6445 1956 if (!lockee->mark()->has_bias_pattern()) {
goetz@6445 1957 bool call_vm = UseHeavyMonitors;
goetz@6445 1958 // If it isn't recursive we either must swap old header or call the runtime
goetz@6445 1959 if (header != NULL || call_vm) {
goetz@6445 1960 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
goetz@6445 1961 // restore object for the slow case
goetz@6445 1962 most_recent->set_obj(lockee);
goetz@6445 1963 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
goetz@6445 1964 }
duke@435 1965 }
duke@435 1966 }
duke@435 1967 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1968 }
duke@435 1969 most_recent++;
duke@435 1970 }
duke@435 1971 // Need to throw illegal monitor state exception
duke@435 1972 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
twisti@2762 1973 ShouldNotReachHere();
duke@435 1974 }
duke@435 1975
duke@435 1976 /* All of the non-quick opcodes. */
duke@435 1977
duke@435 1978 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
duke@435 1979 * constant pool index in the instruction.
duke@435 1980 */
duke@435 1981 CASE(_getfield):
duke@435 1982 CASE(_getstatic):
duke@435 1983 {
duke@435 1984 u2 index;
duke@435 1985 ConstantPoolCacheEntry* cache;
duke@435 1986 index = Bytes::get_native_u2(pc+1);
duke@435 1987
duke@435 1988 // QQQ Need to make this as inlined as possible. Probably need to
duke@435 1989 // split all the bytecode cases out so c++ compiler has a chance
duke@435 1990 // for constant prop to fold everything possible away.
duke@435 1991
duke@435 1992 cache = cp->entry_at(index);
duke@435 1993 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
duke@435 1994 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
duke@435 1995 handle_exception);
duke@435 1996 cache = cp->entry_at(index);
duke@435 1997 }
duke@435 1998
duke@435 1999 #ifdef VM_JVMTI
duke@435 2000 if (_jvmti_interp_events) {
duke@435 2001 int *count_addr;
duke@435 2002 oop obj;
duke@435 2003 // Check to see if a field modification watch has been set
duke@435 2004 // before we take the time to call into the VM.
duke@435 2005 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
duke@435 2006 if ( *count_addr > 0 ) {
duke@435 2007 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
duke@435 2008 obj = (oop)NULL;
duke@435 2009 } else {
duke@435 2010 obj = (oop) STACK_OBJECT(-1);
bobv@2036 2011 VERIFY_OOP(obj);
duke@435 2012 }
duke@435 2013 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
duke@435 2014 obj,
duke@435 2015 cache),
duke@435 2016 handle_exception);
duke@435 2017 }
duke@435 2018 }
duke@435 2019 #endif /* VM_JVMTI */
duke@435 2020
duke@435 2021 oop obj;
duke@435 2022 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
twisti@4237 2023 Klass* k = cache->f1_as_klass();
coleenp@4037 2024 obj = k->java_mirror();
duke@435 2025 MORE_STACK(1); // Assume single slot push
duke@435 2026 } else {
duke@435 2027 obj = (oop) STACK_OBJECT(-1);
duke@435 2028 CHECK_NULL(obj);
duke@435 2029 }
duke@435 2030
duke@435 2031 //
duke@435 2032 // Now store the result on the stack
duke@435 2033 //
duke@435 2034 TosState tos_type = cache->flag_state();
twisti@3969 2035 int field_offset = cache->f2_as_index();
duke@435 2036 if (cache->is_volatile()) {
duke@435 2037 if (tos_type == atos) {
bobv@2036 2038 VERIFY_OOP(obj->obj_field_acquire(field_offset));
duke@435 2039 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
duke@435 2040 } else if (tos_type == itos) {
duke@435 2041 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
duke@435 2042 } else if (tos_type == ltos) {
duke@435 2043 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
duke@435 2044 MORE_STACK(1);
duke@435 2045 } else if (tos_type == btos) {
duke@435 2046 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
duke@435 2047 } else if (tos_type == ctos) {
duke@435 2048 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
duke@435 2049 } else if (tos_type == stos) {
duke@435 2050 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
duke@435 2051 } else if (tos_type == ftos) {
duke@435 2052 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
duke@435 2053 } else {
duke@435 2054 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
duke@435 2055 MORE_STACK(1);
duke@435 2056 }
duke@435 2057 } else {
duke@435 2058 if (tos_type == atos) {
bobv@2036 2059 VERIFY_OOP(obj->obj_field(field_offset));
duke@435 2060 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
duke@435 2061 } else if (tos_type == itos) {
duke@435 2062 SET_STACK_INT(obj->int_field(field_offset), -1);
duke@435 2063 } else if (tos_type == ltos) {
duke@435 2064 SET_STACK_LONG(obj->long_field(field_offset), 0);
duke@435 2065 MORE_STACK(1);
duke@435 2066 } else if (tos_type == btos) {
duke@435 2067 SET_STACK_INT(obj->byte_field(field_offset), -1);
duke@435 2068 } else if (tos_type == ctos) {
duke@435 2069 SET_STACK_INT(obj->char_field(field_offset), -1);
duke@435 2070 } else if (tos_type == stos) {
duke@435 2071 SET_STACK_INT(obj->short_field(field_offset), -1);
duke@435 2072 } else if (tos_type == ftos) {
duke@435 2073 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
duke@435 2074 } else {
duke@435 2075 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
duke@435 2076 MORE_STACK(1);
duke@435 2077 }
duke@435 2078 }
duke@435 2079
duke@435 2080 UPDATE_PC_AND_CONTINUE(3);
duke@435 2081 }
duke@435 2082
duke@435 2083 CASE(_putfield):
duke@435 2084 CASE(_putstatic):
duke@435 2085 {
duke@435 2086 u2 index = Bytes::get_native_u2(pc+1);
duke@435 2087 ConstantPoolCacheEntry* cache = cp->entry_at(index);
duke@435 2088 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
duke@435 2089 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
duke@435 2090 handle_exception);
duke@435 2091 cache = cp->entry_at(index);
duke@435 2092 }
duke@435 2093
duke@435 2094 #ifdef VM_JVMTI
duke@435 2095 if (_jvmti_interp_events) {
duke@435 2096 int *count_addr;
duke@435 2097 oop obj;
duke@435 2098 // Check to see if a field modification watch has been set
duke@435 2099 // before we take the time to call into the VM.
duke@435 2100 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
duke@435 2101 if ( *count_addr > 0 ) {
duke@435 2102 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
duke@435 2103 obj = (oop)NULL;
duke@435 2104 }
duke@435 2105 else {
duke@435 2106 if (cache->is_long() || cache->is_double()) {
duke@435 2107 obj = (oop) STACK_OBJECT(-3);
duke@435 2108 } else {
duke@435 2109 obj = (oop) STACK_OBJECT(-2);
duke@435 2110 }
bobv@2036 2111 VERIFY_OOP(obj);
duke@435 2112 }
duke@435 2113
duke@435 2114 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
duke@435 2115 obj,
duke@435 2116 cache,
duke@435 2117 (jvalue *)STACK_SLOT(-1)),
duke@435 2118 handle_exception);
duke@435 2119 }
duke@435 2120 }
duke@435 2121 #endif /* VM_JVMTI */
duke@435 2122
duke@435 2123 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
duke@435 2124 // out so c++ compiler has a chance for constant prop to fold everything possible away.
duke@435 2125
duke@435 2126 oop obj;
duke@435 2127 int count;
duke@435 2128 TosState tos_type = cache->flag_state();
duke@435 2129
duke@435 2130 count = -1;
duke@435 2131 if (tos_type == ltos || tos_type == dtos) {
duke@435 2132 --count;
duke@435 2133 }
duke@435 2134 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
twisti@4237 2135 Klass* k = cache->f1_as_klass();
coleenp@4037 2136 obj = k->java_mirror();
duke@435 2137 } else {
duke@435 2138 --count;
duke@435 2139 obj = (oop) STACK_OBJECT(count);
duke@435 2140 CHECK_NULL(obj);
duke@435 2141 }
duke@435 2142
duke@435 2143 //
duke@435 2144 // Now store the result
duke@435 2145 //
twisti@3969 2146 int field_offset = cache->f2_as_index();
duke@435 2147 if (cache->is_volatile()) {
duke@435 2148 if (tos_type == itos) {
duke@435 2149 obj->release_int_field_put(field_offset, STACK_INT(-1));
duke@435 2150 } else if (tos_type == atos) {
bobv@2036 2151 VERIFY_OOP(STACK_OBJECT(-1));
duke@435 2152 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
duke@435 2153 } else if (tos_type == btos) {
duke@435 2154 obj->release_byte_field_put(field_offset, STACK_INT(-1));
duke@435 2155 } else if (tos_type == ltos) {
duke@435 2156 obj->release_long_field_put(field_offset, STACK_LONG(-1));
duke@435 2157 } else if (tos_type == ctos) {
duke@435 2158 obj->release_char_field_put(field_offset, STACK_INT(-1));
duke@435 2159 } else if (tos_type == stos) {
duke@435 2160 obj->release_short_field_put(field_offset, STACK_INT(-1));
duke@435 2161 } else if (tos_type == ftos) {
duke@435 2162 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
duke@435 2163 } else {
duke@435 2164 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
duke@435 2165 }
duke@435 2166 OrderAccess::storeload();
duke@435 2167 } else {
duke@435 2168 if (tos_type == itos) {
duke@435 2169 obj->int_field_put(field_offset, STACK_INT(-1));
duke@435 2170 } else if (tos_type == atos) {
bobv@2036 2171 VERIFY_OOP(STACK_OBJECT(-1));
duke@435 2172 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
duke@435 2173 } else if (tos_type == btos) {
duke@435 2174 obj->byte_field_put(field_offset, STACK_INT(-1));
duke@435 2175 } else if (tos_type == ltos) {
duke@435 2176 obj->long_field_put(field_offset, STACK_LONG(-1));
duke@435 2177 } else if (tos_type == ctos) {
duke@435 2178 obj->char_field_put(field_offset, STACK_INT(-1));
duke@435 2179 } else if (tos_type == stos) {
duke@435 2180 obj->short_field_put(field_offset, STACK_INT(-1));
duke@435 2181 } else if (tos_type == ftos) {
duke@435 2182 obj->float_field_put(field_offset, STACK_FLOAT(-1));
duke@435 2183 } else {
duke@435 2184 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
duke@435 2185 }
duke@435 2186 }
duke@435 2187
duke@435 2188 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
duke@435 2189 }
duke@435 2190
duke@435 2191 CASE(_new): {
duke@435 2192 u2 index = Bytes::get_Java_u2(pc+1);
coleenp@4037 2193 ConstantPool* constants = istate->method()->constants();
duke@435 2194 if (!constants->tag_at(index).is_unresolved_klass()) {
duke@435 2195 // Make sure klass is initialized and doesn't have a finalizer
coleenp@4037 2196 Klass* entry = constants->slot_at(index).get_klass();
duke@435 2197 assert(entry->is_klass(), "Should be resolved klass");
coleenp@4037 2198 Klass* k_entry = (Klass*) entry;
coleenp@4037 2199 assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
coleenp@4037 2200 InstanceKlass* ik = (InstanceKlass*) k_entry;
duke@435 2201 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
duke@435 2202 size_t obj_size = ik->size_helper();
duke@435 2203 oop result = NULL;
duke@435 2204 // If the TLAB isn't pre-zeroed then we'll have to do it
duke@435 2205 bool need_zero = !ZeroTLAB;
duke@435 2206 if (UseTLAB) {
duke@435 2207 result = (oop) THREAD->tlab().allocate(obj_size);
duke@435 2208 }
goetz@6470 2209 // Disable non-TLAB-based fast-path, because profiling requires that all
goetz@6470 2210 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
goetz@6470 2211 // returns NULL.
goetz@6470 2212 #ifndef CC_INTERP_PROFILE
duke@435 2213 if (result == NULL) {
duke@435 2214 need_zero = true;
duke@435 2215 // Try allocate in shared eden
goetz@6470 2216 retry:
duke@435 2217 HeapWord* compare_to = *Universe::heap()->top_addr();
duke@435 2218 HeapWord* new_top = compare_to + obj_size;
duke@435 2219 if (new_top <= *Universe::heap()->end_addr()) {
duke@435 2220 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
duke@435 2221 goto retry;
duke@435 2222 }
duke@435 2223 result = (oop) compare_to;
duke@435 2224 }
duke@435 2225 }
goetz@6470 2226 #endif
duke@435 2227 if (result != NULL) {
duke@435 2228 // Initialize object (if nonzero size and need) and then the header
duke@435 2229 if (need_zero ) {
duke@435 2230 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
duke@435 2231 obj_size -= sizeof(oopDesc) / oopSize;
duke@435 2232 if (obj_size > 0 ) {
duke@435 2233 memset(to_zero, 0, obj_size * HeapWordSize);
duke@435 2234 }
duke@435 2235 }
duke@435 2236 if (UseBiasedLocking) {
duke@435 2237 result->set_mark(ik->prototype_header());
duke@435 2238 } else {
duke@435 2239 result->set_mark(markOopDesc::prototype());
duke@435 2240 }
coleenp@602 2241 result->set_klass_gap(0);
duke@435 2242 result->set_klass(k_entry);
duke@435 2243 SET_STACK_OBJECT(result, 0);
duke@435 2244 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
duke@435 2245 }
duke@435 2246 }
duke@435 2247 }
duke@435 2248 // Slow case allocation
duke@435 2249 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
duke@435 2250 handle_exception);
duke@435 2251 SET_STACK_OBJECT(THREAD->vm_result(), 0);
duke@435 2252 THREAD->set_vm_result(NULL);
duke@435 2253 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
duke@435 2254 }
      CASE(_anewarray): {
        // Allocate an object array. Element klass comes from CP index; the
        // length is popped from the stack and replaced (in-place, slot -1) by
        // the new array reference returned via THREAD->vm_result().
        u2 index = Bytes::get_Java_u2(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
                handle_exception);
        SET_STACK_OBJECT(THREAD->vm_result(), -1);
        THREAD->set_vm_result(NULL);
        UPDATE_PC_AND_CONTINUE(3);
      }
duke@435 2264 CASE(_multianewarray): {
duke@435 2265 jint dims = *(pc+3);
duke@435 2266 jint size = STACK_INT(-1);
duke@435 2267 // stack grows down, dimensions are up!
duke@435 2268 jint *dimarray =
twisti@1864 2269 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
twisti@1864 2270 Interpreter::stackElementWords-1];
duke@435 2271 //adjust pointer to start of stack element
duke@435 2272 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
duke@435 2273 handle_exception);
duke@435 2274 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
duke@435 2275 THREAD->set_vm_result(NULL);
duke@435 2276 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
duke@435 2277 }
      CASE(_checkcast):
          // checkcast: leave the reference on the stack if it is null or a
          // subtype of the CP klass; otherwise throw ClassCastException.
          if (STACK_OBJECT(-1) != NULL) {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
            Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch.
            //
            if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
              // Decrement counter at checkcast.
              BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
              ResourceMark rm(THREAD);
              const char* objName = objKlass->external_name();
              const char* klassName = klassOf->external_name();
              char* message = SharedRuntime::generate_class_cast_message(
                objName, klassName);
              VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
            }
            // Profile checkcast with null_seen and receiver.
            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
          } else {
            // Profile checkcast with null_seen and receiver.
            BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
          }
          UPDATE_PC_AND_CONTINUE(3);
duke@435 2310
      CASE(_instanceof):
          // instanceof: replace the reference on the stack with 1 if it is a
          // non-null instance of the CP klass, 0 otherwise (null yields 0).
          if (STACK_OBJECT(-1) == NULL) {
            SET_STACK_INT(0, -1);
            // Profile instanceof with null_seen and receiver.
            BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
          } else {
            VERIFY_OOP(STACK_OBJECT(-1));
            u2 index = Bytes::get_Java_u2(pc+1);
            // Constant pool may have actual klass or unresolved klass. If it is
            // unresolved we must resolve it.
            if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
              CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
            }
            Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
            Klass* objKlass = STACK_OBJECT(-1)->klass();
            //
            // Check for compatibility. This check must not GC!!
            // Seems way more expensive now that we must dispatch.
            //
            if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
              SET_STACK_INT(1, -1);
            } else {
              SET_STACK_INT(0, -1);
              // Decrement counter at checkcast.
              BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
            }
            // Profile instanceof with null_seen and receiver.
            BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
          }
          UPDATE_PC_AND_CONTINUE(3);
duke@435 2341
      CASE(_ldc_w):
      CASE(_ldc):
        {
          // Push a single-slot constant (int, float, String, or Class mirror)
          // from the constant pool. _ldc has a 1-byte index, _ldc_w a 2-byte one.
          u2 index;
          bool wide = false;
          int incr = 2; // frequent case
          if (opcode == Bytecodes::_ldc) {
            index = pc[1];
          } else {
            index = Bytes::get_Java_u2(pc+1);
            incr = 3;
            wide = true;
          }

          ConstantPool* constants = METHOD->constants();
          switch (constants->tag_at(index).value()) {
          case JVM_CONSTANT_Integer:
            SET_STACK_INT(constants->int_at(index), 0);
            break;

          case JVM_CONSTANT_Float:
            SET_STACK_FLOAT(constants->float_at(index), 0);
            break;

          case JVM_CONSTANT_String:
            {
              // String constants live in the resolved_references array; a NULL
              // slot means not yet resolved, so call into the runtime once.
              oop result = constants->resolved_references()->obj_at(index);
              if (result == NULL) {
                CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
                SET_STACK_OBJECT(THREAD->vm_result(), 0);
                THREAD->set_vm_result(NULL);
              } else {
                VERIFY_OOP(result);
                SET_STACK_OBJECT(result, 0);
              }
              break;
            }

          case JVM_CONSTANT_Class:
            // Resolved class constant: push the java.lang.Class mirror.
            VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
            SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
            break;

          case JVM_CONSTANT_UnresolvedClass:
          case JVM_CONSTANT_UnresolvedClassInError:
            CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
            SET_STACK_OBJECT(THREAD->vm_result(), 0);
            THREAD->set_vm_result(NULL);
            break;

          default: ShouldNotReachHere();
          }
          UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
        }
duke@435 2396
      CASE(_ldc2_w):
        {
          // Push a two-slot constant (long or double) from the constant pool.
          u2 index = Bytes::get_Java_u2(pc+1);

          ConstantPool* constants = METHOD->constants();
          switch (constants->tag_at(index).value()) {

          case JVM_CONSTANT_Long:
             SET_STACK_LONG(constants->long_at(index), 1);
            break;

          case JVM_CONSTANT_Double:
             SET_STACK_DOUBLE(constants->double_at(index), 1);
            break;
          default: ShouldNotReachHere();
          }
          // 3-byte bytecode, category-2 value occupies two stack slots.
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
        }
duke@435 2415
      CASE(_fast_aldc_w):
      CASE(_fast_aldc): {
        // Rewritten ldc for object constants (e.g. MethodHandle/MethodType):
        // index is native-endian and addresses the resolved_references array
        // directly. Note _fast_aldc_w reads a native u2, unlike _ldc_w's Java u2.
        u2 index;
        int incr;
        if (opcode == Bytecodes::_fast_aldc) {
          index = pc[1];
          incr = 2;
        } else {
          index = Bytes::get_native_u2(pc+1);
          incr = 3;
        }

        // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
        // This kind of CP cache entry does not need to match the flags byte, because
        // there is a 1-1 relation between bytecode type and CP entry type.
        ConstantPool* constants = METHOD->constants();
        oop result = constants->resolved_references()->obj_at(index);
        if (result == NULL) {
          // Not yet resolved: let the runtime resolve and return it in vm_result.
          CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
                  handle_exception);
          result = THREAD->vm_result();
        }

        VERIFY_OOP(result);
        SET_STACK_OBJECT(result, 0);
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }
twisti@2762 2443
      CASE(_invokedynamic): {
        // invokedynamic: resolve the call site via the invokedynamic-specific
        // CP cache entry, push the appendix argument if present, and hand the
        // adapter method back to the frame manager via istate.

        if (!EnableInvokeDynamic) {
          // We should not encounter this bytecode if !EnableInvokeDynamic.
          // The verifier will stop it.  However, if we get past the verifier,
          // this will stop the thread in a reasonable way, without crashing the JVM.
          CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
                  handle_exception);
          ShouldNotReachHere();
        }

        u4 index = Bytes::get_native_u4(pc+1);
        ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);

        // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
        // This kind of CP cache entry does not need to match the flags byte, because
        // there is a 1-1 relation between bytecode type and CP entry type.
        if (! cache->is_resolved((Bytecodes::Code) opcode)) {
          CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
                  handle_exception);
          // Re-fetch: resolution may have updated the cache entry.
          cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
        }

        Method* method = cache->f1_as_method();
        if (VerifyOops) method->verify();

        if (cache->has_appendix()) {
          // The appendix (e.g. a CallSite/MethodHandle) is pushed as a trailing
          // hidden argument.
          ConstantPool* constants = METHOD->constants();
          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
          MORE_STACK(1);
        }

        istate->set_msg(call_method);
        istate->set_callee(method);
        istate->set_callee_entry_point(method->from_interpreted_entry());
        istate->set_bcp_advance(5);

        // Invokedynamic has got a call counter, just like an invokestatic -> increment!
        BI_PROFILE_UPDATE_CALL();

        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
twisti@2762 2486
      CASE(_invokehandle): {
        // invokehandle (rewritten MethodHandle.invoke/invokeExact): resolve
        // the CP cache entry, push the appendix if present, and return the
        // adapter method to the frame manager via istate.

        if (!EnableInvokeDynamic) {
          // Rewriting to _invokehandle only happens when invokedynamic support
          // is on, so reaching here without it is a VM bug.
          ShouldNotReachHere();
        }

        u2 index = Bytes::get_native_u2(pc+1);
        ConstantPoolCacheEntry* cache = cp->entry_at(index);

        if (! cache->is_resolved((Bytecodes::Code) opcode)) {
          CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
                  handle_exception);
          // Re-fetch: resolution may have updated the cache entry.
          cache = cp->entry_at(index);
        }

        Method* method = cache->f1_as_method();
        if (VerifyOops) method->verify();

        if (cache->has_appendix()) {
          // Push the appendix as a trailing hidden argument.
          ConstantPool* constants = METHOD->constants();
          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
          MORE_STACK(1);
        }

        istate->set_msg(call_method);
        istate->set_callee(method);
        istate->set_callee_entry_point(method->from_interpreted_entry());
        istate->set_bcp_advance(3);

        // Invokehandle has got a call counter, just like a final call -> increment!
        BI_PROFILE_UPDATE_FINALCALL();

        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
twisti@4237 2521
      CASE(_invokeinterface): {
        // invokeinterface: resolve the CP cache entry, then either take the
        // forced-virtual shortcut (java.lang.Object methods invoked via an
        // interface) or walk the receiver's itable to find the implementation.
        u2 index = Bytes::get_native_u2(pc+1);

        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
        // out so c++ compiler has a chance for constant prop to fold everything possible away.

        ConstantPoolCacheEntry* cache = cp->entry_at(index);
        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
          CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          // Re-fetch: resolution may have updated the cache entry.
          cache = cp->entry_at(index);
        }

        istate->set_msg(call_method);

        // Special case of invokeinterface called for virtual method of
        // java.lang.Object.  See cpCacheOop.cpp for details.
        // This code isn't produced by javac, but could be produced by
        // another compliant java compiler.
        if (cache->is_forced_virtual()) {
          Method* callee;
          // Null-check the receiver before dispatching.
          CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
          if (cache->is_vfinal()) {
            callee = cache->f2_as_vfinal_method();
            // Profile 'special case of invokeinterface' final call.
            BI_PROFILE_UPDATE_FINALCALL();
          } else {
            // Get receiver.
            int parms = cache->parameter_size();
            // Same comments as invokevirtual apply here.
            oop rcvr = STACK_OBJECT(-parms);
            VERIFY_OOP(rcvr);
            InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
            // vtable dispatch: f2 holds the vtable index.
            callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
            // Profile 'special case of invokeinterface' virtual call.
            BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
          }
          istate->set_callee(callee);
          istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
          if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
            istate->set_callee_entry_point(callee->interpreter_entry());
          }
#endif /* VM_JVMTI */
          istate->set_bcp_advance(5);
          UPDATE_PC_AND_RETURN(0); // I'll be back...
        }

        // this could definitely be cleaned up QQQ
        Method* callee;
        Klass* iclass = cache->f1_as_klass();
        // InstanceKlass* interface = (InstanceKlass*) iclass;
        // get receiver
        int parms = cache->parameter_size();
        oop rcvr = STACK_OBJECT(-parms);
        CHECK_NULL(rcvr);
        InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
        // Linear scan of the receiver klass' itable for the declaring interface.
        itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
        int i;
        for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
          if (ki->interface_klass() == iclass) break;
        }
        // If the interface isn't found, this class doesn't implement this
        // interface.  The link resolver checks this but only for the first
        // time this interface is called.
        if (i == int2->itable_length()) {
          VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
        }
        int mindex = cache->f2_as_index();
        itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
        callee = im[mindex].method();
        if (callee == NULL) {
          // Interface slot present but no implementation in the receiver class.
          VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
        }

        // Profile virtual call.
        BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());

        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
#endif /* VM_JVMTI */
        istate->set_bcp_advance(5);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
duke@435 2610
      CASE(_invokevirtual):
      CASE(_invokespecial):
      CASE(_invokestatic): {
        // Shared handler for the three classic invokes: resolve the CP cache
        // entry, select the callee (vfinal / vtable dispatch / direct f1), and
        // return to the frame manager with istate describing the call.
        u2 index = Bytes::get_native_u2(pc+1);

        ConstantPoolCacheEntry* cache = cp->entry_at(index);
        // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
        // out so c++ compiler has a chance for constant prop to fold everything possible away.

        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
          CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
                  handle_exception);
          // Re-fetch: resolution may have updated the cache entry.
          cache = cp->entry_at(index);
        }

        istate->set_msg(call_method);
        {
          Method* callee;
          if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
            // Null-check the receiver before dispatching.
            CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
            if (cache->is_vfinal()) {
              // Final virtual method: callee is known statically (f2).
              callee = cache->f2_as_vfinal_method();
              // Profile final call.
              BI_PROFILE_UPDATE_FINALCALL();
            } else {
              // get receiver
              int parms = cache->parameter_size();
              // this works but needs a resourcemark and seems to create a vtable on every call:
              // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
              //
              // this fails with an assert
              // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
              // but this works
              oop rcvr = STACK_OBJECT(-parms);
              VERIFY_OOP(rcvr);
              InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
              /*
                Executing this code in java.lang.String:
                    public String(char value[]) {
                          this.count = value.length;
                          this.value = (char[])value.clone();
                     }

                 a find on rcvr->klass() reports:
                 {type array char}{type array class}
                  - klass: {other class}

                  but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
                  because rcvr->klass()->oop_is_instance() == 0
                  However it seems to have a vtable in the right location. Huh?

              */
              // Raw vtable dispatch: f2 holds the vtable index.
              callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
              // Profile virtual call.
              BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
            }
          } else {
            if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
              // invokespecial still requires a non-null receiver.
              CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
            }
            // invokespecial/invokestatic: callee is resolved directly in f1.
            callee = cache->f1_as_method();

            // Profile call.
            BI_PROFILE_UPDATE_CALL();
          }

          istate->set_callee(callee);
          istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
          if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
            istate->set_callee_entry_point(callee->interpreter_entry());
          }
#endif /* VM_JVMTI */
          istate->set_bcp_advance(3);
          UPDATE_PC_AND_RETURN(0); // I'll be back...
        }
      }
duke@435 2688
duke@435 2689 /* Allocate memory for a new java object. */
duke@435 2690
      CASE(_newarray): {
        // Allocate a primitive array: element type is the BasicType byte at
        // pc+1, length is popped and replaced (in-place, slot -1) by the array.
        BasicType atype = (BasicType) *(pc+1);
        jint size = STACK_INT(-1);
        CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
                handle_exception);
        SET_STACK_OBJECT(THREAD->vm_result(), -1);
        THREAD->set_vm_result(NULL);

        UPDATE_PC_AND_CONTINUE(2);
      }
duke@435 2701
duke@435 2702 /* Throw an exception. */
duke@435 2703
      CASE(_athrow): {
        // athrow: pop the throwable (must be non-null) and route it through
        // the common exception path by installing it as a pending exception.
        oop except_oop = STACK_OBJECT(-1);
        CHECK_NULL(except_oop);
        // set pending_exception so we use common code
        THREAD->set_pending_exception(except_oop, NULL, 0);
        goto handle_exception;
      }
duke@435 2711
duke@435 2712 /* goto and jsr. They are exactly the same except jsr pushes
duke@435 2713 * the address of the next instruction first.
duke@435 2714 */
duke@435 2715
      CASE(_jsr): {
          // jsr: push the return address (bci of the instruction after this
          // 3-byte jsr), then fall through to the shared goto branch code.
          /* push bytecode index on stack */
          SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
          MORE_STACK(1);
          /* FALL THROUGH */
      }

      CASE(_goto):
      {
          // Unconditional branch with a signed 16-bit offset. Backedge checks
          // may trigger safepoints/OSR for backward branches.
          int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
          // Profile jump.
          BI_PROFILE_UPDATE_JUMP();
          address branch_pc = pc;
          UPDATE_PC(offset);
          DO_BACKEDGE_CHECKS(offset, branch_pc);
          CONTINUE;
      }
duke@435 2733
      CASE(_jsr_w): {
          // jsr_w: push the return address (bci after this 5-byte jsr_w), then
          // fall through to the shared wide-goto branch code.
          /* push return address on the stack */
          SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
          MORE_STACK(1);
          /* FALL THROUGH */
      }

      CASE(_goto_w):
      {
          // Unconditional branch with a signed 32-bit offset.
          int32_t offset = Bytes::get_Java_u4(pc + 1);
          // Profile jump.
          BI_PROFILE_UPDATE_JUMP();
          address branch_pc = pc;
          UPDATE_PC(offset);
          DO_BACKEDGE_CHECKS(offset, branch_pc);
          CONTINUE;
      }
duke@435 2751
duke@435 2752 /* return from a jsr or jsr_w */
duke@435 2753
      CASE(_ret): {
          // ret: return from a jsr/jsr_w by loading the bci stored in the
          // local variable whose index is the byte at pc+1.
          // Profile ret.
          BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
          // Now, update the pc.
          pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
          UPDATE_PC_AND_CONTINUE(0);
      }
duke@435 2761
duke@435 2762 /* debugger breakpoint */
duke@435 2763
      CASE(_breakpoint): {
          // Debugger breakpoint: look up the original bytecode this breakpoint
          // replaced, post the breakpoint event, then re-dispatch as if the
          // original bytecode were at pc.
          Bytecodes::Code original_bytecode;
          // Flush cached interpreter state before calling into the VM by hand
          // (this call is made without the CALL_VM wrapper).
          DECACHE_STATE();
          SET_LAST_JAVA_FRAME();
          original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
                              METHOD, pc);
          RESET_LAST_JAVA_FRAME();
          CACHE_STATE();
          if (THREAD->has_pending_exception()) goto handle_exception;
            CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
                                                    handle_exception);

          // Re-enter the dispatch switch with the original opcode; pc is unchanged.
          opcode = (jubyte)original_bytecode;
          goto opcode_switch;
      }
duke@435 2779
      DEFAULT:
          // Any opcode without an explicit CASE above is a VM bug: abort with
          // the opcode number and name rather than interpreting garbage.
          fatal(err_msg("Unimplemented opcode %d = %s", opcode,
                        Bytecodes::name((Bytecodes::Code)opcode)));
          goto finish;
duke@435 2784
duke@435 2785 } /* switch(opc) */
duke@435 2786
duke@435 2787
duke@435 2788 #ifdef USELABELS
duke@435 2789 check_for_exception:
duke@435 2790 #endif
duke@435 2791 {
duke@435 2792 if (!THREAD->has_pending_exception()) {
duke@435 2793 CONTINUE;
duke@435 2794 }
duke@435 2795 /* We will be gcsafe soon, so flush our state. */
duke@435 2796 DECACHE_PC();
duke@435 2797 goto handle_exception;
duke@435 2798 }
duke@435 2799 do_continue: ;
duke@435 2800
duke@435 2801 } /* while (1) interpreter loop */
duke@435 2802
duke@435 2803
duke@435 2804 // An exception exists in the thread state see whether this activation can handle it
  // An exception is pending on THREAD: find a handler in this activation and
  // resume at it, or record the exception and unwind via handle_return.
  handle_exception: {

    HandleMarkCleaner __hmc(THREAD);
    Handle except_oop(THREAD, THREAD->pending_exception());
    // Prevent any subsequent HandleMarkCleaner in the VM
    // from freeing the except_oop handle.
    HandleMark __hm(THREAD);

    THREAD->clear_pending_exception();
    assert(except_oop(), "No exception to process");
    intptr_t continuation_bci;
    // expression stack is emptied
    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
    // Ask the runtime for the handler bci; a negative result means no handler
    // in this method. Note the exception may be replaced (e.g. by
    // StackOverflowError during handler lookup), hence the vm_result re-read.
    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
            handle_exception);

    except_oop = THREAD->vm_result();
    THREAD->set_vm_result(NULL);
    if (continuation_bci >= 0) {
      // Place exception on top of stack
      SET_STACK_OBJECT(except_oop(), 0);
      MORE_STACK(1);
      pc = METHOD->code_base() + continuation_bci;
      if (TraceExceptions) {
        ttyLocker ttyl;
        ResourceMark rm;
        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
        tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
        tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
                      istate->bcp() - (intptr_t)METHOD->code_base(),
                      continuation_bci, THREAD);
      }
      // for AbortVMOnException flag
      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));

      // Update profiling data.
      BI_PROFILE_ALIGN_TO_CURRENT_BCI();
      // Resume interpreting at the handler.
      goto run;
    }
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
      tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
                    istate->bcp() - (intptr_t)METHOD->code_base(),
                    THREAD);
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
    // No handler in this activation, unwind and try again
    THREAD->set_pending_exception(except_oop(), NULL, 0);
    goto handle_return;
  }  // handle_exception:
duke@435 2859
duke@435 2860 // Return from an interpreter invocation with the result of the interpretation
duke@435 2861 // on the top of the Java Stack (or a pending exception)
duke@435 2862
  // JVMTI PopFrame support: discard this activation without running its
  // remaining bytecodes, reporting popping_frame to the frame manager.
  handle_Pop_Frame: {

    // We don't really do anything special here except we must be aware
    // that we can get here without ever locking the method (if sync).
    // Also we skip the notification of the exit.

    istate->set_msg(popping_frame);
    // Clear pending so while the pop is in process
    // we don't start another one if a call_vm is done.
    THREAD->clr_pop_frame_pending();
    // Let interpreter (only) see the we're in the process of popping a frame
    THREAD->set_pop_frame_in_process();

    goto handle_return;

  } // handle_Pop_Frame
goetz@6450 2879
goetz@6450 2880 // ForceEarlyReturn ends a method, and returns to the caller with a return value
goetz@6450 2881 // given by the invoker of the early return.
goetz@6450 2882 handle_Early_Return: {
goetz@6450 2883
goetz@6450 2884 istate->set_msg(early_return);
goetz@6450 2885
goetz@6450 2886 // Clear expression stack.
goetz@6450 2887 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
goetz@6450 2888
goetz@6450 2889 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
goetz@6450 2890
goetz@6450 2891 // Push the value to be returned.
goetz@6450 2892 switch (istate->method()->result_type()) {
goetz@6450 2893 case T_BOOLEAN:
goetz@6450 2894 case T_SHORT:
goetz@6450 2895 case T_BYTE:
goetz@6450 2896 case T_CHAR:
goetz@6450 2897 case T_INT:
goetz@6451 2898 SET_STACK_INT(ts->earlyret_value().i, 0);
goetz@6450 2899 MORE_STACK(1);
goetz@6450 2900 break;
goetz@6450 2901 case T_LONG:
goetz@6450 2902 SET_STACK_LONG(ts->earlyret_value().j, 1);
goetz@6450 2903 MORE_STACK(2);
goetz@6450 2904 break;
goetz@6450 2905 case T_FLOAT:
goetz@6450 2906 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
goetz@6450 2907 MORE_STACK(1);
goetz@6450 2908 break;
goetz@6450 2909 case T_DOUBLE:
goetz@6450 2910 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
goetz@6450 2911 MORE_STACK(2);
goetz@6450 2912 break;
goetz@6450 2913 case T_ARRAY:
goetz@6450 2914 case T_OBJECT:
goetz@6450 2915 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
goetz@6450 2916 MORE_STACK(1);
goetz@6450 2917 break;
goetz@6450 2918 }
goetz@6450 2919
goetz@6450 2920 ts->clr_earlyret_value();
goetz@6450 2921 ts->set_earlyret_oop(NULL);
goetz@6450 2922 ts->clr_earlyret_pending();
goetz@6450 2923
goetz@6450 2924 // Fall through to handle_return.
goetz@6450 2925
goetz@6450 2926 } // handle_Early_Return
goetz@6450 2927
goetz@6450 2928 handle_return: {
duke@435 2929 DECACHE_STATE();
duke@435 2930
goetz@6450 2931 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
goetz@6450 2932 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
duke@435 2933 Handle original_exception(THREAD, THREAD->pending_exception());
duke@435 2934 Handle illegal_state_oop(THREAD, NULL);
duke@435 2935
duke@435 2936 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
duke@435 2937 // in any following VM entries from freeing our live handles, but illegal_state_oop
duke@435 2938 // isn't really allocated yet and so doesn't become live until later and
duke@435 2939 // in unpredicatable places. Instead we must protect the places where we enter the
duke@435 2940 // VM. It would be much simpler (and safer) if we could allocate a real handle with
duke@435 2941 // a NULL oop in it and then overwrite the oop later as needed. This isn't
duke@435 2942 // unfortunately isn't possible.
duke@435 2943
duke@435 2944 THREAD->clear_pending_exception();
duke@435 2945
duke@435 2946 //
duke@435 2947 // As far as we are concerned we have returned. If we have a pending exception
duke@435 2948 // that will be returned as this invocation's result. However if we get any
duke@435 2949 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
duke@435 2950 // will be our final result (i.e. monitor exception trumps a pending exception).
duke@435 2951 //
duke@435 2952
duke@435 2953 // If we never locked the method (or really passed the point where we would have),
duke@435 2954 // there is no need to unlock it (or look for other monitors), since that
duke@435 2955 // could not have happened.
duke@435 2956
duke@435 2957 if (THREAD->do_not_unlock()) {
duke@435 2958
duke@435 2959 // Never locked, reset the flag now because obviously any caller must
duke@435 2960 // have passed their point of locking for us to have gotten here.
duke@435 2961
duke@435 2962 THREAD->clr_do_not_unlock();
duke@435 2963 } else {
duke@435 2964 // At this point we consider that we have returned. We now check that the
duke@435 2965 // locks were properly block structured. If we find that they were not
duke@435 2966 // used properly we will return with an illegal monitor exception.
duke@435 2967 // The exception is checked by the caller not the callee since this
duke@435 2968 // checking is considered to be part of the invocation and therefore
duke@435 2969 // in the callers scope (JVM spec 8.13).
duke@435 2970 //
duke@435 2971 // Another weird thing to watch for is if the method was locked
duke@435 2972 // recursively and then not exited properly. This means we must
duke@435 2973 // examine all the entries in reverse time(and stack) order and
duke@435 2974 // unlock as we find them. If we find the method monitor before
duke@435 2975 // we are at the initial entry then we should throw an exception.
duke@435 2976 // It is not clear the template based interpreter does this
duke@435 2977 // correctly
duke@435 2978
duke@435 2979 BasicObjectLock* base = istate->monitor_base();
duke@435 2980 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
duke@435 2981 bool method_unlock_needed = METHOD->is_synchronized();
duke@435 2982 // We know the initial monitor was used for the method don't check that
duke@435 2983 // slot in the loop
duke@435 2984 if (method_unlock_needed) base--;
duke@435 2985
duke@435 2986 // Check all the monitors to see they are unlocked. Install exception if found to be locked.
duke@435 2987 while (end < base) {
duke@435 2988 oop lockee = end->obj();
duke@435 2989 if (lockee != NULL) {
duke@435 2990 BasicLock* lock = end->lock();
duke@435 2991 markOop header = lock->displaced_header();
duke@435 2992 end->set_obj(NULL);
goetz@6445 2993
goetz@6445 2994 if (!lockee->mark()->has_bias_pattern()) {
goetz@6445 2995 // If it isn't recursive we either must swap old header or call the runtime
goetz@6445 2996 if (header != NULL) {
goetz@6445 2997 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
goetz@6445 2998 // restore object for the slow case
goetz@6445 2999 end->set_obj(lockee);
goetz@6445 3000 {
goetz@6445 3001 // Prevent any HandleMarkCleaner from freeing our live handles
goetz@6445 3002 HandleMark __hm(THREAD);
goetz@6445 3003 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
goetz@6445 3004 }
duke@435 3005 }
duke@435 3006 }
duke@435 3007 }
duke@435 3008 // One error is plenty
duke@435 3009 if (illegal_state_oop() == NULL && !suppress_error) {
duke@435 3010 {
duke@435 3011 // Prevent any HandleMarkCleaner from freeing our live handles
duke@435 3012 HandleMark __hm(THREAD);
duke@435 3013 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
duke@435 3014 }
duke@435 3015 assert(THREAD->has_pending_exception(), "Lost our exception!");
duke@435 3016 illegal_state_oop = THREAD->pending_exception();
duke@435 3017 THREAD->clear_pending_exception();
duke@435 3018 }
duke@435 3019 }
duke@435 3020 end++;
duke@435 3021 }
duke@435 3022 // Unlock the method if needed
duke@435 3023 if (method_unlock_needed) {
duke@435 3024 if (base->obj() == NULL) {
duke@435 3025 // The method is already unlocked this is not good.
duke@435 3026 if (illegal_state_oop() == NULL && !suppress_error) {
duke@435 3027 {
duke@435 3028 // Prevent any HandleMarkCleaner from freeing our live handles
duke@435 3029 HandleMark __hm(THREAD);
duke@435 3030 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
duke@435 3031 }
duke@435 3032 assert(THREAD->has_pending_exception(), "Lost our exception!");
duke@435 3033 illegal_state_oop = THREAD->pending_exception();
duke@435 3034 THREAD->clear_pending_exception();
duke@435 3035 }
duke@435 3036 } else {
duke@435 3037 //
duke@435 3038 // The initial monitor is always used for the method
duke@435 3039 // However if that slot is no longer the oop for the method it was unlocked
duke@435 3040 // and reused by something that wasn't unlocked!
duke@435 3041 //
duke@435 3042 // deopt can come in with rcvr dead because c2 knows
duke@435 3043 // its value is preserved in the monitor. So we can't use locals[0] at all
duke@435 3044 // and must use first monitor slot.
duke@435 3045 //
duke@435 3046 oop rcvr = base->obj();
duke@435 3047 if (rcvr == NULL) {
duke@435 3048 if (!suppress_error) {
goetz@6470 3049 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
duke@435 3050 illegal_state_oop = THREAD->pending_exception();
duke@435 3051 THREAD->clear_pending_exception();
duke@435 3052 }
goetz@6445 3053 } else if (UseHeavyMonitors) {
goetz@6445 3054 {
goetz@6445 3055 // Prevent any HandleMarkCleaner from freeing our live handles.
goetz@6445 3056 HandleMark __hm(THREAD);
goetz@6445 3057 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
goetz@6445 3058 }
goetz@6445 3059 if (THREAD->has_pending_exception()) {
goetz@6445 3060 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
goetz@6445 3061 THREAD->clear_pending_exception();
goetz@6445 3062 }
duke@435 3063 } else {
duke@435 3064 BasicLock* lock = base->lock();
duke@435 3065 markOop header = lock->displaced_header();
duke@435 3066 base->set_obj(NULL);
goetz@6445 3067
goetz@6445 3068 if (!rcvr->mark()->has_bias_pattern()) {
goetz@6445 3069 base->set_obj(NULL);
goetz@6445 3070 // If it isn't recursive we either must swap old header or call the runtime
goetz@6445 3071 if (header != NULL) {
goetz@6445 3072 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
goetz@6445 3073 // restore object for the slow case
goetz@6445 3074 base->set_obj(rcvr);
goetz@6445 3075 {
goetz@6445 3076 // Prevent any HandleMarkCleaner from freeing our live handles
goetz@6445 3077 HandleMark __hm(THREAD);
goetz@6445 3078 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
goetz@6445 3079 }
goetz@6445 3080 if (THREAD->has_pending_exception()) {
goetz@6445 3081 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
goetz@6445 3082 THREAD->clear_pending_exception();
goetz@6445 3083 }
duke@435 3084 }
duke@435 3085 }
duke@435 3086 }
duke@435 3087 }
duke@435 3088 }
duke@435 3089 }
duke@435 3090 }
goetz@6445 3091 // Clear the do_not_unlock flag now.
goetz@6445 3092 THREAD->clr_do_not_unlock();
duke@435 3093
duke@435 3094 //
duke@435 3095 // Notify jvmti/jvmdi
duke@435 3096 //
duke@435 3097 // NOTE: we do not notify a method_exit if we have a pending exception,
duke@435 3098 // including an exception we generate for unlocking checks. In the former
duke@435 3099 // case, JVMDI has already been notified by our call for the exception handler
duke@435 3100 // and in both cases as far as JVMDI is concerned we have already returned.
duke@435 3101 // If we notify it again JVMDI will be all confused about how many frames
duke@435 3102 // are still on the stack (4340444).
duke@435 3103 //
duke@435 3104 // NOTE Further! It turns out the the JVMTI spec in fact expects to see
duke@435 3105 // method_exit events whenever we leave an activation unless it was done
duke@435 3106 // for popframe. This is nothing like jvmdi. However we are passing the
duke@435 3107 // tests at the moment (apparently because they are jvmdi based) so rather
duke@435 3108 // than change this code and possibly fail tests we will leave it alone
duke@435 3109 // (with this note) in anticipation of changing the vm and the tests
duke@435 3110 // simultaneously.
duke@435 3111
duke@435 3112
duke@435 3113 //
duke@435 3114 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
duke@435 3115
duke@435 3116
duke@435 3117
duke@435 3118 #ifdef VM_JVMTI
duke@435 3119 if (_jvmti_interp_events) {
duke@435 3120 // Whenever JVMTI puts a thread in interp_only_mode, method
duke@435 3121 // entry/exit events are sent for that thread to track stack depth.
duke@435 3122 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
duke@435 3123 {
duke@435 3124 // Prevent any HandleMarkCleaner from freeing our live handles
duke@435 3125 HandleMark __hm(THREAD);
duke@435 3126 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
duke@435 3127 }
duke@435 3128 }
duke@435 3129 }
duke@435 3130 #endif /* VM_JVMTI */
duke@435 3131
duke@435 3132 //
duke@435 3133 // See if we are returning any exception
duke@435 3134 // A pending exception that was pending prior to a possible popping frame
duke@435 3135 // overrides the popping frame.
duke@435 3136 //
goetz@6470 3137 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
duke@435 3138 if (illegal_state_oop() != NULL || original_exception() != NULL) {
goetz@6470 3139 // Inform the frame manager we have no result.
duke@435 3140 istate->set_msg(throwing_exception);
duke@435 3141 if (illegal_state_oop() != NULL)
duke@435 3142 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
duke@435 3143 else
duke@435 3144 THREAD->set_pending_exception(original_exception(), NULL, 0);
duke@435 3145 UPDATE_PC_AND_RETURN(0);
duke@435 3146 }
duke@435 3147
duke@435 3148 if (istate->msg() == popping_frame) {
duke@435 3149 // Make it simpler on the assembly code and set the message for the frame pop.
duke@435 3150 // returns
duke@435 3151 if (istate->prev() == NULL) {
duke@435 3152 // We must be returning to a deoptimized frame (because popframe only happens between
duke@435 3153 // two interpreted frames). We need to save the current arguments in C heap so that
duke@435 3154 // the deoptimized frame when it restarts can copy the arguments to its expression
duke@435 3155 // stack and re-execute the call. We also have to notify deoptimization that this
twisti@1040 3156 // has occurred and to pick the preserved args copy them to the deoptimized frame's
duke@435 3157 // java expression stack. Yuck.
duke@435 3158 //
duke@435 3159 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
duke@435 3160 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
duke@435 3161 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
duke@435 3162 }
goetz@6450 3163 } else {
goetz@6450 3164 istate->set_msg(return_from_method);
duke@435 3165 }
bobv@2036 3166
bobv@2036 3167 // Normal return
bobv@2036 3168 // Advance the pc and return to frame manager
bobv@2036 3169 UPDATE_PC_AND_RETURN(1);
duke@435 3170 } /* handle_return: */
duke@435 3171
duke@435 3172 // This is really a fatal error return
duke@435 3173
duke@435 3174 finish:
duke@435 3175 DECACHE_TOS();
duke@435 3176 DECACHE_PC();
duke@435 3177
duke@435 3178 return;
duke@435 3179 }
duke@435 3180
duke@435 3181 /*
duke@435 3182 * All the code following this point is only produced once and is not present
duke@435 3183 * in the JVMTI version of the interpreter
duke@435 3184 */
duke@435 3185
duke@435 3186 #ifndef VM_JVMTI
duke@435 3187
duke@435 3188 // This constructor should only be used to contruct the object to signal
duke@435 3189 // interpreter initialization. All other instances should be created by
duke@435 3190 // the frame manager.
duke@435 3191 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
duke@435 3192 if (msg != initialize) ShouldNotReachHere();
duke@435 3193 _msg = msg;
duke@435 3194 _self_link = this;
duke@435 3195 _prev_link = NULL;
duke@435 3196 }
duke@435 3197
duke@435 3198 // Inline static functions for Java Stack and Local manipulation
duke@435 3199
duke@435 3200 // The implementations are platform dependent. We have to worry about alignment
duke@435 3201 // issues on some machines which can change on the same platform depending on
duke@435 3202 // whether it is an LP64 machine also.
duke@435 3203 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
duke@435 3204 return (address) tos[Interpreter::expr_index_at(-offset)];
duke@435 3205 }
duke@435 3206
duke@435 3207 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
duke@435 3208 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
duke@435 3209 }
duke@435 3210
duke@435 3211 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
duke@435 3212 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
duke@435 3213 }
duke@435 3214
duke@435 3215 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
simonis@6483 3216 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
duke@435 3217 }
duke@435 3218
duke@435 3219 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
duke@435 3220 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
duke@435 3221 }
duke@435 3222
duke@435 3223 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
duke@435 3224 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
duke@435 3225 }
duke@435 3226
duke@435 3227 // only used for value types
duke@435 3228 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
duke@435 3229 int offset) {
duke@435 3230 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3231 }
duke@435 3232
duke@435 3233 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
duke@435 3234 int offset) {
duke@435 3235 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3236 }
duke@435 3237
duke@435 3238 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
duke@435 3239 int offset) {
duke@435 3240 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3241 }
duke@435 3242
duke@435 3243 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
duke@435 3244 int offset) {
duke@435 3245 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3246 }
duke@435 3247
duke@435 3248 // needs to be platform dep for the 32 bit platforms.
duke@435 3249 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
duke@435 3250 int offset) {
duke@435 3251 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
duke@435 3252 }
duke@435 3253
duke@435 3254 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
duke@435 3255 address addr, int offset) {
duke@435 3256 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
duke@435 3257 ((VMJavaVal64*)addr)->d);
duke@435 3258 }
duke@435 3259
duke@435 3260 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
duke@435 3261 int offset) {
duke@435 3262 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
duke@435 3263 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
duke@435 3264 }
duke@435 3265
duke@435 3266 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
duke@435 3267 address addr, int offset) {
duke@435 3268 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
duke@435 3269 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
duke@435 3270 ((VMJavaVal64*)addr)->l;
duke@435 3271 }
duke@435 3272
duke@435 3273 // Locals
duke@435 3274
duke@435 3275 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
duke@435 3276 return (address)locals[Interpreter::local_index_at(-offset)];
duke@435 3277 }
duke@435 3278 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
duke@435 3279 return (jint)locals[Interpreter::local_index_at(-offset)];
duke@435 3280 }
duke@435 3281 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
duke@435 3282 return (jfloat)locals[Interpreter::local_index_at(-offset)];
duke@435 3283 }
duke@435 3284 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
simonis@6483 3285 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
duke@435 3286 }
duke@435 3287 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
duke@435 3288 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
duke@435 3289 }
duke@435 3290 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
duke@435 3291 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
duke@435 3292 }
duke@435 3293
duke@435 3294 // Returns the address of locals value.
duke@435 3295 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
duke@435 3296 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
duke@435 3297 }
duke@435 3298 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
duke@435 3299 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
duke@435 3300 }
duke@435 3301
duke@435 3302 // Used for local value or returnAddress
duke@435 3303 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
duke@435 3304 address value, int offset) {
duke@435 3305 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3306 }
duke@435 3307 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
duke@435 3308 jint value, int offset) {
duke@435 3309 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3310 }
duke@435 3311 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
duke@435 3312 jfloat value, int offset) {
duke@435 3313 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3314 }
duke@435 3315 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
duke@435 3316 oop value, int offset) {
duke@435 3317 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3318 }
duke@435 3319 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
duke@435 3320 jdouble value, int offset) {
duke@435 3321 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
duke@435 3322 }
duke@435 3323 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
duke@435 3324 jlong value, int offset) {
duke@435 3325 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
duke@435 3326 }
duke@435 3327 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
duke@435 3328 address addr, int offset) {
duke@435 3329 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
duke@435 3330 }
duke@435 3331 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
duke@435 3332 address addr, int offset) {
duke@435 3333 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
duke@435 3334 }
duke@435 3335
duke@435 3336 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
duke@435 3337 intptr_t* locals, int locals_offset) {
duke@435 3338 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
duke@435 3339 locals[Interpreter::local_index_at(-locals_offset)] = value;
duke@435 3340 }
duke@435 3341
duke@435 3342
duke@435 3343 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
duke@435 3344 int to_offset) {
duke@435 3345 tos[Interpreter::expr_index_at(-to_offset)] =
duke@435 3346 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
duke@435 3347 }
duke@435 3348
duke@435 3349 void BytecodeInterpreter::dup(intptr_t *tos) {
duke@435 3350 copy_stack_slot(tos, -1, 0);
duke@435 3351 }
duke@435 3352 void BytecodeInterpreter::dup2(intptr_t *tos) {
duke@435 3353 copy_stack_slot(tos, -2, 0);
duke@435 3354 copy_stack_slot(tos, -1, 1);
duke@435 3355 }
duke@435 3356
duke@435 3357 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
duke@435 3358 /* insert top word two down */
duke@435 3359 copy_stack_slot(tos, -1, 0);
duke@435 3360 copy_stack_slot(tos, -2, -1);
duke@435 3361 copy_stack_slot(tos, 0, -2);
duke@435 3362 }
duke@435 3363
duke@435 3364 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
duke@435 3365 /* insert top word three down */
duke@435 3366 copy_stack_slot(tos, -1, 0);
duke@435 3367 copy_stack_slot(tos, -2, -1);
duke@435 3368 copy_stack_slot(tos, -3, -2);
duke@435 3369 copy_stack_slot(tos, 0, -3);
duke@435 3370 }
duke@435 3371 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
duke@435 3372 /* insert top 2 slots three down */
duke@435 3373 copy_stack_slot(tos, -1, 1);
duke@435 3374 copy_stack_slot(tos, -2, 0);
duke@435 3375 copy_stack_slot(tos, -3, -1);
duke@435 3376 copy_stack_slot(tos, 1, -2);
duke@435 3377 copy_stack_slot(tos, 0, -3);
duke@435 3378 }
duke@435 3379 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
duke@435 3380 /* insert top 2 slots four down */
duke@435 3381 copy_stack_slot(tos, -1, 1);
duke@435 3382 copy_stack_slot(tos, -2, 0);
duke@435 3383 copy_stack_slot(tos, -3, -1);
duke@435 3384 copy_stack_slot(tos, -4, -2);
duke@435 3385 copy_stack_slot(tos, 1, -3);
duke@435 3386 copy_stack_slot(tos, 0, -4);
duke@435 3387 }
duke@435 3388
duke@435 3389
duke@435 3390 void BytecodeInterpreter::swap(intptr_t *tos) {
duke@435 3391 // swap top two elements
duke@435 3392 intptr_t val = tos[Interpreter::expr_index_at(1)];
duke@435 3393 // Copy -2 entry to -1
duke@435 3394 copy_stack_slot(tos, -2, -1);
duke@435 3395 // Store saved -1 entry into -2
duke@435 3396 tos[Interpreter::expr_index_at(2)] = val;
duke@435 3397 }
duke@435 3398 // --------------------------------------------------------------------------------
duke@435 3399 // Non-product code
duke@435 3400 #ifndef PRODUCT
duke@435 3401
duke@435 3402 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
duke@435 3403 switch (msg) {
duke@435 3404 case BytecodeInterpreter::no_request: return("no_request");
duke@435 3405 case BytecodeInterpreter::initialize: return("initialize");
duke@435 3406 // status message to C++ interpreter
duke@435 3407 case BytecodeInterpreter::method_entry: return("method_entry");
duke@435 3408 case BytecodeInterpreter::method_resume: return("method_resume");
duke@435 3409 case BytecodeInterpreter::got_monitors: return("got_monitors");
duke@435 3410 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
duke@435 3411 // requests to frame manager from C++ interpreter
duke@435 3412 case BytecodeInterpreter::call_method: return("call_method");
duke@435 3413 case BytecodeInterpreter::return_from_method: return("return_from_method");
duke@435 3414 case BytecodeInterpreter::more_monitors: return("more_monitors");
duke@435 3415 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
duke@435 3416 case BytecodeInterpreter::popping_frame: return("popping_frame");
duke@435 3417 case BytecodeInterpreter::do_osr: return("do_osr");
duke@435 3418 // deopt
duke@435 3419 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
duke@435 3420 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
duke@435 3421 default: return("BAD MSG");
duke@435 3422 }
duke@435 3423 }
duke@435 3424 void
duke@435 3425 BytecodeInterpreter::print() {
duke@435 3426 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
duke@435 3427 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
duke@435 3428 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
duke@435 3429 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
duke@435 3430 {
duke@435 3431 ResourceMark rm;
duke@435 3432 char *method_name = _method->name_and_sig_as_C_string();
duke@435 3433 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
duke@435 3434 }
duke@435 3435 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
duke@435 3436 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
duke@435 3437 tty->print_cr("msg: %s", C_msg(this->_msg));
duke@435 3438 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
duke@435 3439 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
duke@435 3440 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
duke@435 3441 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
duke@435 3442 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
duke@435 3443 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
simonis@6483 3444 tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
duke@435 3445 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
duke@435 3446 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
duke@435 3447 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
duke@435 3448 #ifdef SPARC
duke@435 3449 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
duke@435 3450 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
duke@435 3451 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
duke@435 3452 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
duke@435 3453 #endif
morris@4535 3454 #if !defined(ZERO)
duke@435 3455 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
morris@4535 3456 #endif // !ZERO
duke@435 3457 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
duke@435 3458 }
duke@435 3459
duke@435 3460 extern "C" {
goetz@6445 3461 void PI(uintptr_t arg) {
goetz@6445 3462 ((BytecodeInterpreter*)arg)->print();
goetz@6445 3463 }
duke@435 3464 }
duke@435 3465 #endif // PRODUCT
duke@435 3466
duke@435 3467 #endif // JVMTI
duke@435 3468 #endif // CC_INTERP

mercurial