src/share/vm/interpreter/bytecodeInterpreter.cpp

author:      aoqi
date:        Wed, 27 Apr 2016 01:25:04 +0800
changeset:   0:f90c822e73f8
child:       6876:710a3c8b516e
description:
Initial load
http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8
tag: jdk8u25-b17

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 // no precompiled headers
aoqi@0 26 #include "classfile/vmSymbols.hpp"
aoqi@0 27 #include "gc_interface/collectedHeap.hpp"
aoqi@0 28 #include "interpreter/bytecodeHistogram.hpp"
aoqi@0 29 #include "interpreter/bytecodeInterpreter.hpp"
aoqi@0 30 #include "interpreter/bytecodeInterpreter.inline.hpp"
aoqi@0 31 #include "interpreter/bytecodeInterpreterProfiling.hpp"
aoqi@0 32 #include "interpreter/interpreter.hpp"
aoqi@0 33 #include "interpreter/interpreterRuntime.hpp"
aoqi@0 34 #include "memory/resourceArea.hpp"
aoqi@0 35 #include "oops/methodCounters.hpp"
aoqi@0 36 #include "oops/objArrayKlass.hpp"
aoqi@0 37 #include "oops/oop.inline.hpp"
aoqi@0 38 #include "prims/jvmtiExport.hpp"
aoqi@0 39 #include "prims/jvmtiThreadState.hpp"
aoqi@0 40 #include "runtime/biasedLocking.hpp"
aoqi@0 41 #include "runtime/frame.inline.hpp"
aoqi@0 42 #include "runtime/handles.inline.hpp"
aoqi@0 43 #include "runtime/interfaceSupport.hpp"
aoqi@0 44 #include "runtime/sharedRuntime.hpp"
aoqi@0 45 #include "runtime/threadCritical.hpp"
aoqi@0 46 #include "utilities/exceptions.hpp"
aoqi@0 47 #ifdef TARGET_OS_ARCH_linux_x86
aoqi@0 48 # include "orderAccess_linux_x86.inline.hpp"
aoqi@0 49 #endif
aoqi@0 50 #ifdef TARGET_OS_ARCH_linux_sparc
aoqi@0 51 # include "orderAccess_linux_sparc.inline.hpp"
aoqi@0 52 #endif
aoqi@0 53 #ifdef TARGET_OS_ARCH_linux_zero
aoqi@0 54 # include "orderAccess_linux_zero.inline.hpp"
aoqi@0 55 #endif
aoqi@0 56 #ifdef TARGET_OS_ARCH_solaris_x86
aoqi@0 57 # include "orderAccess_solaris_x86.inline.hpp"
aoqi@0 58 #endif
aoqi@0 59 #ifdef TARGET_OS_ARCH_solaris_sparc
aoqi@0 60 # include "orderAccess_solaris_sparc.inline.hpp"
aoqi@0 61 #endif
aoqi@0 62 #ifdef TARGET_OS_ARCH_windows_x86
aoqi@0 63 # include "orderAccess_windows_x86.inline.hpp"
aoqi@0 64 #endif
aoqi@0 65 #ifdef TARGET_OS_ARCH_linux_arm
aoqi@0 66 # include "orderAccess_linux_arm.inline.hpp"
aoqi@0 67 #endif
aoqi@0 68 #ifdef TARGET_OS_ARCH_linux_ppc
aoqi@0 69 # include "orderAccess_linux_ppc.inline.hpp"
aoqi@0 70 #endif
aoqi@0 71 #ifdef TARGET_OS_ARCH_aix_ppc
aoqi@0 72 # include "orderAccess_aix_ppc.inline.hpp"
aoqi@0 73 #endif
aoqi@0 74 #ifdef TARGET_OS_ARCH_bsd_x86
aoqi@0 75 # include "orderAccess_bsd_x86.inline.hpp"
aoqi@0 76 #endif
aoqi@0 77 #ifdef TARGET_OS_ARCH_bsd_zero
aoqi@0 78 # include "orderAccess_bsd_zero.inline.hpp"
aoqi@0 79 #endif
aoqi@0 80
aoqi@0 81
aoqi@0 82 // no precompiled headers
aoqi@0 83 #ifdef CC_INTERP
aoqi@0 84
aoqi@0 85 /*
aoqi@0 86 * USELABELS - If using GCC, then use labels for the opcode dispatching
aoqi@0 87 * rather than a switch statement. This improves performance because it
aoqi@0 88 * gives us the opportunity to have the instructions that calculate the
aoqi@0 89 * next opcode to jump to be intermixed with the rest of the instructions
aoqi@0 90 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
aoqi@0 91 */
aoqi@0 92 #undef USELABELS
aoqi@0 93 #ifdef __GNUC__
aoqi@0 94 /*
aoqi@0 95 ASSERT signifies debugging. It is much easier to step through bytecodes if we
aoqi@0 96 don't use the computed goto approach.
aoqi@0 97 */
aoqi@0 98 #ifndef ASSERT
aoqi@0 99 #define USELABELS
aoqi@0 100 #endif
aoqi@0 101 #endif
aoqi@0 102
aoqi@0 103 #undef CASE
aoqi@0 104 #ifdef USELABELS
aoqi@0 105 #define CASE(opcode) opc ## opcode
aoqi@0 106 #define DEFAULT opc_default
aoqi@0 107 #else
aoqi@0 108 #define CASE(opcode) case Bytecodes:: opcode
aoqi@0 109 #define DEFAULT default
aoqi@0 110 #endif
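// Illustrative expansion of CASE (a sketch for exposition only):
//   with USELABELS defined,    CASE(_iload) becomes the label      opc_iload:
//   without USELABELS defined, CASE(_iload) becomes the statement  case Bytecodes::_iload:
// so the opcode bodies below compile either as computed-goto targets or as
// ordinary switch cases.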
aoqi@0 111
aoqi@0 112 /*
aoqi@0 113 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
aoqi@0 114 * opcode before going back to the top of the while loop, rather than having
aoqi@0 115 * the top of the while loop handle it. This provides a better opportunity
aoqi@0 116 * for instruction scheduling. Some compilers just do this prefetch
aoqi@0 117 * automatically. Some actually end up with worse performance if you
aoqi@0 118 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
aoqi@0 119 */
aoqi@0 120 #undef PREFETCH_OPCCODE
aoqi@0 121 #define PREFETCH_OPCCODE
aoqi@0 122
aoqi@0 123 /*
aoqi@0 124 Interpreter safepoint: it is expected that the interpreter will have no
aoqi@0 125 handles of its own creation live at an interpreter safepoint. Therefore we
aoqi@0 126 run a HandleMarkCleaner and trash all handles allocated in the call chain
aoqi@0 127 since the JavaCalls::call_helper invocation that initiated the chain.
aoqi@0 128 There really shouldn't be any handles remaining to trash but this is cheap
aoqi@0 129 in relation to a safepoint.
aoqi@0 130 */
aoqi@0 131 #define SAFEPOINT \
aoqi@0 132 if ( SafepointSynchronize::is_synchronizing()) { \
aoqi@0 133 { \
aoqi@0 134 /* zap freed handles rather than GC'ing them */ \
aoqi@0 135 HandleMarkCleaner __hmc(THREAD); \
aoqi@0 136 } \
aoqi@0 137 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
aoqi@0 138 }
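// Usage note (exposition): SAFEPOINT is expanded at method entry and on
// backward branches (see DO_BACKEDGE_CHECKS below), so a thread running
// purely interpreted code still polls SafepointSynchronize::is_synchronizing()
// and blocks in the VM when a safepoint has been requested.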
aoqi@0 139
aoqi@0 140 /*
aoqi@0 141 * VM_JAVA_ERROR - Macro for throwing a java exception from
aoqi@0 142 * the interpreter loop. Should really be a CALL_VM but there
aoqi@0 143 * is no entry point to do the transition to vm so we just
aoqi@0 144 * do it by hand here.
aoqi@0 145 */
aoqi@0 146 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
aoqi@0 147 DECACHE_STATE(); \
aoqi@0 148 SET_LAST_JAVA_FRAME(); \
aoqi@0 149 { \
aoqi@0 150 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
aoqi@0 151 ThreadInVMfromJava trans(THREAD); \
aoqi@0 152 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
aoqi@0 153 } \
aoqi@0 154 RESET_LAST_JAVA_FRAME(); \
aoqi@0 155 CACHE_STATE();
aoqi@0 156
aoqi@0 157 // Normal throw of a java error.
aoqi@0 158 #define VM_JAVA_ERROR(name, msg, note_a_trap) \
aoqi@0 159 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
aoqi@0 160 goto handle_exception;
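// Illustrative use (exposition): an opcode that needs to throw, e.g. a null
// receiver check, does
//   VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap);
// which decaches state, performs the throw with a proper VM transition,
// recaches state and then jumps to the shared handle_exception code.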
aoqi@0 161
aoqi@0 162 #ifdef PRODUCT
aoqi@0 163 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
aoqi@0 164 #else
aoqi@0 165 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
aoqi@0 166 { \
aoqi@0 167 BytecodeCounter::_counter_value++; \
aoqi@0 168 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
aoqi@0 169 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
aoqi@0 170 if (TraceBytecodes) { \
aoqi@0 171 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
aoqi@0 172 topOfStack[Interpreter::expr_index_at(1)], \
aoqi@0 173 topOfStack[Interpreter::expr_index_at(2)]), \
aoqi@0 174 handle_exception); \
aoqi@0 175 } \
aoqi@0 176 }
aoqi@0 177 #endif
aoqi@0 178
aoqi@0 179 #undef DEBUGGER_SINGLE_STEP_NOTIFY
aoqi@0 180 #ifdef VM_JVMTI
aoqi@0 181 /* NOTE: (kbr) This macro must be called AFTER the PC has been
aoqi@0 182 incremented. JvmtiExport::at_single_stepping_point() may cause a
aoqi@0 183 breakpoint opcode to get inserted at the current PC to allow the
aoqi@0 184 debugger to coalesce single-step events.
aoqi@0 185
aoqi@0 186 As a result if we call at_single_stepping_point() we refetch opcode
aoqi@0 187 to get the current opcode. This will override any other prefetching
aoqi@0 188 that might have occurred.
aoqi@0 189 */
aoqi@0 190 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
aoqi@0 191 { \
aoqi@0 192 if (_jvmti_interp_events) { \
aoqi@0 193 if (JvmtiExport::should_post_single_step()) { \
aoqi@0 194 DECACHE_STATE(); \
aoqi@0 195 SET_LAST_JAVA_FRAME(); \
aoqi@0 196 ThreadInVMfromJava trans(THREAD); \
aoqi@0 197 JvmtiExport::at_single_stepping_point(THREAD, \
aoqi@0 198 istate->method(), \
aoqi@0 199 pc); \
aoqi@0 200 RESET_LAST_JAVA_FRAME(); \
aoqi@0 201 CACHE_STATE(); \
aoqi@0 202 if (THREAD->pop_frame_pending() && \
aoqi@0 203 !THREAD->pop_frame_in_process()) { \
aoqi@0 204 goto handle_Pop_Frame; \
aoqi@0 205 } \
aoqi@0 206 if (THREAD->jvmti_thread_state() && \
aoqi@0 207 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
aoqi@0 208 goto handle_Early_Return; \
aoqi@0 209 } \
aoqi@0 210 opcode = *pc; \
aoqi@0 211 } \
aoqi@0 212 } \
aoqi@0 213 }
aoqi@0 214 #else
aoqi@0 215 #define DEBUGGER_SINGLE_STEP_NOTIFY()
aoqi@0 216 #endif
aoqi@0 217
aoqi@0 218 /*
aoqi@0 219 * CONTINUE - Macro for executing the next opcode.
aoqi@0 220 */
aoqi@0 221 #undef CONTINUE
aoqi@0 222 #ifdef USELABELS
aoqi@0 223 // Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
aoqi@0 224 // initialization (which is the initialization of the table pointer...)
aoqi@0 225 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
aoqi@0 226 #define CONTINUE { \
aoqi@0 227 opcode = *pc; \
aoqi@0 228 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 229 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 230 DISPATCH(opcode); \
aoqi@0 231 }
aoqi@0 232 #else
aoqi@0 233 #ifdef PREFETCH_OPCCODE
aoqi@0 234 #define CONTINUE { \
aoqi@0 235 opcode = *pc; \
aoqi@0 236 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 237 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 238 continue; \
aoqi@0 239 }
aoqi@0 240 #else
aoqi@0 241 #define CONTINUE { \
aoqi@0 242 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 243 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 244 continue; \
aoqi@0 245 }
aoqi@0 246 #endif
aoqi@0 247 #endif
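// Dispatch sketch (exposition, assuming USELABELS): every opcode body ends by
// fetching the next opcode and jumping through the dispatch table, roughly
//   opcode = *pc;
//   goto *(void*)dispatch_table[opcode];
// Without USELABELS the same step is a plain 'continue' back to the switch.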
aoqi@0 248
aoqi@0 249
aoqi@0 250 #define UPDATE_PC(opsize) {pc += opsize; }
aoqi@0 251 /*
aoqi@0 252 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
aoqi@0 253 */
aoqi@0 254 #undef UPDATE_PC_AND_TOS
aoqi@0 255 #define UPDATE_PC_AND_TOS(opsize, stack) \
aoqi@0 256 {pc += opsize; MORE_STACK(stack); }
aoqi@0 257
aoqi@0 258 /*
aoqi@0 259 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
aoqi@0 260 * and executing the next opcode. It's somewhat similar to the combination
aoqi@0 261 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
aoqi@0 262 */
aoqi@0 263 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
aoqi@0 264 #ifdef USELABELS
aoqi@0 265 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
aoqi@0 266 pc += opsize; opcode = *pc; MORE_STACK(stack); \
aoqi@0 267 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 268 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 269 DISPATCH(opcode); \
aoqi@0 270 }
aoqi@0 271
aoqi@0 272 #define UPDATE_PC_AND_CONTINUE(opsize) { \
aoqi@0 273 pc += opsize; opcode = *pc; \
aoqi@0 274 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 275 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 276 DISPATCH(opcode); \
aoqi@0 277 }
aoqi@0 278 #else
aoqi@0 279 #ifdef PREFETCH_OPCCODE
aoqi@0 280 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
aoqi@0 281 pc += opsize; opcode = *pc; MORE_STACK(stack); \
aoqi@0 282 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 283 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 284 goto do_continue; \
aoqi@0 285 }
aoqi@0 286
aoqi@0 287 #define UPDATE_PC_AND_CONTINUE(opsize) { \
aoqi@0 288 pc += opsize; opcode = *pc; \
aoqi@0 289 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 290 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 291 goto do_continue; \
aoqi@0 292 }
aoqi@0 293 #else
aoqi@0 294 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
aoqi@0 295 pc += opsize; MORE_STACK(stack); \
aoqi@0 296 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 297 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 298 goto do_continue; \
aoqi@0 299 }
aoqi@0 300
aoqi@0 301 #define UPDATE_PC_AND_CONTINUE(opsize) { \
aoqi@0 302 pc += opsize; \
aoqi@0 303 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 304 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 305 goto do_continue; \
aoqi@0 306 }
aoqi@0 307 #endif /* PREFETCH_OPCCODE */
aoqi@0 308 #endif /* USELABELS */
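// Example (exposition): a one-byte opcode that pushes a single stack slot,
// such as aconst_null, finishes with
//   UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
// i.e. advance pc by one byte, grow the expression stack by one slot, and
// dispatch the next opcode.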
aoqi@0 309
aoqi@0 310 // About to call a new method: save the adjusted pc and return to the frame manager
aoqi@0 311 #define UPDATE_PC_AND_RETURN(opsize) \
aoqi@0 312 DECACHE_TOS(); \
aoqi@0 313 istate->set_bcp(pc+opsize); \
aoqi@0 314 return;
aoqi@0 315
aoqi@0 316
aoqi@0 317 #define METHOD istate->method()
aoqi@0 318 #define GET_METHOD_COUNTERS(res) \
aoqi@0 319 res = METHOD->method_counters(); \
aoqi@0 320 if (res == NULL) { \
aoqi@0 321 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
aoqi@0 322 }
aoqi@0 323
aoqi@0 324 #define OSR_REQUEST(res, branch_pc) \
aoqi@0 325 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
aoqi@0 326 /*
aoqi@0 327 * For those opcodes that need to have a GC point on a backwards branch
aoqi@0 328 */
aoqi@0 329
aoqi@0 330 // Backedge counting is kind of strange. The asm interpreter will increment
aoqi@0 331 // the backedge counter as a separate counter but it does its comparisons
aoqi@0 332 // to the sum (scaled) of invocation counter and backedge count to make
aoqi@0 333 // a decision. Seems kind of odd to sum them together like that
aoqi@0 334
aoqi@0 335 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
aoqi@0 336
aoqi@0 337
aoqi@0 338 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
aoqi@0 339 if ((skip) <= 0) { \
aoqi@0 340 MethodCounters* mcs; \
aoqi@0 341 GET_METHOD_COUNTERS(mcs); \
aoqi@0 342 if (UseLoopCounter) { \
aoqi@0 343 bool do_OSR = UseOnStackReplacement; \
aoqi@0 344 mcs->backedge_counter()->increment(); \
aoqi@0 345 if (ProfileInterpreter) { \
aoqi@0 346 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
aoqi@0 347 /* Check for overflow against MDO count. */ \
aoqi@0 348 do_OSR = do_OSR \
aoqi@0 349 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
aoqi@0 350 /* When ProfileInterpreter is on, the backedge_count comes */ \
aoqi@0 351 /* from the methodDataOop, which value does not get reset on */ \
aoqi@0 352 /* the call to frequency_counter_overflow(). To avoid */ \
aoqi@0 353 /* excessive calls to the overflow routine while the method is */ \
aoqi@0 354 /* being compiled, add a second test to make sure the overflow */ \
aoqi@0 355 /* function is called only once every overflow_frequency. */ \
aoqi@0 356 && (!(mdo_last_branch_taken_count & 1023)); \
aoqi@0 357 } else { \
aoqi@0 358 /* check for overflow of backedge counter */ \
aoqi@0 359 do_OSR = do_OSR \
aoqi@0 360 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
aoqi@0 361 } \
aoqi@0 362 if (do_OSR) { \
aoqi@0 363 nmethod* osr_nmethod; \
aoqi@0 364 OSR_REQUEST(osr_nmethod, branch_pc); \
aoqi@0 365 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
aoqi@0 366 intptr_t* buf; \
aoqi@0 367 /* Call OSR migration with last java frame only, no checks. */ \
aoqi@0 368 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
aoqi@0 369 istate->set_msg(do_osr); \
aoqi@0 370 istate->set_osr_buf((address)buf); \
aoqi@0 371 istate->set_osr_entry(osr_nmethod->osr_entry()); \
aoqi@0 372 return; \
aoqi@0 373 } \
aoqi@0 374 } \
aoqi@0 375 } /* UseCompiler ... */ \
aoqi@0 376 SAFEPOINT; \
aoqi@0 377 }
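// Usage sketch (exposition): branch opcodes pass the signed branch offset and
// the pre-branch bcp, e.g. something like
//   DO_BACKEDGE_CHECKS(offset, branch_pc);
// Only backward branches (offset <= 0) bump the backedge counter, possibly
// request an OSR compilation, and poll for a safepoint.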
aoqi@0 378
aoqi@0 383 /*
aoqi@0 384 * Macros for caching and flushing the interpreter state. Some local
aoqi@0 385 * variables need to be flushed out to the frame before we do certain
aoqi@0 386 * things (like pushing frames or becoming GC safe) and some need to
aoqi@0 387 * be recached later (like after popping a frame). We could use one
aoqi@0 388 * macro to cache or decache everything, but this would be less than
aoqi@0 389 * optimal because we don't always need to cache or decache everything;
aoqi@0 390 * some things we know are already cached or decached.
aoqi@0 391 */
aoqi@0 392 #undef DECACHE_TOS
aoqi@0 393 #undef CACHE_TOS
aoqi@0 394 #undef CACHE_PREV_TOS
aoqi@0 395 #define DECACHE_TOS() istate->set_stack(topOfStack);
aoqi@0 396
aoqi@0 397 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
aoqi@0 398
aoqi@0 399 #undef DECACHE_PC
aoqi@0 400 #undef CACHE_PC
aoqi@0 401 #define DECACHE_PC() istate->set_bcp(pc);
aoqi@0 402 #define CACHE_PC() pc = istate->bcp();
aoqi@0 403 #define CACHE_CP() cp = istate->constants();
aoqi@0 404 #define CACHE_LOCALS() locals = istate->locals();
aoqi@0 405 #undef CACHE_FRAME
aoqi@0 406 #define CACHE_FRAME()
aoqi@0 407
aoqi@0 408 // BCI() returns the current bytecode-index.
aoqi@0 409 #undef BCI
aoqi@0 410 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
aoqi@0 411
aoqi@0 412 /*
aoqi@0 413 * CHECK_NULL - Macro for throwing a NullPointerException if the object
aoqi@0 414 * passed is a null ref.
aoqi@0 415 * On some architectures/platforms it should be possible to do this implicitly
aoqi@0 416 */
aoqi@0 417 #undef CHECK_NULL
aoqi@0 418 #define CHECK_NULL(obj_) \
aoqi@0 419 if ((obj_) == NULL) { \
aoqi@0 420 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
aoqi@0 421 } \
aoqi@0 422 VERIFY_OOP(obj_)
aoqi@0 423
aoqi@0 424 #define VMdoubleConstZero() 0.0
aoqi@0 425 #define VMdoubleConstOne() 1.0
aoqi@0 426 #define VMlongConstZero() (max_jlong-max_jlong)
aoqi@0 427 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
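// Note (exposition): the long constants are built from max_jlong arithmetic so
// that they carry jlong type on every platform without a literal suffix:
// (max_jlong - max_jlong) is a jlong-typed 0, and adding 1 gives a jlong-typed 1.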
aoqi@0 428
aoqi@0 429 /*
aoqi@0 430 * Alignment
aoqi@0 431 */
aoqi@0 432 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
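// Example (exposition): VMalignWordUp rounds a value up to the next 4-byte
// boundary, e.g. VMalignWordUp(5) == 8 and VMalignWordUp(8) == 8.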
aoqi@0 433
aoqi@0 434 // Decache the interpreter state that the interpreter modifies directly (GC modifications are indirect)
aoqi@0 435 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
aoqi@0 436
aoqi@0 437 // Reload interpreter state after calling the VM or a possible GC
aoqi@0 438 #define CACHE_STATE() \
aoqi@0 439 CACHE_TOS(); \
aoqi@0 440 CACHE_PC(); \
aoqi@0 441 CACHE_CP(); \
aoqi@0 442 CACHE_LOCALS();
aoqi@0 443
aoqi@0 444 // Call the VM with last java frame only.
aoqi@0 445 #define CALL_VM_NAKED_LJF(func) \
aoqi@0 446 DECACHE_STATE(); \
aoqi@0 447 SET_LAST_JAVA_FRAME(); \
aoqi@0 448 func; \
aoqi@0 449 RESET_LAST_JAVA_FRAME(); \
aoqi@0 450 CACHE_STATE();
aoqi@0 451
aoqi@0 452 // Call the VM. Don't check for pending exceptions.
aoqi@0 453 #define CALL_VM_NOCHECK(func) \
aoqi@0 454 CALL_VM_NAKED_LJF(func) \
aoqi@0 455 if (THREAD->pop_frame_pending() && \
aoqi@0 456 !THREAD->pop_frame_in_process()) { \
aoqi@0 457 goto handle_Pop_Frame; \
aoqi@0 458 } \
aoqi@0 459 if (THREAD->jvmti_thread_state() && \
aoqi@0 460 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
aoqi@0 461 goto handle_Early_Return; \
aoqi@0 462 }
aoqi@0 463
aoqi@0 464 // Call the VM and check for pending exceptions
aoqi@0 465 #define CALL_VM(func, label) { \
aoqi@0 466 CALL_VM_NOCHECK(func); \
aoqi@0 467 if (THREAD->has_pending_exception()) goto label; \
aoqi@0 468 }
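// Illustrative use (exposition): a VM transition that may raise an exception
// is written as
//   CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
// Interpreter state is decached around the call, and if an exception is
// pending on return, control jumps to the handle_exception label.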
aoqi@0 469
aoqi@0 470 /*
aoqi@0 471 * BytecodeInterpreter::run(interpreterState istate)
aoqi@0 472 * BytecodeInterpreter::runWithChecks(interpreterState istate)
aoqi@0 473 *
aoqi@0 474 * The real deal. This is where bytecodes actually get interpreted.
aoqi@0 475 * Basically it's a big while loop that iterates until we return from
aoqi@0 476 * the method passed in.
aoqi@0 477 *
aoqi@0 478 * runWithChecks is used if JVMTI is enabled.
aoqi@0 479 *
aoqi@0 480 */
aoqi@0 481 #if defined(VM_JVMTI)
aoqi@0 482 void
aoqi@0 483 BytecodeInterpreter::runWithChecks(interpreterState istate) {
aoqi@0 484 #else
aoqi@0 485 void
aoqi@0 486 BytecodeInterpreter::run(interpreterState istate) {
aoqi@0 487 #endif
aoqi@0 488
aoqi@0 489 // In order to simplify some tests based on switches set at runtime
aoqi@0 490 // we invoke the interpreter a single time after switches are enabled
aoqi@0 491 // and set simpler-to-test variables rather than method calls or complex
aoqi@0 492 // boolean expressions.
aoqi@0 493
aoqi@0 494 static int initialized = 0;
aoqi@0 495 static int checkit = 0;
aoqi@0 496 static intptr_t* c_addr = NULL;
aoqi@0 497 static intptr_t c_value;
aoqi@0 498
aoqi@0 499 if (checkit && *c_addr != c_value) {
aoqi@0 500 os::breakpoint();
aoqi@0 501 }
aoqi@0 502 #ifdef VM_JVMTI
aoqi@0 503 static bool _jvmti_interp_events = 0;
aoqi@0 504 #endif
aoqi@0 505
aoqi@0 506 static int _compiling; // (UseCompiler || CountCompiledCalls)
aoqi@0 507
aoqi@0 508 #ifdef ASSERT
aoqi@0 509 if (istate->_msg != initialize) {
aoqi@0 510 // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
aoqi@0 511 // because in that case, EnableInvokeDynamic is true by default but will be later switched off
aoqi@0 512 // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
aoqi@0 513 // for the old JSR292 implementation.
aoqi@0 514 // This leads to a situation where 'istate->_stack_limit' always accounts for
aoqi@0 515 // methodOopDesc::extra_stack_entries() because it is computed in
aoqi@0 516 // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
aoqi@0 517 // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
aoqi@0 518 // account for extra_stack_entries() anymore because at the time when it is called
aoqi@0 519 // EnableInvokeDynamic was already set to false.
aoqi@0 520 // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
aoqi@0 521 // switched off because of the wrong classes.
aoqi@0 522 if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
aoqi@0 523 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
aoqi@0 524 } else {
aoqi@0 525 const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
aoqi@0 526 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
aoqi@0 527 + 1), "bad stack limit");
aoqi@0 528 }
aoqi@0 529 #ifndef SHARK
aoqi@0 530 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
aoqi@0 531 #endif // !SHARK
aoqi@0 532 }
aoqi@0 533 // Verify linkages.
aoqi@0 534 interpreterState l = istate;
aoqi@0 535 do {
aoqi@0 536 assert(l == l->_self_link, "bad link");
aoqi@0 537 l = l->_prev_link;
aoqi@0 538 } while (l != NULL);
aoqi@0 539 // Screwups with stack management usually cause us to overwrite istate;
aoqi@0 540 // save a copy so we can verify it.
aoqi@0 541 interpreterState orig = istate;
aoqi@0 542 #endif
aoqi@0 543
aoqi@0 544 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
aoqi@0 545 register address pc = istate->bcp();
aoqi@0 546 register jubyte opcode;
aoqi@0 547 register intptr_t* locals = istate->locals();
aoqi@0 548 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
aoqi@0 549 #ifdef LOTS_OF_REGS
aoqi@0 550 register JavaThread* THREAD = istate->thread();
aoqi@0 551 #else
aoqi@0 552 #undef THREAD
aoqi@0 553 #define THREAD istate->thread()
aoqi@0 554 #endif
aoqi@0 555
aoqi@0 556 #ifdef USELABELS
aoqi@0 557 const static void* const opclabels_data[256] = {
aoqi@0 558 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
aoqi@0 559 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
aoqi@0 560 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
aoqi@0 561 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
aoqi@0 562
aoqi@0 563 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
aoqi@0 564 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
aoqi@0 565 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1,
aoqi@0 566 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
aoqi@0 567
aoqi@0 568 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
aoqi@0 569 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
aoqi@0 570 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
aoqi@0 571 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
aoqi@0 572
aoqi@0 573 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
aoqi@0 574 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
aoqi@0 575 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
aoqi@0 576 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
aoqi@0 577
aoqi@0 578 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
aoqi@0 579 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
aoqi@0 580 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
aoqi@0 581 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
aoqi@0 582
aoqi@0 583 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
aoqi@0 584 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
aoqi@0 585 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
aoqi@0 586 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
aoqi@0 587
aoqi@0 588 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
aoqi@0 589 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
aoqi@0 590 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
aoqi@0 591 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
aoqi@0 592
aoqi@0 593 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
aoqi@0 594 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
aoqi@0 595 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
aoqi@0 596 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
aoqi@0 597
aoqi@0 598 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
aoqi@0 599 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
aoqi@0 600 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
aoqi@0 601 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
aoqi@0 602
aoqi@0 603 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
aoqi@0 604 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
aoqi@0 605 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
aoqi@0 606 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
aoqi@0 607
aoqi@0 608 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
aoqi@0 609 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
aoqi@0 610 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch,
aoqi@0 611 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
aoqi@0 612
aoqi@0 613 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
aoqi@0 614 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
aoqi@0 615 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
aoqi@0 616 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
aoqi@0 617
aoqi@0 618 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
aoqi@0 619 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
aoqi@0 620 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default,
aoqi@0 621 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 622
aoqi@0 623 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 624 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 625 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 626 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 627
aoqi@0 628 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 629 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
aoqi@0 630 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
aoqi@0 631 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 632
aoqi@0 633 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 634 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 635 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 636 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
aoqi@0 637 };
aoqi@0 638 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
aoqi@0 639 #endif /* USELABELS */
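// Note (exposition): opclabels_data has one entry for each possible opcode
// byte (0x00..0xFF); opcodes this interpreter does not implement directly map
// to &&opc_default, so an unexpected opcode still dispatches to well-defined
// handling code.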
aoqi@0 640
aoqi@0 641 #ifdef ASSERT
aoqi@0 642 // this will trigger a VERIFY_OOP on entry
aoqi@0 643 if (istate->msg() != initialize && ! METHOD->is_static()) {
aoqi@0 644 oop rcvr = LOCALS_OBJECT(0);
aoqi@0 645 VERIFY_OOP(rcvr);
aoqi@0 646 }
aoqi@0 647 #endif
aoqi@0 648 // #define HACK
aoqi@0 649 #ifdef HACK
aoqi@0 650 bool interesting = false;
aoqi@0 651 #endif // HACK
aoqi@0 652
aoqi@0 653 /* QQQ this should be a stack method so we don't know actual direction */
aoqi@0 654 guarantee(istate->msg() == initialize ||
aoqi@0 655 topOfStack >= istate->stack_limit() &&
aoqi@0 656 topOfStack < istate->stack_base(),
aoqi@0 657 "Stack top out of range");
aoqi@0 658
aoqi@0 659 #ifdef CC_INTERP_PROFILE
aoqi@0 660 // MethodData's last branch taken count.
aoqi@0 661 uint mdo_last_branch_taken_count = 0;
aoqi@0 662 #else
aoqi@0 663 const uint mdo_last_branch_taken_count = 0;
aoqi@0 664 #endif
aoqi@0 665
aoqi@0 666 switch (istate->msg()) {
aoqi@0 667 case initialize: {
aoqi@0 668 if (initialized++) ShouldNotReachHere(); // Only one initialize call.
aoqi@0 669 _compiling = (UseCompiler || CountCompiledCalls);
aoqi@0 670 #ifdef VM_JVMTI
aoqi@0 671 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
aoqi@0 672 #endif
aoqi@0 673 return;
aoqi@0 674 }
aoqi@0 675 break;
aoqi@0 676 case method_entry: {
aoqi@0 677 THREAD->set_do_not_unlock();
aoqi@0 678 // count invocations
aoqi@0 679 assert(initialized, "Interpreter not initialized");
aoqi@0 680 if (_compiling) {
aoqi@0 681 MethodCounters* mcs;
aoqi@0 682 GET_METHOD_COUNTERS(mcs);
aoqi@0 683 if (ProfileInterpreter) {
aoqi@0 684 METHOD->increment_interpreter_invocation_count(THREAD);
aoqi@0 685 }
aoqi@0 686 mcs->invocation_counter()->increment();
aoqi@0 687 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
aoqi@0 688 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
aoqi@0 689 // We no longer retry on a counter overflow.
aoqi@0 690 }
aoqi@0 691 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 692 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 693 SAFEPOINT;
aoqi@0 694 }
aoqi@0 695
aoqi@0 696 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
aoqi@0 697 // initialize
aoqi@0 698 os::breakpoint();
aoqi@0 699 }
aoqi@0 700
aoqi@0 701 #ifdef HACK
aoqi@0 702 {
aoqi@0 703 ResourceMark rm;
aoqi@0 704 char *method_name = istate->method()->name_and_sig_as_C_string();
aoqi@0 705 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
aoqi@0 706 tty->print_cr("entering: depth %d bci: %d",
aoqi@0 707 (istate->_stack_base - istate->_stack),
aoqi@0 708 istate->_bcp - istate->_method->code_base());
aoqi@0 709 interesting = true;
aoqi@0 710 }
aoqi@0 711 }
aoqi@0 712 #endif // HACK
aoqi@0 713
aoqi@0 714 // Lock method if synchronized.
aoqi@0 715 if (METHOD->is_synchronized()) {
aoqi@0 716 // oop rcvr = locals[0].j.r;
aoqi@0 717 oop rcvr;
aoqi@0 718 if (METHOD->is_static()) {
aoqi@0 719 rcvr = METHOD->constants()->pool_holder()->java_mirror();
aoqi@0 720 } else {
aoqi@0 721 rcvr = LOCALS_OBJECT(0);
aoqi@0 722 VERIFY_OOP(rcvr);
aoqi@0 723 }
aoqi@0 724 // The initial monitor is ours for the taking.
aoqi@0 725 // The monitor is no longer filled in by the frame manager, as this caused a race condition with biased locking.
aoqi@0 726 BasicObjectLock* mon = &istate->monitor_base()[-1];
aoqi@0 727 mon->set_obj(rcvr);
aoqi@0 728 bool success = false;
aoqi@0 729 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
aoqi@0 730 markOop mark = rcvr->mark();
aoqi@0 731 intptr_t hash = (intptr_t) markOopDesc::no_hash;
aoqi@0 732 // Implies UseBiasedLocking.
aoqi@0 733 if (mark->has_bias_pattern()) {
aoqi@0 734 uintptr_t thread_ident;
aoqi@0 735 uintptr_t anticipated_bias_locking_value;
aoqi@0 736 thread_ident = (uintptr_t)istate->thread();
aoqi@0 737 anticipated_bias_locking_value =
aoqi@0 738 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
aoqi@0 739 ~((uintptr_t) markOopDesc::age_mask_in_place);
aoqi@0 740
aoqi@0 741 if (anticipated_bias_locking_value == 0) {
aoqi@0 742 // Already biased towards this thread, nothing to do.
aoqi@0 743 if (PrintBiasedLockingStatistics) {
aoqi@0 744 (* BiasedLocking::biased_lock_entry_count_addr())++;
aoqi@0 745 }
aoqi@0 746 success = true;
aoqi@0 747 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
aoqi@0 748 // Try to revoke bias.
aoqi@0 749 markOop header = rcvr->klass()->prototype_header();
aoqi@0 750 if (hash != markOopDesc::no_hash) {
aoqi@0 751 header = header->copy_set_hash(hash);
aoqi@0 752 }
aoqi@0 753 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
aoqi@0 754 if (PrintBiasedLockingStatistics)
aoqi@0 755 (*BiasedLocking::revoked_lock_entry_count_addr())++;
aoqi@0 756 }
aoqi@0 757 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
aoqi@0 758 // Try to rebias.
aoqi@0 759 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
aoqi@0 760 if (hash != markOopDesc::no_hash) {
aoqi@0 761 new_header = new_header->copy_set_hash(hash);
aoqi@0 762 }
aoqi@0 763 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
aoqi@0 764 if (PrintBiasedLockingStatistics) {
aoqi@0 765 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
aoqi@0 766 }
aoqi@0 767 } else {
aoqi@0 768 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
aoqi@0 769 }
aoqi@0 770 success = true;
aoqi@0 771 } else {
aoqi@0 772 // Try to bias towards thread in case object is anonymously biased.
aoqi@0 773 markOop header = (markOop) ((uintptr_t) mark &
aoqi@0 774 ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
aoqi@0 775 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
aoqi@0 776 if (hash != markOopDesc::no_hash) {
aoqi@0 777 header = header->copy_set_hash(hash);
aoqi@0 778 }
aoqi@0 779 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
aoqi@0 780 // Debugging hint.
aoqi@0 781 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
aoqi@0 782 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
aoqi@0 783 if (PrintBiasedLockingStatistics) {
aoqi@0 784 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
aoqi@0 785 }
aoqi@0 786 } else {
aoqi@0 787 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
aoqi@0 788 }
aoqi@0 789 success = true;
aoqi@0 790 }
aoqi@0 791 }
aoqi@0 792
aoqi@0 793 // Traditional lightweight locking.
aoqi@0 794 if (!success) {
aoqi@0 795 markOop displaced = rcvr->mark()->set_unlocked();
aoqi@0 796 mon->lock()->set_displaced_header(displaced);
aoqi@0 797 bool call_vm = UseHeavyMonitors;
aoqi@0 798 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
aoqi@0 799 // Is it simple recursive case?
aoqi@0 800 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
aoqi@0 801 mon->lock()->set_displaced_header(NULL);
aoqi@0 802 } else {
aoqi@0 803 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
aoqi@0 804 }
aoqi@0 805 }
aoqi@0 806 }
aoqi@0 807 }
aoqi@0 808 THREAD->clr_do_not_unlock();
aoqi@0 809
aoqi@0 810 // Notify jvmti
aoqi@0 811 #ifdef VM_JVMTI
aoqi@0 812 if (_jvmti_interp_events) {
aoqi@0 813 // Whenever JVMTI puts a thread in interp_only_mode, method
aoqi@0 814 // entry/exit events are sent for that thread to track stack depth.
aoqi@0 815 if (THREAD->is_interp_only_mode()) {
aoqi@0 816 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
aoqi@0 817 handle_exception);
aoqi@0 818 }
aoqi@0 819 }
aoqi@0 820 #endif /* VM_JVMTI */
aoqi@0 821
aoqi@0 822 goto run;
aoqi@0 823 }
aoqi@0 824
aoqi@0 825 case popping_frame: {
aoqi@0 826 // Returned from a java call to pop the frame; restart the call.
aoqi@0 827 // Clear the message so we don't confuse ourselves later.
aoqi@0 828 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
aoqi@0 829 istate->set_msg(no_request);
aoqi@0 830 if (_compiling) {
aoqi@0 831 // Set MDX back to the ProfileData of the invoke bytecode that will be
aoqi@0 832 // restarted.
aoqi@0 833 SET_MDX(NULL);
aoqi@0 834 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 835 }
aoqi@0 836 THREAD->clr_pop_frame_in_process();
aoqi@0 837 goto run;
aoqi@0 838 }
aoqi@0 839
aoqi@0 840 case method_resume: {
aoqi@0 841 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
aoqi@0 842 // resume
aoqi@0 843 os::breakpoint();
aoqi@0 844 }
aoqi@0 845 #ifdef HACK
aoqi@0 846 {
aoqi@0 847 ResourceMark rm;
aoqi@0 848 char *method_name = istate->method()->name_and_sig_as_C_string();
aoqi@0 849 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
aoqi@0 850 tty->print_cr("resume: depth %d bci: %d",
aoqi@0 851 (istate->_stack_base - istate->_stack) ,
aoqi@0 852 istate->_bcp - istate->_method->code_base());
aoqi@0 853 interesting = true;
aoqi@0 854 }
aoqi@0 855 }
aoqi@0 856 #endif // HACK
aoqi@0 857 // returned from a java call, continue executing.
aoqi@0 858 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
aoqi@0 859 goto handle_Pop_Frame;
aoqi@0 860 }
aoqi@0 861 if (THREAD->jvmti_thread_state() &&
aoqi@0 862 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
aoqi@0 863 goto handle_Early_Return;
aoqi@0 864 }
aoqi@0 865
aoqi@0 866 if (THREAD->has_pending_exception()) goto handle_exception;
aoqi@0 867 // Update the pc by the saved size of the invoke bytecode
aoqi@0 868 UPDATE_PC(istate->bcp_advance());
aoqi@0 869
aoqi@0 870 if (_compiling) {
aoqi@0 871 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 872 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 873 }
aoqi@0 874 goto run;
aoqi@0 875 }
aoqi@0 876
aoqi@0 877 case deopt_resume2: {
aoqi@0 878 // Returned from an opcode that will reexecute. Deopt was
aoqi@0 879 // a result of a PopFrame request.
aoqi@0 880 //
aoqi@0 881
aoqi@0 882 if (_compiling) {
aoqi@0 883 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 884 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 885 }
aoqi@0 886 goto run;
aoqi@0 887 }
aoqi@0 888
aoqi@0 889 case deopt_resume: {
aoqi@0 890 // Returned from an opcode that has completed. The stack has
aoqi@0 891 // the result; all we need to do is skip across the bytecode
aoqi@0 892 // and continue (assuming there is no exception pending).
aoqi@0 893 //
aoqi@0 894 // compute continuation length
aoqi@0 895 //
aoqi@0 896 // Note: it is possible to deopt at a return_register_finalizer opcode
aoqi@0 897 // because this requires entering the vm to do the registering. While the
aoqi@0 898 // opcode is complete, we can't advance because there are no more opcodes,
aoqi@0 899 // much like trying to deopt at a poll return. In that case we simply
aoqi@0 900 // get out of here.
aoqi@0 901 //
aoqi@0 902 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
aoqi@0 903 // this will do the right thing even if an exception is pending.
aoqi@0 904 goto handle_return;
aoqi@0 905 }
aoqi@0 906 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
aoqi@0 907 if (THREAD->has_pending_exception()) goto handle_exception;
aoqi@0 908
aoqi@0 909 if (_compiling) {
aoqi@0 910 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 911 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 912 }
aoqi@0 913 goto run;
aoqi@0 914 }
aoqi@0 915 case got_monitors: {
aoqi@0 916 // Continue locking now that we have a monitor to use.
aoqi@0 917 // We expect to find the newly allocated monitor at the "top" of the monitor stack.
aoqi@0 918 oop lockee = STACK_OBJECT(-1);
aoqi@0 919 VERIFY_OOP(lockee);
aoqi@0 920 // dereferencing lockee ought to provoke an implicit null check
aoqi@0 921 // find a free monitor
aoqi@0 922 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
aoqi@0 923 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
aoqi@0 924 entry->set_obj(lockee);
aoqi@0 925 bool success = false;
aoqi@0 926 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
aoqi@0 927
aoqi@0 928 markOop mark = lockee->mark();
aoqi@0 929 intptr_t hash = (intptr_t) markOopDesc::no_hash;
aoqi@0 930 // implies UseBiasedLocking
aoqi@0 931 if (mark->has_bias_pattern()) {
aoqi@0 932 uintptr_t thread_ident;
aoqi@0 933 uintptr_t anticipated_bias_locking_value;
aoqi@0 934 thread_ident = (uintptr_t)istate->thread();
aoqi@0 935 anticipated_bias_locking_value =
aoqi@0 936 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
aoqi@0 937 ~((uintptr_t) markOopDesc::age_mask_in_place);
aoqi@0 938
aoqi@0 939 if (anticipated_bias_locking_value == 0) {
aoqi@0 940 // already biased towards this thread, nothing to do
aoqi@0 941 if (PrintBiasedLockingStatistics) {
aoqi@0 942 (* BiasedLocking::biased_lock_entry_count_addr())++;
aoqi@0 943 }
aoqi@0 944 success = true;
aoqi@0 945 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
aoqi@0 946 // try revoke bias
aoqi@0 947 markOop header = lockee->klass()->prototype_header();
aoqi@0 948 if (hash != markOopDesc::no_hash) {
aoqi@0 949 header = header->copy_set_hash(hash);
aoqi@0 950 }
aoqi@0 951 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
aoqi@0 952 if (PrintBiasedLockingStatistics) {
aoqi@0 953 (*BiasedLocking::revoked_lock_entry_count_addr())++;
aoqi@0 954 }
aoqi@0 955 }
aoqi@0 956 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
aoqi@0 957 // try rebias
aoqi@0 958 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
aoqi@0 959 if (hash != markOopDesc::no_hash) {
aoqi@0 960 new_header = new_header->copy_set_hash(hash);
aoqi@0 961 }
aoqi@0 962 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
aoqi@0 963 if (PrintBiasedLockingStatistics) {
aoqi@0 964 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
aoqi@0 965 }
aoqi@0 966 } else {
aoqi@0 967 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 968 }
aoqi@0 969 success = true;
aoqi@0 970 } else {
aoqi@0 971 // try to bias towards thread in case object is anonymously biased
aoqi@0 972 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
aoqi@0 973 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
aoqi@0 974 if (hash != markOopDesc::no_hash) {
aoqi@0 975 header = header->copy_set_hash(hash);
aoqi@0 976 }
aoqi@0 977 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
aoqi@0 978 // debugging hint
aoqi@0 979 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
aoqi@0 980 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
aoqi@0 981 if (PrintBiasedLockingStatistics) {
aoqi@0 982 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
aoqi@0 983 }
aoqi@0 984 } else {
aoqi@0 985 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 986 }
aoqi@0 987 success = true;
aoqi@0 988 }
aoqi@0 989 }
aoqi@0 990
aoqi@0 991 // traditional lightweight locking
aoqi@0 992 if (!success) {
aoqi@0 993 markOop displaced = lockee->mark()->set_unlocked();
aoqi@0 994 entry->lock()->set_displaced_header(displaced);
aoqi@0 995 bool call_vm = UseHeavyMonitors;
aoqi@0 996 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
aoqi@0 997 // Is it simple recursive case?
aoqi@0 998 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
aoqi@0 999 entry->lock()->set_displaced_header(NULL);
aoqi@0 1000 } else {
aoqi@0 1001 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1002 }
aoqi@0 1003 }
aoqi@0 1004 }
aoqi@0 1005 UPDATE_PC_AND_TOS(1, -1);
aoqi@0 1006 goto run;
aoqi@0 1007 }
aoqi@0 1008 default: {
aoqi@0 1009 fatal("Unexpected message from frame manager");
aoqi@0 1010 }
aoqi@0 1011 }
aoqi@0 1012
aoqi@0 1013 run:
aoqi@0 1014
aoqi@0 1015 DO_UPDATE_INSTRUCTION_COUNT(*pc)
aoqi@0 1016 DEBUGGER_SINGLE_STEP_NOTIFY();
aoqi@0 1017 #ifdef PREFETCH_OPCCODE
aoqi@0 1018 opcode = *pc; /* prefetch first opcode */
aoqi@0 1019 #endif
aoqi@0 1020
aoqi@0 1021 #ifndef USELABELS
aoqi@0 1022 while (1)
aoqi@0 1023 #endif
aoqi@0 1024 {
aoqi@0 1025 #ifndef PREFETCH_OPCCODE
aoqi@0 1026 opcode = *pc;
aoqi@0 1027 #endif
aoqi@0 1028 // Seems like this happens twice per opcode. At worst this is only
aoqi@0 1029 // needed at entry to the loop.
aoqi@0 1030 // DEBUGGER_SINGLE_STEP_NOTIFY();
aoqi@0 1031 /* Using this label avoids double breakpoints when quickening and
aoqi@0 1032 * when returning from transition frames.
aoqi@0 1033 */
aoqi@0 1034 opcode_switch:
aoqi@0 1035 assert(istate == orig, "Corrupted istate");
aoqi@0 1036 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
aoqi@0 1037 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
aoqi@0 1038 assert(topOfStack < istate->stack_base(), "Stack underrun");
aoqi@0 1039
aoqi@0 1040 #ifdef USELABELS
aoqi@0 1041 DISPATCH(opcode);
aoqi@0 1042 #else
aoqi@0 1043 switch (opcode)
aoqi@0 1044 #endif
aoqi@0 1045 {
aoqi@0 1046 CASE(_nop):
aoqi@0 1047 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1048
aoqi@0 1049 /* Push miscellaneous constants onto the stack. */
aoqi@0 1050
aoqi@0 1051 CASE(_aconst_null):
aoqi@0 1052 SET_STACK_OBJECT(NULL, 0);
aoqi@0 1053 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1054
aoqi@0 1055 #undef OPC_CONST_n
aoqi@0 1056 #define OPC_CONST_n(opcode, const_type, value) \
aoqi@0 1057 CASE(opcode): \
aoqi@0 1058 SET_STACK_ ## const_type(value, 0); \
aoqi@0 1059 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1060
aoqi@0 1061 OPC_CONST_n(_iconst_m1, INT, -1);
aoqi@0 1062 OPC_CONST_n(_iconst_0, INT, 0);
aoqi@0 1063 OPC_CONST_n(_iconst_1, INT, 1);
aoqi@0 1064 OPC_CONST_n(_iconst_2, INT, 2);
aoqi@0 1065 OPC_CONST_n(_iconst_3, INT, 3);
aoqi@0 1066 OPC_CONST_n(_iconst_4, INT, 4);
aoqi@0 1067 OPC_CONST_n(_iconst_5, INT, 5);
aoqi@0 1068 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
aoqi@0 1069 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
aoqi@0 1070 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
aoqi@0 1071
aoqi@0 1072 #undef OPC_CONST2_n
aoqi@0 1073 #define OPC_CONST2_n(opcname, value, key, kind) \
aoqi@0 1074 CASE(_##opcname): \
aoqi@0 1075 { \
aoqi@0 1076 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
aoqi@0 1077 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
aoqi@0 1078 }
aoqi@0 1079 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
aoqi@0 1080 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
aoqi@0 1081 OPC_CONST2_n(lconst_0, Zero, long, LONG);
aoqi@0 1082 OPC_CONST2_n(lconst_1, One, long, LONG);
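// Expansion sketch (exposition): OPC_CONST2_n(lconst_0, Zero, long, LONG)
// produces the case for Bytecodes::_lconst_0, which pushes VMlongConstZero()
// as a two-slot long and then does UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2).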
aoqi@0 1083
aoqi@0 1084 /* Load constant from constant pool: */
aoqi@0 1085
aoqi@0 1086 /* Push a 1-byte signed integer value onto the stack. */
aoqi@0 1087 CASE(_bipush):
aoqi@0 1088 SET_STACK_INT((jbyte)(pc[1]), 0);
aoqi@0 1089 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
aoqi@0 1090
aoqi@0 1091 /* Push a 2-byte signed integer constant onto the stack. */
aoqi@0 1092 CASE(_sipush):
aoqi@0 1093 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
aoqi@0 1094 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
aoqi@0 1095
aoqi@0 1096 /* load from local variable */
aoqi@0 1097
aoqi@0 1098 CASE(_aload):
aoqi@0 1099 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
aoqi@0 1100 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
aoqi@0 1101 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
aoqi@0 1102
aoqi@0 1103 CASE(_iload):
aoqi@0 1104 CASE(_fload):
aoqi@0 1105 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
aoqi@0 1106 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
aoqi@0 1107
aoqi@0 1108 CASE(_lload):
aoqi@0 1109 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
aoqi@0 1110 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
aoqi@0 1111
aoqi@0 1112 CASE(_dload):
aoqi@0 1113 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
aoqi@0 1114 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
aoqi@0 1115
aoqi@0 1116 #undef OPC_LOAD_n
aoqi@0 1117 #define OPC_LOAD_n(num) \
aoqi@0 1118 CASE(_aload_##num): \
aoqi@0 1119 VERIFY_OOP(LOCALS_OBJECT(num)); \
aoqi@0 1120 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
aoqi@0 1121 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
aoqi@0 1122 \
aoqi@0 1123 CASE(_iload_##num): \
aoqi@0 1124 CASE(_fload_##num): \
aoqi@0 1125 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
aoqi@0 1126 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
aoqi@0 1127 \
aoqi@0 1128 CASE(_lload_##num): \
aoqi@0 1129 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
aoqi@0 1130 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
aoqi@0 1131 CASE(_dload_##num): \
aoqi@0 1132 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
aoqi@0 1133 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1134
aoqi@0 1135 OPC_LOAD_n(0);
aoqi@0 1136 OPC_LOAD_n(1);
aoqi@0 1137 OPC_LOAD_n(2);
aoqi@0 1138 OPC_LOAD_n(3);
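// Expansion sketch (exposition): OPC_LOAD_n(0) emits the bodies for aload_0,
// iload_0/fload_0, lload_0 and dload_0; e.g. aload_0 verifies LOCALS_OBJECT(0),
// pushes it, and advances pc by one byte while growing the stack by one slot.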
aoqi@0 1139
aoqi@0 1140 /* store to a local variable */
aoqi@0 1141
aoqi@0 1142 CASE(_astore):
aoqi@0 1143 astore(topOfStack, -1, locals, pc[1]);
aoqi@0 1144 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
aoqi@0 1145
aoqi@0 1146 CASE(_istore):
aoqi@0 1147 CASE(_fstore):
aoqi@0 1148 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
aoqi@0 1149 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
aoqi@0 1150
aoqi@0 1151 CASE(_lstore):
aoqi@0 1152 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
aoqi@0 1153 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
aoqi@0 1154
aoqi@0 1155 CASE(_dstore):
aoqi@0 1156 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
aoqi@0 1157 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
aoqi@0 1158
aoqi@0 1159 CASE(_wide): {
aoqi@0 1160 uint16_t reg = Bytes::get_Java_u2(pc + 2);
aoqi@0 1161
aoqi@0 1162 opcode = pc[1];
aoqi@0 1163
aoqi@0 1164 // Wide and its sub-bytecode are counted as separate instructions. If we
aoqi@0 1165 // don't account for this here, the bytecode trace skips the next bytecode.
aoqi@0 1166 DO_UPDATE_INSTRUCTION_COUNT(opcode);
aoqi@0 1167
aoqi@0 1168 switch(opcode) {
aoqi@0 1169 case Bytecodes::_aload:
aoqi@0 1170 VERIFY_OOP(LOCALS_OBJECT(reg));
aoqi@0 1171 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
aoqi@0 1172 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
aoqi@0 1173
aoqi@0 1174 case Bytecodes::_iload:
aoqi@0 1175 case Bytecodes::_fload:
aoqi@0 1176 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
aoqi@0 1177 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
aoqi@0 1178
aoqi@0 1179 case Bytecodes::_lload:
aoqi@0 1180 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
aoqi@0 1181 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
aoqi@0 1182
aoqi@0 1183 case Bytecodes::_dload:
aoqi@0 1184 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
aoqi@0 1185 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
aoqi@0 1186
aoqi@0 1187 case Bytecodes::_astore:
aoqi@0 1188 astore(topOfStack, -1, locals, reg);
aoqi@0 1189 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
aoqi@0 1190
aoqi@0 1191 case Bytecodes::_istore:
aoqi@0 1192 case Bytecodes::_fstore:
aoqi@0 1193 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
aoqi@0 1194 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
aoqi@0 1195
aoqi@0 1196 case Bytecodes::_lstore:
aoqi@0 1197 SET_LOCALS_LONG(STACK_LONG(-1), reg);
aoqi@0 1198 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
aoqi@0 1199
aoqi@0 1200 case Bytecodes::_dstore:
aoqi@0 1201 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
aoqi@0 1202 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
aoqi@0 1203
aoqi@0 1204 case Bytecodes::_iinc: {
aoqi@0 1205 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
aoqi@0 1206 // Be nice to see what this generates.... QQQ
aoqi@0 1207 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
aoqi@0 1208 UPDATE_PC_AND_CONTINUE(6);
aoqi@0 1209 }
aoqi@0 1210 case Bytecodes::_ret:
aoqi@0 1211 // Profile ret.
aoqi@0 1212 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
aoqi@0 1213 // Now, update the pc.
aoqi@0 1214 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
aoqi@0 1215 UPDATE_PC_AND_CONTINUE(0);
aoqi@0 1216 default:
aoqi@0 1217 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
aoqi@0 1218 }
aoqi@0 1219 }
aoqi@0 1220
aoqi@0 1221
aoqi@0 1222 #undef OPC_STORE_n
aoqi@0 1223 #define OPC_STORE_n(num) \
aoqi@0 1224 CASE(_astore_##num): \
aoqi@0 1225 astore(topOfStack, -1, locals, num); \
aoqi@0 1226 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1227 CASE(_istore_##num): \
aoqi@0 1228 CASE(_fstore_##num): \
aoqi@0 1229 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
aoqi@0 1230 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1231
aoqi@0 1232 OPC_STORE_n(0);
aoqi@0 1233 OPC_STORE_n(1);
aoqi@0 1234 OPC_STORE_n(2);
aoqi@0 1235 OPC_STORE_n(3);
aoqi@0 1236
aoqi@0 1237 #undef OPC_DSTORE_n
aoqi@0 1238 #define OPC_DSTORE_n(num) \
aoqi@0 1239 CASE(_dstore_##num): \
aoqi@0 1240 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
aoqi@0 1241 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
aoqi@0 1242 CASE(_lstore_##num): \
aoqi@0 1243 SET_LOCALS_LONG(STACK_LONG(-1), num); \
aoqi@0 1244 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
aoqi@0 1245
aoqi@0 1246 OPC_DSTORE_n(0);
aoqi@0 1247 OPC_DSTORE_n(1);
aoqi@0 1248 OPC_DSTORE_n(2);
aoqi@0 1249 OPC_DSTORE_n(3);
aoqi@0 1250
aoqi@0 1251 /* stack pop, dup, and insert opcodes */
aoqi@0 1252
aoqi@0 1253
aoqi@0 1254 CASE(_pop): /* Discard the top item on the stack */
aoqi@0 1255 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1256
aoqi@0 1257
aoqi@0 1258 CASE(_pop2): /* Discard the top 2 items on the stack */
aoqi@0 1259 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
aoqi@0 1260
aoqi@0 1261
aoqi@0 1262 CASE(_dup): /* Duplicate the top item on the stack */
aoqi@0 1263 dup(topOfStack);
aoqi@0 1264 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1265
aoqi@0 1266 CASE(_dup2): /* Duplicate the top 2 items on the stack */
aoqi@0 1267 dup2(topOfStack);
aoqi@0 1268 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1269
aoqi@0 1270 CASE(_dup_x1): /* insert top word two down */
aoqi@0 1271 dup_x1(topOfStack);
aoqi@0 1272 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1273
aoqi@0 1274 CASE(_dup_x2): /* insert top word three down */
aoqi@0 1275 dup_x2(topOfStack);
aoqi@0 1276 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1277
aoqi@0 1278 CASE(_dup2_x1): /* insert top 2 slots three down */
aoqi@0 1279 dup2_x1(topOfStack);
aoqi@0 1280 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1281
aoqi@0 1282 CASE(_dup2_x2): /* insert top 2 slots four down */
aoqi@0 1283 dup2_x2(topOfStack);
aoqi@0 1284 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1285
aoqi@0 1286 CASE(_swap): { /* swap top two elements on the stack */
aoqi@0 1287 swap(topOfStack);
aoqi@0 1288 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1289 }
aoqi@0 1290
aoqi@0 1291 /* Perform various binary integer operations */
aoqi@0 1292
aoqi@0 1293 #undef OPC_INT_BINARY
aoqi@0 1294 #define OPC_INT_BINARY(opcname, opname, test) \
aoqi@0 1295 CASE(_i##opcname): \
aoqi@0 1296 if (test && (STACK_INT(-1) == 0)) { \
aoqi@0 1297 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
aoqi@0 1298 "/ by zero", note_div0Check_trap); \
aoqi@0 1299 } \
aoqi@0 1300 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
aoqi@0 1301 STACK_INT(-1)), \
aoqi@0 1302 -2); \
aoqi@0 1303 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1304 CASE(_l##opcname): \
aoqi@0 1305 { \
aoqi@0 1306 if (test) { \
aoqi@0 1307 jlong l1 = STACK_LONG(-1); \
aoqi@0 1308 if (VMlongEqz(l1)) { \
aoqi@0 1309 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
aoqi@0 1310 "/ by long zero", note_div0Check_trap); \
aoqi@0 1311 } \
aoqi@0 1312 } \
aoqi@0 1313 /* First long at (-1,-2) next long at (-3,-4) */ \
aoqi@0 1314 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
aoqi@0 1315 STACK_LONG(-1)), \
aoqi@0 1316 -3); \
aoqi@0 1317 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
aoqi@0 1318 }
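                  // The 'test' parameter enables the divide-by-zero check; it is 1 only for the
                  // div/rem instantiations below. Longs occupy two stack slots, so the second
                  // operand is read at -1 and the first at -3.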
aoqi@0 1319
aoqi@0 1320 OPC_INT_BINARY(add, Add, 0);
aoqi@0 1321 OPC_INT_BINARY(sub, Sub, 0);
aoqi@0 1322 OPC_INT_BINARY(mul, Mul, 0);
aoqi@0 1323 OPC_INT_BINARY(and, And, 0);
aoqi@0 1324 OPC_INT_BINARY(or, Or, 0);
aoqi@0 1325 OPC_INT_BINARY(xor, Xor, 0);
aoqi@0 1326 OPC_INT_BINARY(div, Div, 1);
aoqi@0 1327 OPC_INT_BINARY(rem, Rem, 1);
aoqi@0 1328
aoqi@0 1329
aoqi@0 1330       /* Perform various binary floating-point operations */
aoqi@0 1331       /* On some machines/platforms/compilers the divide-by-zero check can be implicit */
aoqi@0 1332
aoqi@0 1333 #undef OPC_FLOAT_BINARY
aoqi@0 1334 #define OPC_FLOAT_BINARY(opcname, opname) \
aoqi@0 1335 CASE(_d##opcname): { \
aoqi@0 1336 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
aoqi@0 1337 STACK_DOUBLE(-1)), \
aoqi@0 1338 -3); \
aoqi@0 1339 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
aoqi@0 1340 } \
aoqi@0 1341 CASE(_f##opcname): \
aoqi@0 1342 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
aoqi@0 1343 STACK_FLOAT(-1)), \
aoqi@0 1344 -2); \
aoqi@0 1345 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1346
aoqi@0 1347
aoqi@0 1348 OPC_FLOAT_BINARY(add, Add);
aoqi@0 1349 OPC_FLOAT_BINARY(sub, Sub);
aoqi@0 1350 OPC_FLOAT_BINARY(mul, Mul);
aoqi@0 1351 OPC_FLOAT_BINARY(div, Div);
aoqi@0 1352 OPC_FLOAT_BINARY(rem, Rem);
aoqi@0 1353
aoqi@0 1354 /* Shift operations
aoqi@0 1355 * Shift left int and long: ishl, lshl
aoqi@0 1356 * Logical shift right int and long w/zero extension: iushr, lushr
aoqi@0 1357 * Arithmetic shift right int and long w/sign extension: ishr, lshr
aoqi@0 1358 */
aoqi@0 1359
aoqi@0 1360 #undef OPC_SHIFT_BINARY
aoqi@0 1361 #define OPC_SHIFT_BINARY(opcname, opname) \
aoqi@0 1362 CASE(_i##opcname): \
aoqi@0 1363 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
aoqi@0 1364 STACK_INT(-1)), \
aoqi@0 1365 -2); \
aoqi@0 1366 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1367 CASE(_l##opcname): \
aoqi@0 1368 { \
aoqi@0 1369 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
aoqi@0 1370 STACK_INT(-1)), \
aoqi@0 1371 -2); \
aoqi@0 1372 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1373 }
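                  // Note: per the JVM spec only the low 5 bits (int) or low 6 bits (long) of the
                  // shift count are significant; that masking is assumed to be applied inside the
                  // platform-specific VMint*/VMlong* shift helpers.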
aoqi@0 1374
aoqi@0 1375 OPC_SHIFT_BINARY(shl, Shl);
aoqi@0 1376 OPC_SHIFT_BINARY(shr, Shr);
aoqi@0 1377 OPC_SHIFT_BINARY(ushr, Ushr);
aoqi@0 1378
aoqi@0 1379 /* Increment local variable by constant */
aoqi@0 1380 CASE(_iinc):
aoqi@0 1381 {
aoqi@0 1382 // locals[pc[1]].j.i += (jbyte)(pc[2]);
aoqi@0 1383 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
aoqi@0 1384 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 1385 }
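                  // The wide-prefixed form of iinc (16-bit local index, 16-bit signed increment)
                  // is handled by the separate wide-opcode case of this switch, not here.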
aoqi@0 1386
aoqi@0 1387 /* negate the value on the top of the stack */
aoqi@0 1388
aoqi@0 1389 CASE(_ineg):
aoqi@0 1390 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
aoqi@0 1391 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1392
aoqi@0 1393 CASE(_fneg):
aoqi@0 1394 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
aoqi@0 1395 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1396
aoqi@0 1397 CASE(_lneg):
aoqi@0 1398 {
aoqi@0 1399 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
aoqi@0 1400 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1401 }
aoqi@0 1402
aoqi@0 1403 CASE(_dneg):
aoqi@0 1404 {
aoqi@0 1405 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
aoqi@0 1406 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1407 }
aoqi@0 1408
aoqi@0 1409 /* Conversion operations */
aoqi@0 1410
aoqi@0 1411 CASE(_i2f): /* convert top of stack int to float */
aoqi@0 1412 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
aoqi@0 1413 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1414
aoqi@0 1415 CASE(_i2l): /* convert top of stack int to long */
aoqi@0 1416 {
aoqi@0 1417 // this is ugly QQQ
aoqi@0 1418 jlong r = VMint2Long(STACK_INT(-1));
aoqi@0 1419 MORE_STACK(-1); // Pop
aoqi@0 1420 SET_STACK_LONG(r, 1);
aoqi@0 1421
aoqi@0 1422 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1423 }
aoqi@0 1424
aoqi@0 1425 CASE(_i2d): /* convert top of stack int to double */
aoqi@0 1426 {
aoqi@0 1427           // this is ugly QQQ (the intermediate jlong cast is redundant; an int converts to double exactly)
aoqi@0 1428 jdouble r = (jlong)STACK_INT(-1);
aoqi@0 1429 MORE_STACK(-1); // Pop
aoqi@0 1430 SET_STACK_DOUBLE(r, 1);
aoqi@0 1431
aoqi@0 1432 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1433 }
aoqi@0 1434
aoqi@0 1435 CASE(_l2i): /* convert top of stack long to int */
aoqi@0 1436 {
aoqi@0 1437 jint r = VMlong2Int(STACK_LONG(-1));
aoqi@0 1438 MORE_STACK(-2); // Pop
aoqi@0 1439 SET_STACK_INT(r, 0);
aoqi@0 1440 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1441 }
aoqi@0 1442
aoqi@0 1443 CASE(_l2f): /* convert top of stack long to float */
aoqi@0 1444 {
aoqi@0 1445 jlong r = STACK_LONG(-1);
aoqi@0 1446 MORE_STACK(-2); // Pop
aoqi@0 1447 SET_STACK_FLOAT(VMlong2Float(r), 0);
aoqi@0 1448 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1449 }
aoqi@0 1450
aoqi@0 1451 CASE(_l2d): /* convert top of stack long to double */
aoqi@0 1452 {
aoqi@0 1453 jlong r = STACK_LONG(-1);
aoqi@0 1454 MORE_STACK(-2); // Pop
aoqi@0 1455 SET_STACK_DOUBLE(VMlong2Double(r), 1);
aoqi@0 1456 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1457 }
aoqi@0 1458
aoqi@0 1459 CASE(_f2i): /* Convert top of stack float to int */
aoqi@0 1460 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
aoqi@0 1461 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1462
aoqi@0 1463 CASE(_f2l): /* convert top of stack float to long */
aoqi@0 1464 {
aoqi@0 1465 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
aoqi@0 1466 MORE_STACK(-1); // POP
aoqi@0 1467 SET_STACK_LONG(r, 1);
aoqi@0 1468 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1469 }
aoqi@0 1470
aoqi@0 1471 CASE(_f2d): /* convert top of stack float to double */
aoqi@0 1472 {
aoqi@0 1473 jfloat f;
aoqi@0 1474 jdouble r;
aoqi@0 1475 f = STACK_FLOAT(-1);
aoqi@0 1476 r = (jdouble) f;
aoqi@0 1477 MORE_STACK(-1); // POP
aoqi@0 1478 SET_STACK_DOUBLE(r, 1);
aoqi@0 1479 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1480 }
aoqi@0 1481
aoqi@0 1482 CASE(_d2i): /* convert top of stack double to int */
aoqi@0 1483 {
aoqi@0 1484 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
aoqi@0 1485 MORE_STACK(-2);
aoqi@0 1486 SET_STACK_INT(r1, 0);
aoqi@0 1487 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1488 }
aoqi@0 1489
aoqi@0 1490 CASE(_d2f): /* convert top of stack double to float */
aoqi@0 1491 {
aoqi@0 1492 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
aoqi@0 1493 MORE_STACK(-2);
aoqi@0 1494 SET_STACK_FLOAT(r1, 0);
aoqi@0 1495 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1496 }
aoqi@0 1497
aoqi@0 1498 CASE(_d2l): /* convert top of stack double to long */
aoqi@0 1499 {
aoqi@0 1500 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
aoqi@0 1501 MORE_STACK(-2);
aoqi@0 1502 SET_STACK_LONG(r1, 1);
aoqi@0 1503 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1504 }
aoqi@0 1505
aoqi@0 1506 CASE(_i2b):
aoqi@0 1507 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
aoqi@0 1508 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1509
aoqi@0 1510 CASE(_i2c):
aoqi@0 1511 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
aoqi@0 1512 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1513
aoqi@0 1514 CASE(_i2s):
aoqi@0 1515 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
aoqi@0 1516 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1517
aoqi@0 1518 /* comparison operators */
aoqi@0 1519
aoqi@0 1520
aoqi@0 1521 #define COMPARISON_OP(name, comparison) \
aoqi@0 1522 CASE(_if_icmp##name): { \
aoqi@0 1523 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
aoqi@0 1524 int skip = cmp \
aoqi@0 1525 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1526 address branch_pc = pc; \
aoqi@0 1527 /* Profile branch. */ \
aoqi@0 1528 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1529 UPDATE_PC_AND_TOS(skip, -2); \
aoqi@0 1530 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1531 CONTINUE; \
aoqi@0 1532 } \
aoqi@0 1533 CASE(_if##name): { \
aoqi@0 1534 const bool cmp = (STACK_INT(-1) comparison 0); \
aoqi@0 1535 int skip = cmp \
aoqi@0 1536 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1537 address branch_pc = pc; \
aoqi@0 1538 /* Profile branch. */ \
aoqi@0 1539 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1540 UPDATE_PC_AND_TOS(skip, -1); \
aoqi@0 1541 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1542 CONTINUE; \
aoqi@0 1543 }
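                  // For all of these conditional branches: a taken branch advances pc by the signed
                  // 16-bit offset stored at pc+1 (relative to this bytecode); an untaken branch
                  // advances by 3, the instruction length. DO_BACKEDGE_CHECKS gives backward
                  // branches (negative skip) a chance to poll for a safepoint and, where that
                  // support is enabled, to update the backedge/OSR counters.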
aoqi@0 1544
aoqi@0 1545 #define COMPARISON_OP2(name, comparison) \
aoqi@0 1546 COMPARISON_OP(name, comparison) \
aoqi@0 1547 CASE(_if_acmp##name): { \
aoqi@0 1548 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
aoqi@0 1549 int skip = cmp \
aoqi@0 1550 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1551 address branch_pc = pc; \
aoqi@0 1552 /* Profile branch. */ \
aoqi@0 1553 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1554 UPDATE_PC_AND_TOS(skip, -2); \
aoqi@0 1555 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1556 CONTINUE; \
aoqi@0 1557 }
aoqi@0 1558
aoqi@0 1559 #define NULL_COMPARISON_NOT_OP(name) \
aoqi@0 1560 CASE(_if##name): { \
aoqi@0 1561 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
aoqi@0 1562 int skip = cmp \
aoqi@0 1563 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1564 address branch_pc = pc; \
aoqi@0 1565 /* Profile branch. */ \
aoqi@0 1566 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1567 UPDATE_PC_AND_TOS(skip, -1); \
aoqi@0 1568 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1569 CONTINUE; \
aoqi@0 1570 }
aoqi@0 1571
aoqi@0 1572 #define NULL_COMPARISON_OP(name) \
aoqi@0 1573 CASE(_if##name): { \
aoqi@0 1574 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
aoqi@0 1575 int skip = cmp \
aoqi@0 1576 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1577 address branch_pc = pc; \
aoqi@0 1578 /* Profile branch. */ \
aoqi@0 1579 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1580 UPDATE_PC_AND_TOS(skip, -1); \
aoqi@0 1581 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1582 CONTINUE; \
aoqi@0 1583 }
aoqi@0 1584 COMPARISON_OP(lt, <);
aoqi@0 1585 COMPARISON_OP(gt, >);
aoqi@0 1586 COMPARISON_OP(le, <=);
aoqi@0 1587 COMPARISON_OP(ge, >=);
aoqi@0 1588 COMPARISON_OP2(eq, ==); /* include ref comparison */
aoqi@0 1589 COMPARISON_OP2(ne, !=); /* include ref comparison */
aoqi@0 1590 NULL_COMPARISON_OP(null);
aoqi@0 1591 NULL_COMPARISON_NOT_OP(nonnull);
aoqi@0 1592
aoqi@0 1593 /* Goto pc at specified offset in switch table. */
aoqi@0 1594
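                  /* tableswitch operand layout (per the JVM spec), after 0-3 alignment pad bytes:
                   *   int32 default_offset; int32 low; int32 high;
                   *   int32 jump_offsets[high - low + 1];
                   * All values are big-endian and offsets are relative to this opcode's address. */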
aoqi@0 1595 CASE(_tableswitch): {
aoqi@0 1596 jint* lpc = (jint*)VMalignWordUp(pc+1);
aoqi@0 1597 int32_t key = STACK_INT(-1);
aoqi@0 1598 int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
aoqi@0 1599 int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
aoqi@0 1600 int32_t skip;
aoqi@0 1601 key -= low;
aoqi@0 1602 if (((uint32_t) key > (uint32_t)(high - low))) {
aoqi@0 1603 key = -1;
aoqi@0 1604 skip = Bytes::get_Java_u4((address)&lpc[0]);
aoqi@0 1605 } else {
aoqi@0 1606 skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
aoqi@0 1607 }
aoqi@0 1608 // Profile switch.
aoqi@0 1609 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
aoqi@0 1610 // Does this really need a full backedge check (osr)?
aoqi@0 1611 address branch_pc = pc;
aoqi@0 1612 UPDATE_PC_AND_TOS(skip, -1);
aoqi@0 1613 DO_BACKEDGE_CHECKS(skip, branch_pc);
aoqi@0 1614 CONTINUE;
aoqi@0 1615 }
aoqi@0 1616
aoqi@0 1617 /* Goto pc whose table entry matches specified key. */
aoqi@0 1618
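                  /* lookupswitch operand layout (per the JVM spec), after 0-3 alignment pad bytes:
                   *   int32 default_offset; int32 npairs;
                   *   { int32 match; int32 offset; } pairs[npairs];  // sorted by match value
                   * This interpreter does a simple linear scan of the pairs rather than a binary search. */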
aoqi@0 1619 CASE(_lookupswitch): {
aoqi@0 1620 jint* lpc = (jint*)VMalignWordUp(pc+1);
aoqi@0 1621 int32_t key = STACK_INT(-1);
aoqi@0 1622 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
aoqi@0 1623 // Remember index.
aoqi@0 1624 int index = -1;
aoqi@0 1625 int newindex = 0;
aoqi@0 1626 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
aoqi@0 1627 while (--npairs >= 0) {
aoqi@0 1628 lpc += 2;
aoqi@0 1629 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
aoqi@0 1630 skip = Bytes::get_Java_u4((address)&lpc[1]);
aoqi@0 1631 index = newindex;
aoqi@0 1632 break;
aoqi@0 1633 }
aoqi@0 1634 newindex += 1;
aoqi@0 1635 }
aoqi@0 1636 // Profile switch.
aoqi@0 1637 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
aoqi@0 1638 address branch_pc = pc;
aoqi@0 1639 UPDATE_PC_AND_TOS(skip, -1);
aoqi@0 1640 DO_BACKEDGE_CHECKS(skip, branch_pc);
aoqi@0 1641 CONTINUE;
aoqi@0 1642 }
aoqi@0 1643
aoqi@0 1644 CASE(_fcmpl):
aoqi@0 1645 CASE(_fcmpg):
aoqi@0 1646 {
aoqi@0 1647 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
aoqi@0 1648 STACK_FLOAT(-1),
aoqi@0 1649 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
aoqi@0 1650 -2);
aoqi@0 1651 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1652 }
aoqi@0 1653
aoqi@0 1654 CASE(_dcmpl):
aoqi@0 1655 CASE(_dcmpg):
aoqi@0 1656 {
aoqi@0 1657 int r = VMdoubleCompare(STACK_DOUBLE(-3),
aoqi@0 1658 STACK_DOUBLE(-1),
aoqi@0 1659 (opcode == Bytecodes::_dcmpl ? -1 : 1));
aoqi@0 1660 MORE_STACK(-4); // Pop
aoqi@0 1661 SET_STACK_INT(r, 0);
aoqi@0 1662 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1663 }
aoqi@0 1664
aoqi@0 1665 CASE(_lcmp):
aoqi@0 1666 {
aoqi@0 1667 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
aoqi@0 1668 MORE_STACK(-4);
aoqi@0 1669 SET_STACK_INT(r, 0);
aoqi@0 1670 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1671 }
aoqi@0 1672
aoqi@0 1673
aoqi@0 1674 /* Return from a method */
aoqi@0 1675
aoqi@0 1676 CASE(_areturn):
aoqi@0 1677 CASE(_ireturn):
aoqi@0 1678 CASE(_freturn):
aoqi@0 1679 {
aoqi@0 1680 // Allow a safepoint before returning to frame manager.
aoqi@0 1681 SAFEPOINT;
aoqi@0 1682
aoqi@0 1683 goto handle_return;
aoqi@0 1684 }
aoqi@0 1685
aoqi@0 1686 CASE(_lreturn):
aoqi@0 1687 CASE(_dreturn):
aoqi@0 1688 {
aoqi@0 1689 // Allow a safepoint before returning to frame manager.
aoqi@0 1690 SAFEPOINT;
aoqi@0 1691 goto handle_return;
aoqi@0 1692 }
aoqi@0 1693
aoqi@0 1694 CASE(_return_register_finalizer): {
aoqi@0 1695
aoqi@0 1696 oop rcvr = LOCALS_OBJECT(0);
aoqi@0 1697 VERIFY_OOP(rcvr);
aoqi@0 1698 if (rcvr->klass()->has_finalizer()) {
aoqi@0 1699 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
aoqi@0 1700 }
aoqi@0 1701 goto handle_return;
aoqi@0 1702 }
aoqi@0 1703 CASE(_return): {
aoqi@0 1704
aoqi@0 1705 // Allow a safepoint before returning to frame manager.
aoqi@0 1706 SAFEPOINT;
aoqi@0 1707 goto handle_return;
aoqi@0 1708 }
aoqi@0 1709
aoqi@0 1710 /* Array access byte-codes */
aoqi@0 1711
aoqi@0 1712 /* Every array access byte-code starts out like this */
aoqi@0 1713 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
aoqi@0 1714 #define ARRAY_INTRO(arrayOff) \
aoqi@0 1715 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
aoqi@0 1716 jint index = STACK_INT(arrayOff + 1); \
aoqi@0 1717 char message[jintAsStringSize]; \
aoqi@0 1718 CHECK_NULL(arrObj); \
aoqi@0 1719 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
aoqi@0 1720 sprintf(message, "%d", index); \
aoqi@0 1721 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
aoqi@0 1722 message, note_rangeCheck_trap); \
aoqi@0 1723 }
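                  // ARRAY_INTRO performs both the null check and the bounds check; comparing the
                  // index against the length as unsigned 32-bit values rejects negative indices in
                  // the same single test.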
aoqi@0 1724
aoqi@0 1725 /* 32-bit loads. These handle conversion from < 32-bit types */
aoqi@0 1726 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
aoqi@0 1727 { \
aoqi@0 1728 ARRAY_INTRO(-2); \
aoqi@0 1729 (void)extra; \
aoqi@0 1730 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
aoqi@0 1731 -2); \
aoqi@0 1732 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1733 }
aoqi@0 1734
aoqi@0 1735 /* 64-bit loads */
aoqi@0 1736 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
aoqi@0 1737 { \
aoqi@0 1738 ARRAY_INTRO(-2); \
aoqi@0 1739 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
aoqi@0 1740 (void)extra; \
aoqi@0 1741 UPDATE_PC_AND_CONTINUE(1); \
aoqi@0 1742 }
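                  // The 64-bit loads overwrite the (arrayref, index) pair with the two-slot
                  // long/double result, so the top-of-stack pointer is unchanged and only the pc
                  // advances.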
aoqi@0 1743
aoqi@0 1744 CASE(_iaload):
aoqi@0 1745 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
aoqi@0 1746 CASE(_faload):
aoqi@0 1747 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
aoqi@0 1748 CASE(_aaload): {
aoqi@0 1749 ARRAY_INTRO(-2);
aoqi@0 1750 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
aoqi@0 1751 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1752 }
aoqi@0 1753 CASE(_baload):
aoqi@0 1754 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
aoqi@0 1755 CASE(_caload):
aoqi@0 1756 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
aoqi@0 1757 CASE(_saload):
aoqi@0 1758 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
aoqi@0 1759 CASE(_laload):
aoqi@0 1760 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
aoqi@0 1761 CASE(_daload):
aoqi@0 1762 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
aoqi@0 1763
aoqi@0 1764 /* 32-bit stores. These handle conversion to < 32-bit types */
aoqi@0 1765 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
aoqi@0 1766 { \
aoqi@0 1767 ARRAY_INTRO(-3); \
aoqi@0 1768 (void)extra; \
aoqi@0 1769 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
aoqi@0 1770 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
aoqi@0 1771 }
aoqi@0 1772
aoqi@0 1773 /* 64-bit stores */
aoqi@0 1774 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
aoqi@0 1775 { \
aoqi@0 1776 ARRAY_INTRO(-4); \
aoqi@0 1777 (void)extra; \
aoqi@0 1778 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
aoqi@0 1779 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
aoqi@0 1780 }
aoqi@0 1781
aoqi@0 1782 CASE(_iastore):
aoqi@0 1783 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
aoqi@0 1784 CASE(_fastore):
aoqi@0 1785 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
aoqi@0 1786 /*
aoqi@0 1787 * This one looks different because of the assignability check
aoqi@0 1788 */
aoqi@0 1789 CASE(_aastore): {
aoqi@0 1790 oop rhsObject = STACK_OBJECT(-1);
aoqi@0 1791 VERIFY_OOP(rhsObject);
aoqi@0 1792 ARRAY_INTRO( -3);
aoqi@0 1793 // arrObj, index are set
aoqi@0 1794 if (rhsObject != NULL) {
aoqi@0 1795 /* Check assignability of rhsObject into arrObj */
aoqi@0 1796 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
aoqi@0 1797 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
aoqi@0 1798 //
aoqi@0 1799         // Check for compatibility. This check must not GC!!
aoqi@0 1800         // Seems way more expensive now that we must dispatch.
aoqi@0 1801 //
aoqi@0 1802 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
aoqi@0 1803 // Decrement counter if subtype check failed.
aoqi@0 1804 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
aoqi@0 1805 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
aoqi@0 1806 }
aoqi@0 1807 // Profile checkcast with null_seen and receiver.
aoqi@0 1808 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
aoqi@0 1809 } else {
aoqi@0 1810 // Profile checkcast with null_seen and receiver.
aoqi@0 1811 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
aoqi@0 1812 }
aoqi@0 1813 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
aoqi@0 1814 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
aoqi@0 1815 }
aoqi@0 1816 CASE(_bastore):
aoqi@0 1817 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
aoqi@0 1818 CASE(_castore):
aoqi@0 1819 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
aoqi@0 1820 CASE(_sastore):
aoqi@0 1821 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
aoqi@0 1822 CASE(_lastore):
aoqi@0 1823 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
aoqi@0 1824 CASE(_dastore):
aoqi@0 1825 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
aoqi@0 1826
aoqi@0 1827 CASE(_arraylength):
aoqi@0 1828 {
aoqi@0 1829 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
aoqi@0 1830 CHECK_NULL(ary);
aoqi@0 1831 SET_STACK_INT(ary->length(), -1);
aoqi@0 1832 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1833 }
aoqi@0 1834
aoqi@0 1835 /* monitorenter and monitorexit for locking/unlocking an object */
aoqi@0 1836
aoqi@0 1837 CASE(_monitorenter): {
aoqi@0 1838 oop lockee = STACK_OBJECT(-1);
aoqi@0 1839       // dereferencing lockee ought to provoke an implicit null check
aoqi@0 1840 CHECK_NULL(lockee);
aoqi@0 1841       // Find a free monitor or one already allocated for this object.
aoqi@0 1842       // If we find a matching object then we need a new monitor
aoqi@0 1843       // since this is a recursive enter.
aoqi@0 1844 BasicObjectLock* limit = istate->monitor_base();
aoqi@0 1845 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
aoqi@0 1846 BasicObjectLock* entry = NULL;
aoqi@0 1847 while (most_recent != limit ) {
aoqi@0 1848 if (most_recent->obj() == NULL) entry = most_recent;
aoqi@0 1849 else if (most_recent->obj() == lockee) break;
aoqi@0 1850 most_recent++;
aoqi@0 1851 }
aoqi@0 1852 if (entry != NULL) {
aoqi@0 1853 entry->set_obj(lockee);
aoqi@0 1854 int success = false;
aoqi@0 1855 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
aoqi@0 1856
aoqi@0 1857 markOop mark = lockee->mark();
aoqi@0 1858 intptr_t hash = (intptr_t) markOopDesc::no_hash;
aoqi@0 1859 // implies UseBiasedLocking
aoqi@0 1860 if (mark->has_bias_pattern()) {
aoqi@0 1861 uintptr_t thread_ident;
aoqi@0 1862 uintptr_t anticipated_bias_locking_value;
aoqi@0 1863 thread_ident = (uintptr_t)istate->thread();
aoqi@0 1864 anticipated_bias_locking_value =
aoqi@0 1865 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
aoqi@0 1866 ~((uintptr_t) markOopDesc::age_mask_in_place);
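                              // anticipated_bias_locking_value is zero only when the mark word already
                              // encodes a bias toward this thread with the current epoch and prototype
                              // bits (age bits are masked out); the branches below pick apart which
                              // bits actually differ.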
aoqi@0 1867
aoqi@0 1868 if (anticipated_bias_locking_value == 0) {
aoqi@0 1869 // already biased towards this thread, nothing to do
aoqi@0 1870 if (PrintBiasedLockingStatistics) {
aoqi@0 1871 (* BiasedLocking::biased_lock_entry_count_addr())++;
aoqi@0 1872 }
aoqi@0 1873 success = true;
aoqi@0 1874 }
aoqi@0 1875 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
aoqi@0 1876 // try revoke bias
aoqi@0 1877 markOop header = lockee->klass()->prototype_header();
aoqi@0 1878 if (hash != markOopDesc::no_hash) {
aoqi@0 1879 header = header->copy_set_hash(hash);
aoqi@0 1880 }
aoqi@0 1881 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
aoqi@0 1882 if (PrintBiasedLockingStatistics)
aoqi@0 1883 (*BiasedLocking::revoked_lock_entry_count_addr())++;
aoqi@0 1884 }
aoqi@0 1885 }
aoqi@0 1886 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
aoqi@0 1887 // try rebias
aoqi@0 1888 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
aoqi@0 1889 if (hash != markOopDesc::no_hash) {
aoqi@0 1890 new_header = new_header->copy_set_hash(hash);
aoqi@0 1891 }
aoqi@0 1892 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
aoqi@0 1893 if (PrintBiasedLockingStatistics)
aoqi@0 1894 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
aoqi@0 1895 }
aoqi@0 1896 else {
aoqi@0 1897 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1898 }
aoqi@0 1899 success = true;
aoqi@0 1900 }
aoqi@0 1901 else {
aoqi@0 1902 // try to bias towards thread in case object is anonymously biased
aoqi@0 1903 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
aoqi@0 1904 (uintptr_t)markOopDesc::age_mask_in_place |
aoqi@0 1905 epoch_mask_in_place));
aoqi@0 1906 if (hash != markOopDesc::no_hash) {
aoqi@0 1907 header = header->copy_set_hash(hash);
aoqi@0 1908 }
aoqi@0 1909 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
aoqi@0 1910 // debugging hint
aoqi@0 1911 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
aoqi@0 1912 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
aoqi@0 1913 if (PrintBiasedLockingStatistics)
aoqi@0 1914 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
aoqi@0 1915 }
aoqi@0 1916 else {
aoqi@0 1917 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1918 }
aoqi@0 1919 success = true;
aoqi@0 1920 }
aoqi@0 1921 }
aoqi@0 1922
aoqi@0 1923 // traditional lightweight locking
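                            // The displaced (unlocked) mark word is saved in the BasicLock, and a CAS
                            // then tries to install a pointer to this lock record in the object header.
                            // If the CAS fails and the old mark already points into this thread's stack,
                            // this is a recursive lock and the displaced header is cleared to NULL;
                            // otherwise we fall back to the runtime.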
aoqi@0 1924 if (!success) {
aoqi@0 1925 markOop displaced = lockee->mark()->set_unlocked();
aoqi@0 1926 entry->lock()->set_displaced_header(displaced);
aoqi@0 1927 bool call_vm = UseHeavyMonitors;
aoqi@0 1928 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
aoqi@0 1929             // Is it the simple recursive case?
aoqi@0 1930 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
aoqi@0 1931 entry->lock()->set_displaced_header(NULL);
aoqi@0 1932 } else {
aoqi@0 1933 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1934 }
aoqi@0 1935 }
aoqi@0 1936 }
aoqi@0 1937 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1938 } else {
aoqi@0 1939 istate->set_msg(more_monitors);
aoqi@0 1940 UPDATE_PC_AND_RETURN(0); // Re-execute
aoqi@0 1941 }
aoqi@0 1942 }
aoqi@0 1943
aoqi@0 1944 CASE(_monitorexit): {
aoqi@0 1945 oop lockee = STACK_OBJECT(-1);
aoqi@0 1946 CHECK_NULL(lockee);
aoqi@0 1947       // dereferencing lockee ought to provoke an implicit null check
aoqi@0 1948 // find our monitor slot
aoqi@0 1949 BasicObjectLock* limit = istate->monitor_base();
aoqi@0 1950 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
aoqi@0 1951 while (most_recent != limit ) {
aoqi@0 1952 if ((most_recent)->obj() == lockee) {
aoqi@0 1953 BasicLock* lock = most_recent->lock();
aoqi@0 1954 markOop header = lock->displaced_header();
aoqi@0 1955 most_recent->set_obj(NULL);
aoqi@0 1956 if (!lockee->mark()->has_bias_pattern()) {
aoqi@0 1957 bool call_vm = UseHeavyMonitors;
aoqi@0 1958             // If it isn't recursive we must either swap the old header back or call the runtime
aoqi@0 1959 if (header != NULL || call_vm) {
aoqi@0 1960 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
aoqi@0 1961 // restore object for the slow case
aoqi@0 1962 most_recent->set_obj(lockee);
aoqi@0 1963 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
aoqi@0 1964 }
aoqi@0 1965 }
aoqi@0 1966 }
aoqi@0 1967 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1968 }
aoqi@0 1969 most_recent++;
aoqi@0 1970 }
aoqi@0 1971 // Need to throw illegal monitor state exception
aoqi@0 1972 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
aoqi@0 1973 ShouldNotReachHere();
aoqi@0 1974 }
aoqi@0 1975
aoqi@0 1976 /* All of the non-quick opcodes. */
aoqi@0 1977
aoqi@0 1978 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
aoqi@0 1979 * constant pool index in the instruction.
aoqi@0 1980 */
aoqi@0 1981 CASE(_getfield):
aoqi@0 1982 CASE(_getstatic):
aoqi@0 1983 {
aoqi@0 1984 u2 index;
aoqi@0 1985 ConstantPoolCacheEntry* cache;
aoqi@0 1986 index = Bytes::get_native_u2(pc+1);
aoqi@0 1987
aoqi@0 1988       // QQQ Need to make this as inlined as possible. Probably need to
aoqi@0 1989       // split all the bytecode cases out so the C++ compiler has a chance
aoqi@0 1990       // for constant propagation to fold everything possible away.
aoqi@0 1991
aoqi@0 1992 cache = cp->entry_at(index);
aoqi@0 1993 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 1994 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
aoqi@0 1995 handle_exception);
aoqi@0 1996 cache = cp->entry_at(index);
aoqi@0 1997 }
aoqi@0 1998
aoqi@0 1999 #ifdef VM_JVMTI
aoqi@0 2000 if (_jvmti_interp_events) {
aoqi@0 2001 int *count_addr;
aoqi@0 2002 oop obj;
aoqi@0 2003         // Check to see if a field access watch has been set
aoqi@0 2004 // before we take the time to call into the VM.
aoqi@0 2005 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
aoqi@0 2006 if ( *count_addr > 0 ) {
aoqi@0 2007 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
aoqi@0 2008 obj = (oop)NULL;
aoqi@0 2009 } else {
aoqi@0 2010 obj = (oop) STACK_OBJECT(-1);
aoqi@0 2011 VERIFY_OOP(obj);
aoqi@0 2012 }
aoqi@0 2013 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
aoqi@0 2014 obj,
aoqi@0 2015 cache),
aoqi@0 2016 handle_exception);
aoqi@0 2017 }
aoqi@0 2018 }
aoqi@0 2019 #endif /* VM_JVMTI */
aoqi@0 2020
aoqi@0 2021 oop obj;
aoqi@0 2022 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
aoqi@0 2023 Klass* k = cache->f1_as_klass();
aoqi@0 2024 obj = k->java_mirror();
aoqi@0 2025 MORE_STACK(1); // Assume single slot push
aoqi@0 2026 } else {
aoqi@0 2027 obj = (oop) STACK_OBJECT(-1);
aoqi@0 2028 CHECK_NULL(obj);
aoqi@0 2029 }
aoqi@0 2030
aoqi@0 2031 //
aoqi@0 2032 // Now store the result on the stack
aoqi@0 2033 //
aoqi@0 2034 TosState tos_type = cache->flag_state();
aoqi@0 2035 int field_offset = cache->f2_as_index();
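                            // The cp cache entry supplies both the field's basic type (tos_type, taken
                            // from the flags) and its offset in the holder (f2). Volatile fields use
                            // acquire-ordered loads, plus a leading fence on platforms that need extra
                            // ordering for IRIW (non-multiple-copy-atomic CPUs).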
aoqi@0 2036 if (cache->is_volatile()) {
aoqi@0 2037 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
aoqi@0 2038 OrderAccess::fence();
aoqi@0 2039 }
aoqi@0 2040 if (tos_type == atos) {
aoqi@0 2041 VERIFY_OOP(obj->obj_field_acquire(field_offset));
aoqi@0 2042 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
aoqi@0 2043 } else if (tos_type == itos) {
aoqi@0 2044 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
aoqi@0 2045 } else if (tos_type == ltos) {
aoqi@0 2046 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
aoqi@0 2047 MORE_STACK(1);
aoqi@0 2048 } else if (tos_type == btos) {
aoqi@0 2049 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
aoqi@0 2050 } else if (tos_type == ctos) {
aoqi@0 2051 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
aoqi@0 2052 } else if (tos_type == stos) {
aoqi@0 2053 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
aoqi@0 2054 } else if (tos_type == ftos) {
aoqi@0 2055 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
aoqi@0 2056 } else {
aoqi@0 2057 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
aoqi@0 2058 MORE_STACK(1);
aoqi@0 2059 }
aoqi@0 2060 } else {
aoqi@0 2061 if (tos_type == atos) {
aoqi@0 2062 VERIFY_OOP(obj->obj_field(field_offset));
aoqi@0 2063 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
aoqi@0 2064 } else if (tos_type == itos) {
aoqi@0 2065 SET_STACK_INT(obj->int_field(field_offset), -1);
aoqi@0 2066 } else if (tos_type == ltos) {
aoqi@0 2067 SET_STACK_LONG(obj->long_field(field_offset), 0);
aoqi@0 2068 MORE_STACK(1);
aoqi@0 2069 } else if (tos_type == btos) {
aoqi@0 2070 SET_STACK_INT(obj->byte_field(field_offset), -1);
aoqi@0 2071 } else if (tos_type == ctos) {
aoqi@0 2072 SET_STACK_INT(obj->char_field(field_offset), -1);
aoqi@0 2073 } else if (tos_type == stos) {
aoqi@0 2074 SET_STACK_INT(obj->short_field(field_offset), -1);
aoqi@0 2075 } else if (tos_type == ftos) {
aoqi@0 2076 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
aoqi@0 2077 } else {
aoqi@0 2078 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
aoqi@0 2079 MORE_STACK(1);
aoqi@0 2080 }
aoqi@0 2081 }
aoqi@0 2082
aoqi@0 2083 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2084 }
aoqi@0 2085
aoqi@0 2086 CASE(_putfield):
aoqi@0 2087 CASE(_putstatic):
aoqi@0 2088 {
aoqi@0 2089 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2090 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2091 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 2092 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
aoqi@0 2093 handle_exception);
aoqi@0 2094 cache = cp->entry_at(index);
aoqi@0 2095 }
aoqi@0 2096
aoqi@0 2097 #ifdef VM_JVMTI
aoqi@0 2098 if (_jvmti_interp_events) {
aoqi@0 2099 int *count_addr;
aoqi@0 2100 oop obj;
aoqi@0 2101 // Check to see if a field modification watch has been set
aoqi@0 2102 // before we take the time to call into the VM.
aoqi@0 2103 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
aoqi@0 2104 if ( *count_addr > 0 ) {
aoqi@0 2105 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
aoqi@0 2106 obj = (oop)NULL;
aoqi@0 2107 }
aoqi@0 2108 else {
aoqi@0 2109 if (cache->is_long() || cache->is_double()) {
aoqi@0 2110 obj = (oop) STACK_OBJECT(-3);
aoqi@0 2111 } else {
aoqi@0 2112 obj = (oop) STACK_OBJECT(-2);
aoqi@0 2113 }
aoqi@0 2114 VERIFY_OOP(obj);
aoqi@0 2115 }
aoqi@0 2116
aoqi@0 2117 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
aoqi@0 2118 obj,
aoqi@0 2119 cache,
aoqi@0 2120 (jvalue *)STACK_SLOT(-1)),
aoqi@0 2121 handle_exception);
aoqi@0 2122 }
aoqi@0 2123 }
aoqi@0 2124 #endif /* VM_JVMTI */
aoqi@0 2125
aoqi@0 2126       // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
aoqi@0 2127       // out so the C++ compiler has a chance for constant propagation to fold everything possible away.
aoqi@0 2128
aoqi@0 2129 oop obj;
aoqi@0 2130 int count;
aoqi@0 2131 TosState tos_type = cache->flag_state();
aoqi@0 2132
aoqi@0 2133 count = -1;
aoqi@0 2134 if (tos_type == ltos || tos_type == dtos) {
aoqi@0 2135 --count;
aoqi@0 2136 }
aoqi@0 2137 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
aoqi@0 2138 Klass* k = cache->f1_as_klass();
aoqi@0 2139 obj = k->java_mirror();
aoqi@0 2140 } else {
aoqi@0 2141 --count;
aoqi@0 2142 obj = (oop) STACK_OBJECT(count);
aoqi@0 2143 CHECK_NULL(obj);
aoqi@0 2144 }
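                            // At this point 'count' is the (negative) top-of-stack adjustment applied at
                            // the end: one slot for the value (two for long/double), plus one more for
                            // the receiver when this is a putfield (which is why the receiver was read
                            // at STACK_OBJECT(count) above).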
aoqi@0 2145
aoqi@0 2146 //
aoqi@0 2147 // Now store the result
aoqi@0 2148 //
aoqi@0 2149 int field_offset = cache->f2_as_index();
aoqi@0 2150 if (cache->is_volatile()) {
aoqi@0 2151 if (tos_type == itos) {
aoqi@0 2152 obj->release_int_field_put(field_offset, STACK_INT(-1));
aoqi@0 2153 } else if (tos_type == atos) {
aoqi@0 2154 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2155 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
aoqi@0 2156 } else if (tos_type == btos) {
aoqi@0 2157 obj->release_byte_field_put(field_offset, STACK_INT(-1));
aoqi@0 2158 } else if (tos_type == ltos) {
aoqi@0 2159 obj->release_long_field_put(field_offset, STACK_LONG(-1));
aoqi@0 2160 } else if (tos_type == ctos) {
aoqi@0 2161 obj->release_char_field_put(field_offset, STACK_INT(-1));
aoqi@0 2162 } else if (tos_type == stos) {
aoqi@0 2163 obj->release_short_field_put(field_offset, STACK_INT(-1));
aoqi@0 2164 } else if (tos_type == ftos) {
aoqi@0 2165 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
aoqi@0 2166 } else {
aoqi@0 2167 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
aoqi@0 2168 }
aoqi@0 2169 OrderAccess::storeload();
aoqi@0 2170 } else {
aoqi@0 2171 if (tos_type == itos) {
aoqi@0 2172 obj->int_field_put(field_offset, STACK_INT(-1));
aoqi@0 2173 } else if (tos_type == atos) {
aoqi@0 2174 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2175 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
aoqi@0 2176 } else if (tos_type == btos) {
aoqi@0 2177 obj->byte_field_put(field_offset, STACK_INT(-1));
aoqi@0 2178 } else if (tos_type == ltos) {
aoqi@0 2179 obj->long_field_put(field_offset, STACK_LONG(-1));
aoqi@0 2180 } else if (tos_type == ctos) {
aoqi@0 2181 obj->char_field_put(field_offset, STACK_INT(-1));
aoqi@0 2182 } else if (tos_type == stos) {
aoqi@0 2183 obj->short_field_put(field_offset, STACK_INT(-1));
aoqi@0 2184 } else if (tos_type == ftos) {
aoqi@0 2185 obj->float_field_put(field_offset, STACK_FLOAT(-1));
aoqi@0 2186 } else {
aoqi@0 2187 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
aoqi@0 2188 }
aoqi@0 2189 }
aoqi@0 2190
aoqi@0 2191 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
aoqi@0 2192 }
aoqi@0 2193
aoqi@0 2194 CASE(_new): {
aoqi@0 2195 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2196 ConstantPool* constants = istate->method()->constants();
aoqi@0 2197 if (!constants->tag_at(index).is_unresolved_klass()) {
aoqi@0 2198 // Make sure klass is initialized and doesn't have a finalizer
aoqi@0 2199 Klass* entry = constants->slot_at(index).get_klass();
aoqi@0 2200 assert(entry->is_klass(), "Should be resolved klass");
aoqi@0 2201 Klass* k_entry = (Klass*) entry;
aoqi@0 2202 assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
aoqi@0 2203 InstanceKlass* ik = (InstanceKlass*) k_entry;
aoqi@0 2204 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
aoqi@0 2205 size_t obj_size = ik->size_helper();
aoqi@0 2206 oop result = NULL;
aoqi@0 2207 // If the TLAB isn't pre-zeroed then we'll have to do it
aoqi@0 2208 bool need_zero = !ZeroTLAB;
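                              // With -XX:+ZeroTLAB the TLAB is cleared when it is allocated, so objects
                              // carved out of it need no zeroing here; the shared-eden path below always
                              // sets need_zero.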
aoqi@0 2209 if (UseTLAB) {
aoqi@0 2210 result = (oop) THREAD->tlab().allocate(obj_size);
aoqi@0 2211 }
aoqi@0 2212 // Disable non-TLAB-based fast-path, because profiling requires that all
aoqi@0 2213 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
aoqi@0 2214 // returns NULL.
aoqi@0 2215 #ifndef CC_INTERP_PROFILE
aoqi@0 2216 if (result == NULL) {
aoqi@0 2217 need_zero = true;
aoqi@0 2218 // Try allocate in shared eden
aoqi@0 2219 retry:
aoqi@0 2220 HeapWord* compare_to = *Universe::heap()->top_addr();
aoqi@0 2221 HeapWord* new_top = compare_to + obj_size;
aoqi@0 2222 if (new_top <= *Universe::heap()->end_addr()) {
aoqi@0 2223 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
aoqi@0 2224 goto retry;
aoqi@0 2225 }
aoqi@0 2226 result = (oop) compare_to;
aoqi@0 2227 }
aoqi@0 2228 }
aoqi@0 2229 #endif
aoqi@0 2230 if (result != NULL) {
aoqi@0 2231             // Initialize the object body (if it has nonzero size and needs zeroing) and then the header
aoqi@0 2232 if (need_zero ) {
aoqi@0 2233 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
aoqi@0 2234 obj_size -= sizeof(oopDesc) / oopSize;
aoqi@0 2235 if (obj_size > 0 ) {
aoqi@0 2236 memset(to_zero, 0, obj_size * HeapWordSize);
aoqi@0 2237 }
aoqi@0 2238 }
aoqi@0 2239 if (UseBiasedLocking) {
aoqi@0 2240 result->set_mark(ik->prototype_header());
aoqi@0 2241 } else {
aoqi@0 2242 result->set_mark(markOopDesc::prototype());
aoqi@0 2243 }
aoqi@0 2244 result->set_klass_gap(0);
aoqi@0 2245 result->set_klass(k_entry);
aoqi@0 2246 // Must prevent reordering of stores for object initialization
aoqi@0 2247 // with stores that publish the new object.
aoqi@0 2248 OrderAccess::storestore();
aoqi@0 2249 SET_STACK_OBJECT(result, 0);
aoqi@0 2250 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
aoqi@0 2251 }
aoqi@0 2252 }
aoqi@0 2253 }
aoqi@0 2254 // Slow case allocation
aoqi@0 2255 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
aoqi@0 2256 handle_exception);
aoqi@0 2257 // Must prevent reordering of stores for object initialization
aoqi@0 2258 // with stores that publish the new object.
aoqi@0 2259 OrderAccess::storestore();
aoqi@0 2260 SET_STACK_OBJECT(THREAD->vm_result(), 0);
aoqi@0 2261 THREAD->set_vm_result(NULL);
aoqi@0 2262 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
aoqi@0 2263 }
aoqi@0 2264 CASE(_anewarray): {
aoqi@0 2265 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2266 jint size = STACK_INT(-1);
aoqi@0 2267 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
aoqi@0 2268 handle_exception);
aoqi@0 2269 // Must prevent reordering of stores for object initialization
aoqi@0 2270 // with stores that publish the new object.
aoqi@0 2271 OrderAccess::storestore();
aoqi@0 2272 SET_STACK_OBJECT(THREAD->vm_result(), -1);
aoqi@0 2273 THREAD->set_vm_result(NULL);
aoqi@0 2274 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2275 }
aoqi@0 2276 CASE(_multianewarray): {
aoqi@0 2277 jint dims = *(pc+3);
aoqi@0 2278 jint size = STACK_INT(-1);
aoqi@0 2279 // stack grows down, dimensions are up!
aoqi@0 2280 jint *dimarray =
aoqi@0 2281 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
aoqi@0 2282 Interpreter::stackElementWords-1];
aoqi@0 2283       // adjust pointer to start of stack element
aoqi@0 2284 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
aoqi@0 2285 handle_exception);
aoqi@0 2286 // Must prevent reordering of stores for object initialization
aoqi@0 2287 // with stores that publish the new object.
aoqi@0 2288 OrderAccess::storestore();
aoqi@0 2289 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
aoqi@0 2290 THREAD->set_vm_result(NULL);
aoqi@0 2291 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
aoqi@0 2292 }
aoqi@0 2293 CASE(_checkcast):
aoqi@0 2294 if (STACK_OBJECT(-1) != NULL) {
aoqi@0 2295 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2296 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2297 // Constant pool may have actual klass or unresolved klass. If it is
aoqi@0 2298 // unresolved we must resolve it.
aoqi@0 2299 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
aoqi@0 2300 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
aoqi@0 2301 }
aoqi@0 2302 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
aoqi@0 2303 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
aoqi@0 2304 //
aoqi@0 2305         // Check for compatibility. This check must not GC!!
aoqi@0 2306 // Seems way more expensive now that we must dispatch.
aoqi@0 2307 //
aoqi@0 2308 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
aoqi@0 2309 // Decrement counter at checkcast.
aoqi@0 2310 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
aoqi@0 2311 ResourceMark rm(THREAD);
aoqi@0 2312 const char* objName = objKlass->external_name();
aoqi@0 2313 const char* klassName = klassOf->external_name();
aoqi@0 2314 char* message = SharedRuntime::generate_class_cast_message(
aoqi@0 2315 objName, klassName);
aoqi@0 2316 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
aoqi@0 2317 }
aoqi@0 2318 // Profile checkcast with null_seen and receiver.
aoqi@0 2319 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
aoqi@0 2320 } else {
aoqi@0 2321 // Profile checkcast with null_seen and receiver.
aoqi@0 2322 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
aoqi@0 2323 }
aoqi@0 2324 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2325
aoqi@0 2326 CASE(_instanceof):
aoqi@0 2327 if (STACK_OBJECT(-1) == NULL) {
aoqi@0 2328 SET_STACK_INT(0, -1);
aoqi@0 2329 // Profile instanceof with null_seen and receiver.
aoqi@0 2330 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
aoqi@0 2331 } else {
aoqi@0 2332 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2333 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2334 // Constant pool may have actual klass or unresolved klass. If it is
aoqi@0 2335 // unresolved we must resolve it.
aoqi@0 2336 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
aoqi@0 2337 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
aoqi@0 2338 }
aoqi@0 2339 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
aoqi@0 2340 Klass* objKlass = STACK_OBJECT(-1)->klass();
aoqi@0 2341 //
aoqi@0 2342           // Check for compatibility. This check must not GC!!
aoqi@0 2343 // Seems way more expensive now that we must dispatch.
aoqi@0 2344 //
aoqi@0 2345 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
aoqi@0 2346 SET_STACK_INT(1, -1);
aoqi@0 2347 } else {
aoqi@0 2348 SET_STACK_INT(0, -1);
aoqi@0 2349             // Decrement counter if the subtype check failed.
aoqi@0 2350 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
aoqi@0 2351 }
aoqi@0 2352 // Profile instanceof with null_seen and receiver.
aoqi@0 2353 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
aoqi@0 2354 }
aoqi@0 2355 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2356
aoqi@0 2357 CASE(_ldc_w):
aoqi@0 2358 CASE(_ldc):
aoqi@0 2359 {
aoqi@0 2360 u2 index;
aoqi@0 2361 bool wide = false;
aoqi@0 2362 int incr = 2; // frequent case
aoqi@0 2363 if (opcode == Bytecodes::_ldc) {
aoqi@0 2364 index = pc[1];
aoqi@0 2365 } else {
aoqi@0 2366 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2367 incr = 3;
aoqi@0 2368 wide = true;
aoqi@0 2369 }
aoqi@0 2370
aoqi@0 2371 ConstantPool* constants = METHOD->constants();
aoqi@0 2372 switch (constants->tag_at(index).value()) {
aoqi@0 2373 case JVM_CONSTANT_Integer:
aoqi@0 2374 SET_STACK_INT(constants->int_at(index), 0);
aoqi@0 2375 break;
aoqi@0 2376
aoqi@0 2377 case JVM_CONSTANT_Float:
aoqi@0 2378 SET_STACK_FLOAT(constants->float_at(index), 0);
aoqi@0 2379 break;
aoqi@0 2380
aoqi@0 2381 case JVM_CONSTANT_String:
aoqi@0 2382 {
aoqi@0 2383 oop result = constants->resolved_references()->obj_at(index);
aoqi@0 2384 if (result == NULL) {
aoqi@0 2385 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
aoqi@0 2386 SET_STACK_OBJECT(THREAD->vm_result(), 0);
aoqi@0 2387 THREAD->set_vm_result(NULL);
aoqi@0 2388 } else {
aoqi@0 2389 VERIFY_OOP(result);
aoqi@0 2390 SET_STACK_OBJECT(result, 0);
aoqi@0 2391 }
aoqi@0 2392 break;
aoqi@0 2393 }
aoqi@0 2394
aoqi@0 2395 case JVM_CONSTANT_Class:
aoqi@0 2396 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
aoqi@0 2397 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
aoqi@0 2398 break;
aoqi@0 2399
aoqi@0 2400 case JVM_CONSTANT_UnresolvedClass:
aoqi@0 2401 case JVM_CONSTANT_UnresolvedClassInError:
aoqi@0 2402 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
aoqi@0 2403 SET_STACK_OBJECT(THREAD->vm_result(), 0);
aoqi@0 2404 THREAD->set_vm_result(NULL);
aoqi@0 2405 break;
aoqi@0 2406
aoqi@0 2407 default: ShouldNotReachHere();
aoqi@0 2408 }
aoqi@0 2409 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
aoqi@0 2410 }
aoqi@0 2411
aoqi@0 2412 CASE(_ldc2_w):
aoqi@0 2413 {
aoqi@0 2414 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2415
aoqi@0 2416 ConstantPool* constants = METHOD->constants();
aoqi@0 2417 switch (constants->tag_at(index).value()) {
aoqi@0 2418
aoqi@0 2419 case JVM_CONSTANT_Long:
aoqi@0 2420 SET_STACK_LONG(constants->long_at(index), 1);
aoqi@0 2421 break;
aoqi@0 2422
aoqi@0 2423 case JVM_CONSTANT_Double:
aoqi@0 2424 SET_STACK_DOUBLE(constants->double_at(index), 1);
aoqi@0 2425 break;
aoqi@0 2426 default: ShouldNotReachHere();
aoqi@0 2427 }
aoqi@0 2428 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
aoqi@0 2429 }
aoqi@0 2430
aoqi@0 2431 CASE(_fast_aldc_w):
aoqi@0 2432 CASE(_fast_aldc): {
aoqi@0 2433 u2 index;
aoqi@0 2434 int incr;
aoqi@0 2435 if (opcode == Bytecodes::_fast_aldc) {
aoqi@0 2436 index = pc[1];
aoqi@0 2437 incr = 2;
aoqi@0 2438 } else {
aoqi@0 2439 index = Bytes::get_native_u2(pc+1);
aoqi@0 2440 incr = 3;
aoqi@0 2441 }
aoqi@0 2442
aoqi@0 2443       // We are resolved if the resolved_references entry contains a non-null object (String, MethodHandle, etc.)
aoqi@0 2444 // This kind of CP cache entry does not need to match the flags byte, because
aoqi@0 2445 // there is a 1-1 relation between bytecode type and CP entry type.
aoqi@0 2446 ConstantPool* constants = METHOD->constants();
aoqi@0 2447 oop result = constants->resolved_references()->obj_at(index);
aoqi@0 2448 if (result == NULL) {
aoqi@0 2449 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
aoqi@0 2450 handle_exception);
aoqi@0 2451 result = THREAD->vm_result();
aoqi@0 2452 }
aoqi@0 2453
aoqi@0 2454 VERIFY_OOP(result);
aoqi@0 2455 SET_STACK_OBJECT(result, 0);
aoqi@0 2456 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
aoqi@0 2457 }
aoqi@0 2458
aoqi@0 2459 CASE(_invokedynamic): {
aoqi@0 2460
aoqi@0 2461 if (!EnableInvokeDynamic) {
aoqi@0 2462 // We should not encounter this bytecode if !EnableInvokeDynamic.
aoqi@0 2463 // The verifier will stop it. However, if we get past the verifier,
aoqi@0 2464 // this will stop the thread in a reasonable way, without crashing the JVM.
aoqi@0 2465 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
aoqi@0 2466 handle_exception);
aoqi@0 2467 ShouldNotReachHere();
aoqi@0 2468 }
aoqi@0 2469
aoqi@0 2470 u4 index = Bytes::get_native_u4(pc+1);
aoqi@0 2471 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
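                  // The 4-byte operand is read in native byte order because the rewriter has
                  // replaced the original class-file form (indexbyte1, indexbyte2, 0, 0) with an
                  // internal constant pool cache reference, decoded by
                  // invokedynamic_cp_cache_entry_at above.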
aoqi@0 2472
aoqi@0 2473 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
aoqi@0 2474 // This kind of CP cache entry does not need to match the flags byte, because
aoqi@0 2475 // there is a 1-1 relation between bytecode type and CP entry type.
aoqi@0 2476 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
aoqi@0 2477 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
aoqi@0 2478 handle_exception);
aoqi@0 2479 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
aoqi@0 2480 }
aoqi@0 2481
aoqi@0 2482 Method* method = cache->f1_as_method();
aoqi@0 2483 if (VerifyOops) method->verify();
aoqi@0 2484
aoqi@0 2485 if (cache->has_appendix()) {
aoqi@0 2486 ConstantPool* constants = METHOD->constants();
aoqi@0 2487 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
aoqi@0 2488 MORE_STACK(1);
aoqi@0 2489 }
aoqi@0 2490
aoqi@0 2491 istate->set_msg(call_method);
aoqi@0 2492 istate->set_callee(method);
aoqi@0 2493 istate->set_callee_entry_point(method->from_interpreted_entry());
aoqi@0 2494 istate->set_bcp_advance(5);
aoqi@0 2495
aoqi@0 2496       // Invokedynamic has a call counter, just like an invokestatic -> increment!
aoqi@0 2497 BI_PROFILE_UPDATE_CALL();
aoqi@0 2498
aoqi@0 2499 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2500 }
aoqi@0 2501
aoqi@0 2502 CASE(_invokehandle): {
aoqi@0 2503
aoqi@0 2504 if (!EnableInvokeDynamic) {
aoqi@0 2505 ShouldNotReachHere();
aoqi@0 2506 }
aoqi@0 2507
aoqi@0 2508 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2509 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2510
aoqi@0 2511 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
aoqi@0 2512 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
aoqi@0 2513 handle_exception);
aoqi@0 2514 cache = cp->entry_at(index);
aoqi@0 2515 }
aoqi@0 2516
aoqi@0 2517 Method* method = cache->f1_as_method();
aoqi@0 2518 if (VerifyOops) method->verify();
aoqi@0 2519
aoqi@0 2520 if (cache->has_appendix()) {
aoqi@0 2521 ConstantPool* constants = METHOD->constants();
aoqi@0 2522 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
aoqi@0 2523 MORE_STACK(1);
aoqi@0 2524 }
aoqi@0 2525
aoqi@0 2526 istate->set_msg(call_method);
aoqi@0 2527 istate->set_callee(method);
aoqi@0 2528 istate->set_callee_entry_point(method->from_interpreted_entry());
aoqi@0 2529 istate->set_bcp_advance(3);
aoqi@0 2530
aoqi@0 2531       // Invokehandle has a call counter, just like a final call -> increment!
aoqi@0 2532 BI_PROFILE_UPDATE_FINALCALL();
aoqi@0 2533
aoqi@0 2534 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2535 }
aoqi@0 2536
aoqi@0 2537 CASE(_invokeinterface): {
aoqi@0 2538 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2539
aoqi@0 2540       // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
aoqi@0 2541       // out so the C++ compiler has a chance for constant propagation to fold everything possible away.
aoqi@0 2542
aoqi@0 2543 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2544 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 2545 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
aoqi@0 2546 handle_exception);
aoqi@0 2547 cache = cp->entry_at(index);
aoqi@0 2548 }
aoqi@0 2549
aoqi@0 2550 istate->set_msg(call_method);
aoqi@0 2551
aoqi@0 2552 // Special case of invokeinterface called for virtual method of
aoqi@0 2553 // java.lang.Object. See cpCacheOop.cpp for details.
aoqi@0 2554 // This code isn't produced by javac, but could be produced by
aoqi@0 2555 // another compliant java compiler.
aoqi@0 2556 if (cache->is_forced_virtual()) {
aoqi@0 2557 Method* callee;
aoqi@0 2558 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
aoqi@0 2559 if (cache->is_vfinal()) {
aoqi@0 2560 callee = cache->f2_as_vfinal_method();
aoqi@0 2561 // Profile 'special case of invokeinterface' final call.
aoqi@0 2562 BI_PROFILE_UPDATE_FINALCALL();
aoqi@0 2563 } else {
aoqi@0 2564 // Get receiver.
aoqi@0 2565 int parms = cache->parameter_size();
aoqi@0 2566 // Same comments as invokevirtual apply here.
aoqi@0 2567 oop rcvr = STACK_OBJECT(-parms);
aoqi@0 2568 VERIFY_OOP(rcvr);
aoqi@0 2569 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
aoqi@0 2570 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
aoqi@0 2571 // Profile 'special case of invokeinterface' virtual call.
aoqi@0 2572 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
aoqi@0 2573 }
aoqi@0 2574 istate->set_callee(callee);
aoqi@0 2575 istate->set_callee_entry_point(callee->from_interpreted_entry());
aoqi@0 2576 #ifdef VM_JVMTI
aoqi@0 2577 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
aoqi@0 2578 istate->set_callee_entry_point(callee->interpreter_entry());
aoqi@0 2579 }
aoqi@0 2580 #endif /* VM_JVMTI */
aoqi@0 2581 istate->set_bcp_advance(5);
aoqi@0 2582 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2583 }
aoqi@0 2584
aoqi@0 2585 // this could definitely be cleaned up QQQ
aoqi@0 2586 Method* callee;
aoqi@0 2587 Klass* iclass = cache->f1_as_klass();
aoqi@0 2588 // InstanceKlass* interface = (InstanceKlass*) iclass;
aoqi@0 2589 // get receiver
aoqi@0 2590 int parms = cache->parameter_size();
aoqi@0 2591 oop rcvr = STACK_OBJECT(-parms);
aoqi@0 2592 CHECK_NULL(rcvr);
aoqi@0 2593 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
aoqi@0 2594 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
aoqi@0 2595 int i;
aoqi@0 2596 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
aoqi@0 2597 if (ki->interface_klass() == iclass) break;
aoqi@0 2598 }
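                  // The itable starts with a table of (interface klass, offset) entries; the loop
                  // above locates the entry for 'iclass', and the method table it refers to is then
                  // indexed by the itable slot number stored in f2.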
aoqi@0 2599 // If the interface isn't found, this class doesn't implement this
aoqi@0 2600 // interface. The link resolver checks this but only for the first
aoqi@0 2601 // time this interface is called.
aoqi@0 2602 if (i == int2->itable_length()) {
aoqi@0 2603 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
aoqi@0 2604 }
aoqi@0 2605 int mindex = cache->f2_as_index();
aoqi@0 2606 itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
aoqi@0 2607 callee = im[mindex].method();
aoqi@0 2608 if (callee == NULL) {
aoqi@0 2609 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
aoqi@0 2610 }
aoqi@0 2611
aoqi@0 2612 // Profile virtual call.
aoqi@0 2613 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
aoqi@0 2614
aoqi@0 2615 istate->set_callee(callee);
aoqi@0 2616 istate->set_callee_entry_point(callee->from_interpreted_entry());
aoqi@0 2617 #ifdef VM_JVMTI
aoqi@0 2618 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
aoqi@0 2619 istate->set_callee_entry_point(callee->interpreter_entry());
aoqi@0 2620 }
aoqi@0 2621 #endif /* VM_JVMTI */
aoqi@0 2622 istate->set_bcp_advance(5);
aoqi@0 2623 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2624 }
aoqi@0 2625
aoqi@0 2626 CASE(_invokevirtual):
aoqi@0 2627 CASE(_invokespecial):
aoqi@0 2628 CASE(_invokestatic): {
aoqi@0 2629 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2630
aoqi@0 2631 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2632       // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
aoqi@0 2633       // out so the C++ compiler has a chance for constant propagation to fold everything possible away.
aoqi@0 2634
aoqi@0 2635 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 2636 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
aoqi@0 2637 handle_exception);
aoqi@0 2638 cache = cp->entry_at(index);
aoqi@0 2639 }
aoqi@0 2640
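                  // Dispatch summary: for invokevirtual a vfinal entry holds the Method* directly in
                  // f2, otherwise f2 is a vtable index applied to the receiver's klass; for
                  // invokespecial/invokestatic the resolved Method* is in f1.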
aoqi@0 2641 istate->set_msg(call_method);
aoqi@0 2642 {
aoqi@0 2643 Method* callee;
aoqi@0 2644 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
aoqi@0 2645 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
aoqi@0 2646 if (cache->is_vfinal()) {
aoqi@0 2647 callee = cache->f2_as_vfinal_method();
aoqi@0 2648 // Profile final call.
aoqi@0 2649 BI_PROFILE_UPDATE_FINALCALL();
aoqi@0 2650 } else {
aoqi@0 2651 // get receiver
aoqi@0 2652 int parms = cache->parameter_size();
aoqi@0 2653 // this works but needs a resourcemark and seems to create a vtable on every call:
aoqi@0 2654 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
aoqi@0 2655 //
aoqi@0 2656 // this fails with an assert
aoqi@0 2657 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
aoqi@0 2658 // but this works
aoqi@0 2659 oop rcvr = STACK_OBJECT(-parms);
aoqi@0 2660 VERIFY_OOP(rcvr);
aoqi@0 2661 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
aoqi@0 2662 /*
aoqi@0 2663 Executing this code in java.lang.String:
aoqi@0 2664 public String(char value[]) {
aoqi@0 2665 this.count = value.length;
aoqi@0 2666 this.value = (char[])value.clone();
aoqi@0 2667 }
aoqi@0 2668
aoqi@0 2669 a find on rcvr->klass() reports:
aoqi@0 2670 {type array char}{type array class}
aoqi@0 2671 - klass: {other class}
aoqi@0 2672
aoqi@0 2673                 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
aoqi@0 2674 because rcvr->klass()->oop_is_instance() == 0
aoqi@0 2675 However it seems to have a vtable in the right location. Huh?
aoqi@0 2676
aoqi@0 2677 */
aoqi@0 2678 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
aoqi@0 2679 // Profile virtual call.
aoqi@0 2680 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
aoqi@0 2681 }
aoqi@0 2682 } else {
aoqi@0 2683 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
aoqi@0 2684 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
aoqi@0 2685 }
aoqi@0 2686 callee = cache->f1_as_method();
aoqi@0 2687
aoqi@0 2688 // Profile call.
aoqi@0 2689 BI_PROFILE_UPDATE_CALL();
aoqi@0 2690 }
aoqi@0 2691
aoqi@0 2692 istate->set_callee(callee);
aoqi@0 2693 istate->set_callee_entry_point(callee->from_interpreted_entry());
aoqi@0 2694 #ifdef VM_JVMTI
aoqi@0 2695 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
aoqi@0 2696 istate->set_callee_entry_point(callee->interpreter_entry());
aoqi@0 2697 }
aoqi@0 2698 #endif /* VM_JVMTI */
aoqi@0 2699 istate->set_bcp_advance(3);
aoqi@0 2700 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2701 }
aoqi@0 2702 }
aoqi@0 2703
aoqi@0 2704 /* Allocate memory for a new java object. */
aoqi@0 2705
aoqi@0 2706 CASE(_newarray): {
aoqi@0 2707 BasicType atype = (BasicType) *(pc+1);
aoqi@0 2708 jint size = STACK_INT(-1);
aoqi@0 2709 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
aoqi@0 2710 handle_exception);
aoqi@0 2711 // Must prevent reordering of stores for object initialization
aoqi@0 2712 // with stores that publish the new object.
aoqi@0 2713 OrderAccess::storestore();
aoqi@0 2714 SET_STACK_OBJECT(THREAD->vm_result(), -1);
aoqi@0 2715 THREAD->set_vm_result(NULL);
aoqi@0 2716
aoqi@0 2717 UPDATE_PC_AND_CONTINUE(2);
aoqi@0 2718 }
aoqi@0 2719
aoqi@0 2720 /* Throw an exception. */
aoqi@0 2721
aoqi@0 2722 CASE(_athrow): {
aoqi@0 2723 oop except_oop = STACK_OBJECT(-1);
aoqi@0 2724 CHECK_NULL(except_oop);
aoqi@0 2725 // set pending_exception so we use common code
aoqi@0 2726 THREAD->set_pending_exception(except_oop, NULL, 0);
aoqi@0 2727 goto handle_exception;
aoqi@0 2728 }
aoqi@0 2729
aoqi@0 2730 /* goto and jsr. They are exactly the same except jsr pushes
aoqi@0 2731 * the address of the next instruction first.
aoqi@0 2732 */
aoqi@0 2733
aoqi@0 2734 CASE(_jsr): {
aoqi@0 2735 /* push bytecode index on stack */
aoqi@0 2736 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
aoqi@0 2737 MORE_STACK(1);
aoqi@0 2738 /* FALL THROUGH */
aoqi@0 2739 }
aoqi@0 2740
aoqi@0 2741 CASE(_goto):
aoqi@0 2742 {
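// The branch target is a signed 16-bit offset relative to this goto's pc.
// DO_BACKEDGE_CHECKS handles backward branches, which (depending on build flags)
// may bump backedge counters, poll for a safepoint, or trigger OSR compilation.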
aoqi@0 2743 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
aoqi@0 2744 // Profile jump.
aoqi@0 2745 BI_PROFILE_UPDATE_JUMP();
aoqi@0 2746 address branch_pc = pc;
aoqi@0 2747 UPDATE_PC(offset);
aoqi@0 2748 DO_BACKEDGE_CHECKS(offset, branch_pc);
aoqi@0 2749 CONTINUE;
aoqi@0 2750 }
aoqi@0 2751
aoqi@0 2752 CASE(_jsr_w): {
aoqi@0 2753 /* push return address on the stack */
aoqi@0 2754 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
aoqi@0 2755 MORE_STACK(1);
aoqi@0 2756 /* FALL THROUGH */
aoqi@0 2757 }
aoqi@0 2758
aoqi@0 2759 CASE(_goto_w):
aoqi@0 2760 {
aoqi@0 2761 int32_t offset = Bytes::get_Java_u4(pc + 1);
aoqi@0 2762 // Profile jump.
aoqi@0 2763 BI_PROFILE_UPDATE_JUMP();
aoqi@0 2764 address branch_pc = pc;
aoqi@0 2765 UPDATE_PC(offset);
aoqi@0 2766 DO_BACKEDGE_CHECKS(offset, branch_pc);
aoqi@0 2767 CONTINUE;
aoqi@0 2768 }
aoqi@0 2769
aoqi@0 2770 /* return from a jsr or jsr_w */
aoqi@0 2771
aoqi@0 2772 CASE(_ret): {
aoqi@0 2773 // Profile ret.
aoqi@0 2774 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
aoqi@0 2775 // Now, update the pc.
aoqi@0 2776 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
aoqi@0 2777 UPDATE_PC_AND_CONTINUE(0);
aoqi@0 2778 }
aoqi@0 2779
aoqi@0 2780 /* debugger breakpoint */
aoqi@0 2781
aoqi@0 2782 CASE(_breakpoint): {
aoqi@0 2783 Bytecodes::Code original_bytecode;
aoqi@0 2784 DECACHE_STATE();
aoqi@0 2785 SET_LAST_JAVA_FRAME();
aoqi@0 2786 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
aoqi@0 2787 METHOD, pc);
aoqi@0 2788 RESET_LAST_JAVA_FRAME();
aoqi@0 2789 CACHE_STATE();
aoqi@0 2790 if (THREAD->has_pending_exception()) goto handle_exception;
aoqi@0 2791 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
aoqi@0 2792 handle_exception);
aoqi@0 2793
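// Re-dispatch the bytecode that the breakpoint replaced so the original
// instruction still executes after the breakpoint has been reported.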
aoqi@0 2794 opcode = (jubyte)original_bytecode;
aoqi@0 2795 goto opcode_switch;
aoqi@0 2796 }
aoqi@0 2797
aoqi@0 2798 DEFAULT:
aoqi@0 2799 fatal(err_msg("Unimplemented opcode %d = %s", opcode,
aoqi@0 2800 Bytecodes::name((Bytecodes::Code)opcode)));
aoqi@0 2801 goto finish;
aoqi@0 2802
aoqi@0 2803 } /* switch(opc) */
aoqi@0 2804
aoqi@0 2805
aoqi@0 2806 #ifdef USELABELS
aoqi@0 2807 check_for_exception:
aoqi@0 2808 #endif
aoqi@0 2809 {
aoqi@0 2810 if (!THREAD->has_pending_exception()) {
aoqi@0 2811 CONTINUE;
aoqi@0 2812 }
aoqi@0 2813 /* We will be gcsafe soon, so flush our state. */
aoqi@0 2814 DECACHE_PC();
aoqi@0 2815 goto handle_exception;
aoqi@0 2816 }
aoqi@0 2817 do_continue: ;
aoqi@0 2818
aoqi@0 2819 } /* while (1) interpreter loop */
aoqi@0 2820
aoqi@0 2821
aoqi@0 2822 // An exception exists in the thread state; see whether this activation can handle it.
aoqi@0 2823 handle_exception: {
aoqi@0 2824
aoqi@0 2825 HandleMarkCleaner __hmc(THREAD);
aoqi@0 2826 Handle except_oop(THREAD, THREAD->pending_exception());
aoqi@0 2827 // Prevent any subsequent HandleMarkCleaner in the VM
aoqi@0 2828 // from freeing the except_oop handle.
aoqi@0 2829 HandleMark __hm(THREAD);
aoqi@0 2830
aoqi@0 2831 THREAD->clear_pending_exception();
aoqi@0 2832 assert(except_oop(), "No exception to process");
aoqi@0 2833 intptr_t continuation_bci;
aoqi@0 2834 // expression stack is emptied
aoqi@0 2835 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
aoqi@0 2836 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
aoqi@0 2837 handle_exception);
aoqi@0 2838
aoqi@0 2839 except_oop = THREAD->vm_result();
aoqi@0 2840 THREAD->set_vm_result(NULL);
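// A non-negative continuation_bci means this method has a handler for the
// exception: push the exception and resume there. Otherwise re-install the
// pending exception and unwind to the caller via handle_return.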
aoqi@0 2841 if (continuation_bci >= 0) {
aoqi@0 2842 // Place exception on top of stack
aoqi@0 2843 SET_STACK_OBJECT(except_oop(), 0);
aoqi@0 2844 MORE_STACK(1);
aoqi@0 2845 pc = METHOD->code_base() + continuation_bci;
aoqi@0 2846 if (TraceExceptions) {
aoqi@0 2847 ttyLocker ttyl;
aoqi@0 2848 ResourceMark rm;
aoqi@0 2849 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
aoqi@0 2850 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
aoqi@0 2851 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
aoqi@0 2852 istate->bcp() - (intptr_t)METHOD->code_base(),
aoqi@0 2853 continuation_bci, THREAD);
aoqi@0 2854 }
aoqi@0 2855 // for AbortVMOnException flag
aoqi@0 2856 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
aoqi@0 2857
aoqi@0 2858 // Update profiling data.
aoqi@0 2859 BI_PROFILE_ALIGN_TO_CURRENT_BCI();
aoqi@0 2860 goto run;
aoqi@0 2861 }
aoqi@0 2862 if (TraceExceptions) {
aoqi@0 2863 ttyLocker ttyl;
aoqi@0 2864 ResourceMark rm;
aoqi@0 2865 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
aoqi@0 2866 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
aoqi@0 2867 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
aoqi@0 2868 istate->bcp() - (intptr_t)METHOD->code_base(),
aoqi@0 2869 THREAD);
aoqi@0 2870 }
aoqi@0 2871 // for AbortVMOnException flag
aoqi@0 2872 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
aoqi@0 2873 // No handler in this activation, unwind and try again
aoqi@0 2874 THREAD->set_pending_exception(except_oop(), NULL, 0);
aoqi@0 2875 goto handle_return;
aoqi@0 2876 } // handle_exception:
aoqi@0 2877
aoqi@0 2878 // Return from an interpreter invocation with the result of the interpretation
aoqi@0 2879 // on the top of the Java Stack (or a pending exception)
aoqi@0 2880
aoqi@0 2881 handle_Pop_Frame: {
aoqi@0 2882
aoqi@0 2883 // We don't really do anything special here, except that we must be aware
aoqi@0 2884 // that we can get here without ever having locked the method (if it is synchronized).
aoqi@0 2885 // Also we skip the notification of the exit.
aoqi@0 2886
aoqi@0 2887 istate->set_msg(popping_frame);
aoqi@0 2888 // Clear the pending flag so that, while the pop is in process,
aoqi@0 2889 // we don't start another one if a call_vm is done.
aoqi@0 2890 THREAD->clr_pop_frame_pending();
aoqi@0 2891 // Let the interpreter (only) see that we're in the process of popping a frame
aoqi@0 2892 THREAD->set_pop_frame_in_process();
aoqi@0 2893
aoqi@0 2894 goto handle_return;
aoqi@0 2895
aoqi@0 2896 } // handle_Pop_Frame
aoqi@0 2897
aoqi@0 2898 // ForceEarlyReturn ends a method, and returns to the caller with a return value
aoqi@0 2899 // given by the invoker of the early return.
aoqi@0 2900 handle_Early_Return: {
aoqi@0 2901
aoqi@0 2902 istate->set_msg(early_return);
aoqi@0 2903
aoqi@0 2904 // Clear expression stack.
aoqi@0 2905 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
aoqi@0 2906
aoqi@0 2907 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
aoqi@0 2908
aoqi@0 2909 // Push the value to be returned.
aoqi@0 2910 switch (istate->method()->result_type()) {
aoqi@0 2911 case T_BOOLEAN:
aoqi@0 2912 case T_SHORT:
aoqi@0 2913 case T_BYTE:
aoqi@0 2914 case T_CHAR:
aoqi@0 2915 case T_INT:
aoqi@0 2916 SET_STACK_INT(ts->earlyret_value().i, 0);
aoqi@0 2917 MORE_STACK(1);
aoqi@0 2918 break;
aoqi@0 2919 case T_LONG:
aoqi@0 2920 SET_STACK_LONG(ts->earlyret_value().j, 1);
aoqi@0 2921 MORE_STACK(2);
aoqi@0 2922 break;
aoqi@0 2923 case T_FLOAT:
aoqi@0 2924 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
aoqi@0 2925 MORE_STACK(1);
aoqi@0 2926 break;
aoqi@0 2927 case T_DOUBLE:
aoqi@0 2928 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
aoqi@0 2929 MORE_STACK(2);
aoqi@0 2930 break;
aoqi@0 2931 case T_ARRAY:
aoqi@0 2932 case T_OBJECT:
aoqi@0 2933 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
aoqi@0 2934 MORE_STACK(1);
aoqi@0 2935 break;
aoqi@0 2936 }
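// Long and double results occupy two expression-stack slots, hence MORE_STACK(2)
// above; every other result category takes a single slot.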
aoqi@0 2937
aoqi@0 2938 ts->clr_earlyret_value();
aoqi@0 2939 ts->set_earlyret_oop(NULL);
aoqi@0 2940 ts->clr_earlyret_pending();
aoqi@0 2941
aoqi@0 2942 // Fall through to handle_return.
aoqi@0 2943
aoqi@0 2944 } // handle_Early_Return
aoqi@0 2945
aoqi@0 2946 handle_return: {
aoqi@0 2947 // A storestore barrier is required to order initialization of
aoqi@0 2948 // final fields with publishing the reference to the object that
aoqi@0 2949 // holds the field. Without the barrier the value of final fields
aoqi@0 2950 // can be observed to change.
aoqi@0 2951 OrderAccess::storestore();
aoqi@0 2952
aoqi@0 2953 DECACHE_STATE();
aoqi@0 2954
aoqi@0 2955 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
aoqi@0 2956 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
aoqi@0 2957 Handle original_exception(THREAD, THREAD->pending_exception());
aoqi@0 2958 Handle illegal_state_oop(THREAD, NULL);
aoqi@0 2959
aoqi@0 2960 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
aoqi@0 2961 // in any following VM entries from freeing our live handles, but illegal_state_oop
aoqi@0 2962 // isn't really allocated yet and so doesn't become live until later and
aoqi@0 2963 // in unpredictable places. Instead we must protect the places where we enter the
aoqi@0 2964 // VM. It would be much simpler (and safer) if we could allocate a real handle with
aoqi@0 2965 // a NULL oop in it and then overwrite the oop later as needed. Unfortunately this
aoqi@0 2966 // isn't possible.
aoqi@0 2967
aoqi@0 2968 THREAD->clear_pending_exception();
aoqi@0 2969
aoqi@0 2970 //
aoqi@0 2971 // As far as we are concerned we have returned. If we have a pending exception
aoqi@0 2972 // that will be returned as this invocation's result. However, if we get any
aoqi@0 2973 // exception(s) while checking monitor state, one of those IllegalMonitorStateExceptions
aoqi@0 2974 // will be our final result (i.e. a monitor exception trumps a pending exception).
aoqi@0 2975 //
aoqi@0 2976
aoqi@0 2977 // If we never locked the method (or really passed the point where we would have),
aoqi@0 2978 // there is no need to unlock it (or look for other monitors), since that
aoqi@0 2979 // could not have happened.
aoqi@0 2980
aoqi@0 2981 if (THREAD->do_not_unlock()) {
aoqi@0 2982
aoqi@0 2983 // Never locked, reset the flag now because obviously any caller must
aoqi@0 2984 // have passed their point of locking for us to have gotten here.
aoqi@0 2985
aoqi@0 2986 THREAD->clr_do_not_unlock();
aoqi@0 2987 } else {
aoqi@0 2988 // At this point we consider that we have returned. We now check that the
aoqi@0 2989 // locks were properly block structured. If we find that they were not
aoqi@0 2990 // used properly we will return with an illegal monitor exception.
aoqi@0 2991 // The exception is checked by the caller, not the callee, since this
aoqi@0 2992 // checking is considered to be part of the invocation and therefore
aoqi@0 2993 // in the caller's scope (JVM spec 8.13).
aoqi@0 2994 //
aoqi@0 2995 // Another weird thing to watch for is if the method was locked
aoqi@0 2996 // recursively and then not exited properly. This means we must
aoqi@0 2997 // examine all the entries in reverse time (and stack) order and
aoqi@0 2998 // unlock as we find them. If we find the method monitor before
aoqi@0 2999 // we are at the initial entry, then we should throw an exception.
aoqi@0 3000 // It is not clear that the template-based interpreter does this
aoqi@0 3001 // correctly.
aoqi@0 3002
aoqi@0 3003 BasicObjectLock* base = istate->monitor_base();
aoqi@0 3004 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
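// The monitor block sits between stack_base and monitor_base; walking from 'end'
// toward 'base' visits the monitors in reverse acquisition order, as described above.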
aoqi@0 3005 bool method_unlock_needed = METHOD->is_synchronized();
aoqi@0 3006 // We know the initial monitor was used for the method, so don't check that
aoqi@0 3007 // slot in the loop.
aoqi@0 3008 if (method_unlock_needed) base--;
aoqi@0 3009
aoqi@0 3010 // Check all the monitors to see that they are unlocked. Install an exception if any is found to be locked.
aoqi@0 3011 while (end < base) {
aoqi@0 3012 oop lockee = end->obj();
aoqi@0 3013 if (lockee != NULL) {
aoqi@0 3014 BasicLock* lock = end->lock();
aoqi@0 3015 markOop header = lock->displaced_header();
aoqi@0 3016 end->set_obj(NULL);
aoqi@0 3017
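// Biased locks have no displaced header to restore. For stack locks, a non-NULL
// displaced header means this is not a recursive enter, so try to CAS the header
// back into the object's mark word and fall back to the runtime monitorexit if
// the CAS fails (e.g. the lock was inflated or is contended).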
aoqi@0 3018 if (!lockee->mark()->has_bias_pattern()) {
aoqi@0 3019 // If it isn't recursive we either must swap old header or call the runtime
aoqi@0 3020 if (header != NULL) {
aoqi@0 3021 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
aoqi@0 3022 // restore object for the slow case
aoqi@0 3023 end->set_obj(lockee);
aoqi@0 3024 {
aoqi@0 3025 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3026 HandleMark __hm(THREAD);
aoqi@0 3027 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
aoqi@0 3028 }
aoqi@0 3029 }
aoqi@0 3030 }
aoqi@0 3031 }
aoqi@0 3032 // One error is plenty
aoqi@0 3033 if (illegal_state_oop() == NULL && !suppress_error) {
aoqi@0 3034 {
aoqi@0 3035 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3036 HandleMark __hm(THREAD);
aoqi@0 3037 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
aoqi@0 3038 }
aoqi@0 3039 assert(THREAD->has_pending_exception(), "Lost our exception!");
aoqi@0 3040 illegal_state_oop = THREAD->pending_exception();
aoqi@0 3041 THREAD->clear_pending_exception();
aoqi@0 3042 }
aoqi@0 3043 }
aoqi@0 3044 end++;
aoqi@0 3045 }
aoqi@0 3046 // Unlock the method if needed
aoqi@0 3047 if (method_unlock_needed) {
aoqi@0 3048 if (base->obj() == NULL) {
aoqi@0 3049 // The method is already unlocked; this is not good.
aoqi@0 3050 if (illegal_state_oop() == NULL && !suppress_error) {
aoqi@0 3051 {
aoqi@0 3052 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3053 HandleMark __hm(THREAD);
aoqi@0 3054 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
aoqi@0 3055 }
aoqi@0 3056 assert(THREAD->has_pending_exception(), "Lost our exception!");
aoqi@0 3057 illegal_state_oop = THREAD->pending_exception();
aoqi@0 3058 THREAD->clear_pending_exception();
aoqi@0 3059 }
aoqi@0 3060 } else {
aoqi@0 3061 //
aoqi@0 3062 // The initial monitor is always used for the method.
aoqi@0 3063 // However, if that slot is no longer the oop for the method, it was unlocked
aoqi@0 3064 // and reused by something that wasn't unlocked!
aoqi@0 3065 //
aoqi@0 3066 // deopt can come in with rcvr dead because c2 knows
aoqi@0 3067 // its value is preserved in the monitor. So we can't use locals[0] at all
aoqi@0 3068 // and must use the first monitor slot.
aoqi@0 3069 //
aoqi@0 3070 oop rcvr = base->obj();
aoqi@0 3071 if (rcvr == NULL) {
aoqi@0 3072 if (!suppress_error) {
aoqi@0 3073 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
aoqi@0 3074 illegal_state_oop = THREAD->pending_exception();
aoqi@0 3075 THREAD->clear_pending_exception();
aoqi@0 3076 }
aoqi@0 3077 } else if (UseHeavyMonitors) {
aoqi@0 3078 {
aoqi@0 3079 // Prevent any HandleMarkCleaner from freeing our live handles.
aoqi@0 3080 HandleMark __hm(THREAD);
aoqi@0 3081 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
aoqi@0 3082 }
aoqi@0 3083 if (THREAD->has_pending_exception()) {
aoqi@0 3084 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
aoqi@0 3085 THREAD->clear_pending_exception();
aoqi@0 3086 }
aoqi@0 3087 } else {
aoqi@0 3088 BasicLock* lock = base->lock();
aoqi@0 3089 markOop header = lock->displaced_header();
aoqi@0 3090 base->set_obj(NULL);
aoqi@0 3091
aoqi@0 3092 if (!rcvr->mark()->has_bias_pattern()) {
aoqi@0 3093 base->set_obj(NULL);
aoqi@0 3094 // If it isn't recursive we either must swap old header or call the runtime
aoqi@0 3095 if (header != NULL) {
aoqi@0 3096 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
aoqi@0 3097 // restore object for the slow case
aoqi@0 3098 base->set_obj(rcvr);
aoqi@0 3099 {
aoqi@0 3100 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3101 HandleMark __hm(THREAD);
aoqi@0 3102 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
aoqi@0 3103 }
aoqi@0 3104 if (THREAD->has_pending_exception()) {
aoqi@0 3105 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
aoqi@0 3106 THREAD->clear_pending_exception();
aoqi@0 3107 }
aoqi@0 3108 }
aoqi@0 3109 }
aoqi@0 3110 }
aoqi@0 3111 }
aoqi@0 3112 }
aoqi@0 3113 }
aoqi@0 3114 }
aoqi@0 3115 // Clear the do_not_unlock flag now.
aoqi@0 3116 THREAD->clr_do_not_unlock();
aoqi@0 3117
aoqi@0 3118 //
aoqi@0 3119 // Notify jvmti/jvmdi
aoqi@0 3120 //
aoqi@0 3121 // NOTE: we do not notify a method_exit if we have a pending exception,
aoqi@0 3122 // including an exception we generate for unlocking checks. In the former
aoqi@0 3123 // case, JVMDI has already been notified by our call for the exception handler,
aoqi@0 3124 // and in both cases, as far as JVMDI is concerned, we have already returned.
aoqi@0 3125 // If we notify it again, JVMDI will be all confused about how many frames
aoqi@0 3126 // are still on the stack (4340444).
aoqi@0 3127 //
aoqi@0 3128 // NOTE Further! It turns out that the JVMTI spec in fact expects to see
aoqi@0 3129 // method_exit events whenever we leave an activation, unless it was done
aoqi@0 3130 // for popframe. This is nothing like jvmdi. However, we are passing the
aoqi@0 3131 // tests at the moment (apparently because they are jvmdi based), so rather
aoqi@0 3132 // than change this code and possibly fail tests, we will leave it alone
aoqi@0 3133 // (with this note) in anticipation of changing the vm and the tests
aoqi@0 3134 // simultaneously.
aoqi@0 3135
aoqi@0 3136
aoqi@0 3137 //
aoqi@0 3138 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
aoqi@0 3139
aoqi@0 3140
aoqi@0 3141
aoqi@0 3142 #ifdef VM_JVMTI
aoqi@0 3143 if (_jvmti_interp_events) {
aoqi@0 3144 // Whenever JVMTI puts a thread in interp_only_mode, method
aoqi@0 3145 // entry/exit events are sent for that thread to track stack depth.
aoqi@0 3146 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
aoqi@0 3147 {
aoqi@0 3148 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3149 HandleMark __hm(THREAD);
aoqi@0 3150 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
aoqi@0 3151 }
aoqi@0 3152 }
aoqi@0 3153 }
aoqi@0 3154 #endif /* VM_JVMTI */
aoqi@0 3155
aoqi@0 3156 //
aoqi@0 3157 // See if we are returning any exception
aoqi@0 3158 // A pending exception that was pending prior to a possible popping frame
aoqi@0 3159 // overrides the popping frame.
aoqi@0 3160 //
aoqi@0 3161 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
aoqi@0 3162 if (illegal_state_oop() != NULL || original_exception() != NULL) {
aoqi@0 3163 // Inform the frame manager we have no result.
aoqi@0 3164 istate->set_msg(throwing_exception);
aoqi@0 3165 if (illegal_state_oop() != NULL)
aoqi@0 3166 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
aoqi@0 3167 else
aoqi@0 3168 THREAD->set_pending_exception(original_exception(), NULL, 0);
aoqi@0 3169 UPDATE_PC_AND_RETURN(0);
aoqi@0 3170 }
aoqi@0 3171
aoqi@0 3172 if (istate->msg() == popping_frame) {
aoqi@0 3173 // Make it simpler on the assembly code and set the message for the frame-pop
aoqi@0 3174 // return.
aoqi@0 3175 if (istate->prev() == NULL) {
aoqi@0 3176 // We must be returning to a deoptimized frame (because popframe only happens between
aoqi@0 3177 // two interpreted frames). We need to save the current arguments in C heap so that
aoqi@0 3178 // the deoptimized frame when it restarts can copy the arguments to its expression
aoqi@0 3179 // stack and re-execute the call. We also have to notify deoptimization that this
aoqi@0 3180 // has occurred and to pick up the preserved args and copy them to the deoptimized frame's
aoqi@0 3181 // java expression stack. Yuck.
aoqi@0 3182 //
aoqi@0 3183 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
aoqi@0 3184 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
aoqi@0 3185 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
aoqi@0 3186 }
aoqi@0 3187 } else {
aoqi@0 3188 istate->set_msg(return_from_method);
aoqi@0 3189 }
aoqi@0 3190
aoqi@0 3191 // Normal return
aoqi@0 3192 // Advance the pc and return to frame manager
aoqi@0 3193 UPDATE_PC_AND_RETURN(1);
aoqi@0 3194 } /* handle_return: */
aoqi@0 3195
aoqi@0 3196 // This is really a fatal error return
aoqi@0 3197
aoqi@0 3198 finish:
aoqi@0 3199 DECACHE_TOS();
aoqi@0 3200 DECACHE_PC();
aoqi@0 3201
aoqi@0 3202 return;
aoqi@0 3203 }
aoqi@0 3204
aoqi@0 3205 /*
aoqi@0 3206 * All the code following this point is only produced once and is not present
aoqi@0 3207 * in the JVMTI version of the interpreter
aoqi@0 3208 */
aoqi@0 3209
aoqi@0 3210 #ifndef VM_JVMTI
aoqi@0 3211
aoqi@0 3212 // This constructor should only be used to construct the object to signal
aoqi@0 3213 // interpreter initialization. All other instances should be created by
aoqi@0 3214 // the frame manager.
aoqi@0 3215 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
aoqi@0 3216 if (msg != initialize) ShouldNotReachHere();
aoqi@0 3217 _msg = msg;
aoqi@0 3218 _self_link = this;
aoqi@0 3219 _prev_link = NULL;
aoqi@0 3220 }
aoqi@0 3221
aoqi@0 3222 // Inline static functions for Java Stack and Local manipulation
aoqi@0 3223
aoqi@0 3224 // The implementations are platform dependent. We have to worry about alignment
aoqi@0 3225 // issues on some machines, which can change even on the same platform depending on
aoqi@0 3226 // whether it is an LP64 machine.
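// Offset convention (as used throughout this file): offset -1 is the current
// top-of-stack slot, -2 the slot beneath it, and so on; offset 0 names the first
// free slot above the top, which push-style code writes before growing the stack.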
aoqi@0 3227 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
aoqi@0 3228 return (address) tos[Interpreter::expr_index_at(-offset)];
aoqi@0 3229 }
aoqi@0 3230
aoqi@0 3231 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
aoqi@0 3232 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
aoqi@0 3233 }
aoqi@0 3234
aoqi@0 3235 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
aoqi@0 3236 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
aoqi@0 3237 }
aoqi@0 3238
aoqi@0 3239 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
aoqi@0 3240 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
aoqi@0 3241 }
aoqi@0 3242
aoqi@0 3243 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
aoqi@0 3244 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
aoqi@0 3245 }
aoqi@0 3246
aoqi@0 3247 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
aoqi@0 3248 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
aoqi@0 3249 }
aoqi@0 3250
aoqi@0 3251 // only used for value types
aoqi@0 3252 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
aoqi@0 3253 int offset) {
aoqi@0 3254 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3255 }
aoqi@0 3256
aoqi@0 3257 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
aoqi@0 3258 int offset) {
aoqi@0 3259 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3260 }
aoqi@0 3261
aoqi@0 3262 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
aoqi@0 3263 int offset) {
aoqi@0 3264 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3265 }
aoqi@0 3266
aoqi@0 3267 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
aoqi@0 3268 int offset) {
aoqi@0 3269 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3270 }
aoqi@0 3271
aoqi@0 3272 // needs to be platform dependent for the 32-bit platforms.
aoqi@0 3273 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
aoqi@0 3274 int offset) {
aoqi@0 3275 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
aoqi@0 3276 }
aoqi@0 3277
aoqi@0 3278 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
aoqi@0 3279 address addr, int offset) {
aoqi@0 3280 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
aoqi@0 3281 ((VMJavaVal64*)addr)->d);
aoqi@0 3282 }
aoqi@0 3283
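// For two-slot (long) values the unused half is filled with a recognizable junk
// pattern (0xdeedbeeb) so stale data is easier to spot when debugging; only the
// slot at 'offset' holds the real value. The same applies to the _from_addr
// variant below.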
aoqi@0 3284 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
aoqi@0 3285 int offset) {
aoqi@0 3286 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
aoqi@0 3287 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
aoqi@0 3288 }
aoqi@0 3289
aoqi@0 3290 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
aoqi@0 3291 address addr, int offset) {
aoqi@0 3292 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
aoqi@0 3293 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
aoqi@0 3294 ((VMJavaVal64*)addr)->l;
aoqi@0 3295 }
aoqi@0 3296
aoqi@0 3297 // Locals
aoqi@0 3298
aoqi@0 3299 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
aoqi@0 3300 return (address)locals[Interpreter::local_index_at(-offset)];
aoqi@0 3301 }
aoqi@0 3302 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
aoqi@0 3303 return (jint)locals[Interpreter::local_index_at(-offset)];
aoqi@0 3304 }
aoqi@0 3305 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
aoqi@0 3306 return (jfloat)locals[Interpreter::local_index_at(-offset)];
aoqi@0 3307 }
aoqi@0 3308 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
aoqi@0 3309 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
aoqi@0 3310 }
aoqi@0 3311 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
aoqi@0 3312 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
aoqi@0 3313 }
aoqi@0 3314 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
aoqi@0 3315 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
aoqi@0 3316 }
aoqi@0 3317
aoqi@0 3318 // Returns the address of locals value.
aoqi@0 3319 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
aoqi@0 3320 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
aoqi@0 3321 }
aoqi@0 3322 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
aoqi@0 3323 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
aoqi@0 3324 }
aoqi@0 3325
aoqi@0 3326 // Used for local value or returnAddress
aoqi@0 3327 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
aoqi@0 3328 address value, int offset) {
aoqi@0 3329 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3330 }
aoqi@0 3331 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
aoqi@0 3332 jint value, int offset) {
aoqi@0 3333 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3334 }
aoqi@0 3335 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
aoqi@0 3336 jfloat value, int offset) {
aoqi@0 3337 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3338 }
aoqi@0 3339 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
aoqi@0 3340 oop value, int offset) {
aoqi@0 3341 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3342 }
aoqi@0 3343 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
aoqi@0 3344 jdouble value, int offset) {
aoqi@0 3345 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
aoqi@0 3346 }
aoqi@0 3347 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
aoqi@0 3348 jlong value, int offset) {
aoqi@0 3349 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
aoqi@0 3350 }
aoqi@0 3351 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
aoqi@0 3352 address addr, int offset) {
aoqi@0 3353 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
aoqi@0 3354 }
aoqi@0 3355 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
aoqi@0 3356 address addr, int offset) {
aoqi@0 3357 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
aoqi@0 3358 }
aoqi@0 3359
aoqi@0 3360 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
aoqi@0 3361 intptr_t* locals, int locals_offset) {
aoqi@0 3362 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
aoqi@0 3363 locals[Interpreter::local_index_at(-locals_offset)] = value;
aoqi@0 3364 }
aoqi@0 3365
aoqi@0 3366
aoqi@0 3367 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
aoqi@0 3368 int to_offset) {
aoqi@0 3369 tos[Interpreter::expr_index_at(-to_offset)] =
aoqi@0 3370 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
aoqi@0 3371 }
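// The dup/swap helpers below are written purely in terms of copy_stack_slot, using
// the same offset convention as the accessors above: negative offsets are live
// stack slots (-1 is the top), zero and positive offsets are the free slots the
// caller exposes afterwards with MORE_STACK.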
aoqi@0 3372
aoqi@0 3373 void BytecodeInterpreter::dup(intptr_t *tos) {
aoqi@0 3374 copy_stack_slot(tos, -1, 0);
aoqi@0 3375 }
aoqi@0 3376 void BytecodeInterpreter::dup2(intptr_t *tos) {
aoqi@0 3377 copy_stack_slot(tos, -2, 0);
aoqi@0 3378 copy_stack_slot(tos, -1, 1);
aoqi@0 3379 }
aoqi@0 3380
aoqi@0 3381 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
aoqi@0 3382 /* insert top word two down */
aoqi@0 3383 copy_stack_slot(tos, -1, 0);
aoqi@0 3384 copy_stack_slot(tos, -2, -1);
aoqi@0 3385 copy_stack_slot(tos, 0, -2);
aoqi@0 3386 }
aoqi@0 3387
aoqi@0 3388 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
aoqi@0 3389 /* insert top word three down */
aoqi@0 3390 copy_stack_slot(tos, -1, 0);
aoqi@0 3391 copy_stack_slot(tos, -2, -1);
aoqi@0 3392 copy_stack_slot(tos, -3, -2);
aoqi@0 3393 copy_stack_slot(tos, 0, -3);
aoqi@0 3394 }
aoqi@0 3395 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
aoqi@0 3396 /* insert top 2 slots three down */
aoqi@0 3397 copy_stack_slot(tos, -1, 1);
aoqi@0 3398 copy_stack_slot(tos, -2, 0);
aoqi@0 3399 copy_stack_slot(tos, -3, -1);
aoqi@0 3400 copy_stack_slot(tos, 1, -2);
aoqi@0 3401 copy_stack_slot(tos, 0, -3);
aoqi@0 3402 }
aoqi@0 3403 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
aoqi@0 3404 /* insert top 2 slots four down */
aoqi@0 3405 copy_stack_slot(tos, -1, 1);
aoqi@0 3406 copy_stack_slot(tos, -2, 0);
aoqi@0 3407 copy_stack_slot(tos, -3, -1);
aoqi@0 3408 copy_stack_slot(tos, -4, -2);
aoqi@0 3409 copy_stack_slot(tos, 1, -3);
aoqi@0 3410 copy_stack_slot(tos, 0, -4);
aoqi@0 3411 }
aoqi@0 3412
aoqi@0 3413
aoqi@0 3414 void BytecodeInterpreter::swap(intptr_t *tos) {
aoqi@0 3415 // swap top two elements
aoqi@0 3416 intptr_t val = tos[Interpreter::expr_index_at(1)];
aoqi@0 3417 // Copy -2 entry to -1
aoqi@0 3418 copy_stack_slot(tos, -2, -1);
aoqi@0 3419 // Store saved -1 entry into -2
aoqi@0 3420 tos[Interpreter::expr_index_at(2)] = val;
aoqi@0 3421 }
aoqi@0 3422 // --------------------------------------------------------------------------------
aoqi@0 3423 // Non-product code
aoqi@0 3424 #ifndef PRODUCT
aoqi@0 3425
aoqi@0 3426 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
aoqi@0 3427 switch (msg) {
aoqi@0 3428 case BytecodeInterpreter::no_request: return("no_request");
aoqi@0 3429 case BytecodeInterpreter::initialize: return("initialize");
aoqi@0 3430 // status message to C++ interpreter
aoqi@0 3431 case BytecodeInterpreter::method_entry: return("method_entry");
aoqi@0 3432 case BytecodeInterpreter::method_resume: return("method_resume");
aoqi@0 3433 case BytecodeInterpreter::got_monitors: return("got_monitors");
aoqi@0 3434 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
aoqi@0 3435 // requests to frame manager from C++ interpreter
aoqi@0 3436 case BytecodeInterpreter::call_method: return("call_method");
aoqi@0 3437 case BytecodeInterpreter::return_from_method: return("return_from_method");
aoqi@0 3438 case BytecodeInterpreter::more_monitors: return("more_monitors");
aoqi@0 3439 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
aoqi@0 3440 case BytecodeInterpreter::popping_frame: return("popping_frame");
aoqi@0 3441 case BytecodeInterpreter::do_osr: return("do_osr");
aoqi@0 3442 // deopt
aoqi@0 3443 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
aoqi@0 3444 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
aoqi@0 3445 default: return("BAD MSG");
aoqi@0 3446 }
aoqi@0 3447 }
aoqi@0 3448 void
aoqi@0 3449 BytecodeInterpreter::print() {
aoqi@0 3450 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
aoqi@0 3451 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
aoqi@0 3452 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
aoqi@0 3453 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
aoqi@0 3454 {
aoqi@0 3455 ResourceMark rm;
aoqi@0 3456 char *method_name = _method->name_and_sig_as_C_string();
aoqi@0 3457 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
aoqi@0 3458 }
aoqi@0 3459 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
aoqi@0 3460 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
aoqi@0 3461 tty->print_cr("msg: %s", C_msg(this->_msg));
aoqi@0 3462 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
aoqi@0 3463 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
aoqi@0 3464 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
aoqi@0 3465 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
aoqi@0 3466 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
aoqi@0 3467 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
aoqi@0 3468 tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
aoqi@0 3469 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
aoqi@0 3470 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
aoqi@0 3471 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
aoqi@0 3472 #ifdef SPARC
aoqi@0 3473 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
aoqi@0 3474 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
aoqi@0 3475 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
aoqi@0 3476 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
aoqi@0 3477 #endif
aoqi@0 3478 #if !defined(ZERO)
aoqi@0 3479 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
aoqi@0 3480 #endif // !ZERO
aoqi@0 3481 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
aoqi@0 3482 }
aoqi@0 3483
aoqi@0 3484 extern "C" {
aoqi@0 3485 void PI(uintptr_t arg) {
aoqi@0 3486 ((BytecodeInterpreter*)arg)->print();
aoqi@0 3487 }
aoqi@0 3488 }
aoqi@0 3489 #endif // PRODUCT
aoqi@0 3490
aoqi@0 3491 #endif // JVMTI
aoqi@0 3492 #endif // CC_INTERP
