src/share/vm/interpreter/bytecodeInterpreter.cpp

author:      aoqi
date:        Wed, 03 Jul 2019 20:42:37 +0800
changeset:   9637:eef07cd490d4
parent:      9122:024be04bb151
permissions: -rw-r--r--

Merge

aoqi@0 1 /*
kevinw@8368 2 * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24
aoqi@0 25 // no precompiled headers
aoqi@0 26 #include "classfile/vmSymbols.hpp"
aoqi@0 27 #include "gc_interface/collectedHeap.hpp"
aoqi@0 28 #include "interpreter/bytecodeHistogram.hpp"
aoqi@0 29 #include "interpreter/bytecodeInterpreter.hpp"
aoqi@0 30 #include "interpreter/bytecodeInterpreter.inline.hpp"
aoqi@0 31 #include "interpreter/bytecodeInterpreterProfiling.hpp"
aoqi@0 32 #include "interpreter/interpreter.hpp"
aoqi@0 33 #include "interpreter/interpreterRuntime.hpp"
aoqi@0 34 #include "memory/resourceArea.hpp"
aoqi@0 35 #include "oops/methodCounters.hpp"
aoqi@0 36 #include "oops/objArrayKlass.hpp"
aoqi@0 37 #include "oops/oop.inline.hpp"
aoqi@0 38 #include "prims/jvmtiExport.hpp"
aoqi@0 39 #include "prims/jvmtiThreadState.hpp"
aoqi@0 40 #include "runtime/biasedLocking.hpp"
aoqi@0 41 #include "runtime/frame.inline.hpp"
aoqi@0 42 #include "runtime/handles.inline.hpp"
aoqi@0 43 #include "runtime/interfaceSupport.hpp"
goetz@6911 44 #include "runtime/orderAccess.inline.hpp"
aoqi@0 45 #include "runtime/sharedRuntime.hpp"
aoqi@0 46 #include "runtime/threadCritical.hpp"
aoqi@0 47 #include "utilities/exceptions.hpp"
aoqi@0 48
aoqi@0 49 // no precompiled headers
aoqi@0 50 #ifdef CC_INTERP
aoqi@0 51
aoqi@0 52 /*
aoqi@0 53 * USELABELS - If using GCC, then use labels for the opcode dispatching
aoqi@0 54 * rather than a switch statement. This improves performance because it
aoqi@0 55 * gives us the opportunity to have the instructions that calculate the
aoqi@0 56 * next opcode to jump to be intermixed with the rest of the instructions
aoqi@0 57 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
aoqi@0 58 */
aoqi@0 59 #undef USELABELS
aoqi@0 60 #ifdef __GNUC__
aoqi@0 61 /*
aoqi@0 62 ASSERT signifies debugging. It is much easier to step through bytecodes if we
aoqi@0 63 don't use the computed goto approach.
aoqi@0 64 */
aoqi@0 65 #ifndef ASSERT
aoqi@0 66 #define USELABELS
aoqi@0 67 #endif
aoqi@0 68 #endif
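// Illustrative sketch only (not part of this file): a minimal labels-as-values
// dispatch loop of the kind USELABELS enables, assuming GCC's computed-goto
// extension. The names run_dispatch_demo/handlers are hypothetical.
#if 0
static int run_dispatch_demo(const unsigned char* code) {
  // One handler label per opcode value; the real table below has 256 entries.
  static void* const handlers[] = { &&op_nop, &&op_halt };
  const unsigned char* pc = code;
  goto *handlers[*pc];                  // dispatch the first opcode
 op_nop:
  pc += 1;
  goto *handlers[*pc];                  // next-opcode dispatch is inlined in the handler
 op_halt:
  return 0;
}
#endif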
aoqi@0 69
aoqi@0 70 #undef CASE
aoqi@0 71 #ifdef USELABELS
aoqi@0 72 #define CASE(opcode) opc ## opcode
aoqi@0 73 #define DEFAULT opc_default
aoqi@0 74 #else
aoqi@0 75 #define CASE(opcode) case Bytecodes:: opcode
aoqi@0 76 #define DEFAULT default
aoqi@0 77 #endif
aoqi@0 78
aoqi@0 79 /*
aoqi@0 80 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
aoqi@0 81 * opcode before going back to the top of the while loop, rather than having
aoqi@0 82 * the top of the while loop handle it. This provides a better opportunity
aoqi@0 83 * for instruction scheduling. Some compilers just do this prefetch
aoqi@0 84 * automatically. Some actually end up with worse performance if you
aoqi@0 85 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
aoqi@0 86 */
aoqi@0 87 #undef PREFETCH_OPCCODE
aoqi@0 88 #define PREFETCH_OPCCODE
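// Illustrative sketch only (hypothetical names): with the prefetch variant the
// next opcode is loaded at the bottom of each handler, before the continue,
// so the load can be scheduled alongside the handler's own work.
#if 0
enum { OP_NOP = 0, OP_HALT = 1 };
static void run_prefetch_demo(const unsigned char* code) {
  const unsigned char* pc = code;
  unsigned char opcode = *pc;           // prefetch the first opcode
  while (true) {
    switch (opcode) {
    case OP_NOP:
      pc += 1;
      opcode = *pc;                     // prefetch the next opcode before looping back
      continue;
    case OP_HALT:
    default:
      return;
    }
  }
}
#endif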
aoqi@0 89
aoqi@0 90 /*
aoqi@0 91 Interpreter safepoint: it is expected that the interpreter will have no
aoqi@0 92 handles of its own creation live at an interpreter safepoint. Therefore we
aoqi@0 93 run a HandleMarkCleaner and trash all handles allocated in the call chain
aoqi@0 94 since the JavaCalls::call_helper invocation that initiated the chain.
aoqi@0 95 There really shouldn't be any handles remaining to trash but this is cheap
aoqi@0 96 in relation to a safepoint.
aoqi@0 97 */
aoqi@0 98 #define SAFEPOINT \
aoqi@0 99 if ( SafepointSynchronize::is_synchronizing()) { \
aoqi@0 100 { \
aoqi@0 101 /* zap freed handles rather than GC'ing them */ \
aoqi@0 102 HandleMarkCleaner __hmc(THREAD); \
aoqi@0 103 } \
aoqi@0 104 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
aoqi@0 105 }
aoqi@0 106
aoqi@0 107 /*
aoqi@0 108 * VM_JAVA_ERROR - Macro for throwing a java exception from
aoqi@0 109 * the interpreter loop. Should really be a CALL_VM but there
aoqi@0 110 * is no entry point to do the transition to vm so we just
aoqi@0 111 * do it by hand here.
aoqi@0 112 */
aoqi@0 113 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
aoqi@0 114 DECACHE_STATE(); \
aoqi@0 115 SET_LAST_JAVA_FRAME(); \
aoqi@0 116 { \
aoqi@0 117 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
aoqi@0 118 ThreadInVMfromJava trans(THREAD); \
aoqi@0 119 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
aoqi@0 120 } \
aoqi@0 121 RESET_LAST_JAVA_FRAME(); \
aoqi@0 122 CACHE_STATE();
aoqi@0 123
aoqi@0 124 // Normal throw of a java error.
aoqi@0 125 #define VM_JAVA_ERROR(name, msg, note_a_trap) \
aoqi@0 126 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
aoqi@0 127 goto handle_exception;
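// Illustrative use (mirrors the CHECK_NULL macro further below): throwing a
// NullPointerException from inside a handler looks roughly like this.
#if 0
  if (obj == NULL) {
    VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap);
  }
#endif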
aoqi@0 128
aoqi@0 129 #ifdef PRODUCT
aoqi@0 130 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
aoqi@0 131 #else
aoqi@0 132 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
aoqi@0 133 { \
aoqi@0 134 BytecodeCounter::_counter_value++; \
aoqi@0 135 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
aoqi@0 136 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
aoqi@0 137 if (TraceBytecodes) { \
aoqi@0 138 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
aoqi@0 139 topOfStack[Interpreter::expr_index_at(1)], \
aoqi@0 140 topOfStack[Interpreter::expr_index_at(2)]), \
aoqi@0 141 handle_exception); \
aoqi@0 142 } \
aoqi@0 143 }
aoqi@0 144 #endif
aoqi@0 145
aoqi@0 146 #undef DEBUGGER_SINGLE_STEP_NOTIFY
aoqi@0 147 #ifdef VM_JVMTI
aoqi@0 148 /* NOTE: (kbr) This macro must be called AFTER the PC has been
aoqi@0 149 incremented. JvmtiExport::at_single_stepping_point() may cause a
aoqi@0 150 breakpoint opcode to get inserted at the current PC to allow the
aoqi@0 151 debugger to coalesce single-step events.
aoqi@0 152
aoqi@0 153 As a result, if we call at_single_stepping_point() we refetch the opcode
aoqi@0 154 to get the current opcode. This will override any other prefetching
aoqi@0 155 that might have occurred.
aoqi@0 156 */
aoqi@0 157 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
aoqi@0 158 { \
aoqi@0 159 if (_jvmti_interp_events) { \
aoqi@0 160 if (JvmtiExport::should_post_single_step()) { \
aoqi@0 161 DECACHE_STATE(); \
aoqi@0 162 SET_LAST_JAVA_FRAME(); \
aoqi@0 163 ThreadInVMfromJava trans(THREAD); \
aoqi@0 164 JvmtiExport::at_single_stepping_point(THREAD, \
aoqi@0 165 istate->method(), \
aoqi@0 166 pc); \
aoqi@0 167 RESET_LAST_JAVA_FRAME(); \
aoqi@0 168 CACHE_STATE(); \
aoqi@0 169 if (THREAD->pop_frame_pending() && \
aoqi@0 170 !THREAD->pop_frame_in_process()) { \
aoqi@0 171 goto handle_Pop_Frame; \
aoqi@0 172 } \
aoqi@0 173 if (THREAD->jvmti_thread_state() && \
aoqi@0 174 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
aoqi@0 175 goto handle_Early_Return; \
aoqi@0 176 } \
aoqi@0 177 opcode = *pc; \
aoqi@0 178 } \
aoqi@0 179 } \
aoqi@0 180 }
aoqi@0 181 #else
aoqi@0 182 #define DEBUGGER_SINGLE_STEP_NOTIFY()
aoqi@0 183 #endif
aoqi@0 184
aoqi@0 185 /*
aoqi@0 186 * CONTINUE - Macro for executing the next opcode.
aoqi@0 187 */
aoqi@0 188 #undef CONTINUE
aoqi@0 189 #ifdef USELABELS
aoqi@0 190 // Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
aoqi@0 191 // initialization (which is the initialization of the table pointer...)
aoqi@0 192 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
aoqi@0 193 #define CONTINUE { \
aoqi@0 194 opcode = *pc; \
aoqi@0 195 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 196 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 197 DISPATCH(opcode); \
aoqi@0 198 }
aoqi@0 199 #else
aoqi@0 200 #ifdef PREFETCH_OPCCODE
aoqi@0 201 #define CONTINUE { \
aoqi@0 202 opcode = *pc; \
aoqi@0 203 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 204 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 205 continue; \
aoqi@0 206 }
aoqi@0 207 #else
aoqi@0 208 #define CONTINUE { \
aoqi@0 209 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 210 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 211 continue; \
aoqi@0 212 }
aoqi@0 213 #endif
aoqi@0 214 #endif
aoqi@0 215
aoqi@0 216
aoqi@0 217 #define UPDATE_PC(opsize) {pc += opsize; }
aoqi@0 218 /*
aoqi@0 219 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
aoqi@0 220 */
aoqi@0 221 #undef UPDATE_PC_AND_TOS
aoqi@0 222 #define UPDATE_PC_AND_TOS(opsize, stack) \
aoqi@0 223 {pc += opsize; MORE_STACK(stack); }
aoqi@0 224
aoqi@0 225 /*
aoqi@0 226 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
aoqi@0 227 * and executing the next opcode. It's somewhat similar to the combination
aoqi@0 228 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
aoqi@0 229 */
aoqi@0 230 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
aoqi@0 231 #ifdef USELABELS
aoqi@0 232 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
aoqi@0 233 pc += opsize; opcode = *pc; MORE_STACK(stack); \
aoqi@0 234 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 235 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 236 DISPATCH(opcode); \
aoqi@0 237 }
aoqi@0 238
aoqi@0 239 #define UPDATE_PC_AND_CONTINUE(opsize) { \
aoqi@0 240 pc += opsize; opcode = *pc; \
aoqi@0 241 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 242 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 243 DISPATCH(opcode); \
aoqi@0 244 }
aoqi@0 245 #else
aoqi@0 246 #ifdef PREFETCH_OPCCODE
aoqi@0 247 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
aoqi@0 248 pc += opsize; opcode = *pc; MORE_STACK(stack); \
aoqi@0 249 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 250 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 251 goto do_continue; \
aoqi@0 252 }
aoqi@0 253
aoqi@0 254 #define UPDATE_PC_AND_CONTINUE(opsize) { \
aoqi@0 255 pc += opsize; opcode = *pc; \
aoqi@0 256 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 257 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 258 goto do_continue; \
aoqi@0 259 }
aoqi@0 260 #else
aoqi@0 261 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
aoqi@0 262 pc += opsize; MORE_STACK(stack); \
aoqi@0 263 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 264 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 265 goto do_continue; \
aoqi@0 266 }
aoqi@0 267
aoqi@0 268 #define UPDATE_PC_AND_CONTINUE(opsize) { \
aoqi@0 269 pc += opsize; \
aoqi@0 270 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
aoqi@0 271 DEBUGGER_SINGLE_STEP_NOTIFY(); \
aoqi@0 272 goto do_continue; \
aoqi@0 273 }
aoqi@0 274 #endif /* PREFETCH_OPCCODE */
aoqi@0 275 #endif /* USELABELS */
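// Illustrative expansion (USELABELS variant): in a handler such as opc_iconst_0
// below, UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1) expands roughly to:
#if 0
  pc += 1; opcode = *pc; MORE_STACK(1);
  DO_UPDATE_INSTRUCTION_COUNT(opcode);
  DEBUGGER_SINGLE_STEP_NOTIFY();
  goto *(void*)dispatch_table[opcode];  // i.e. DISPATCH(opcode)
#endif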
aoqi@0 276
aoqi@0 277 // About to call a new method: save the adjusted pc and return to the frame manager
aoqi@0 278 #define UPDATE_PC_AND_RETURN(opsize) \
aoqi@0 279 DECACHE_TOS(); \
aoqi@0 280 istate->set_bcp(pc+opsize); \
aoqi@0 281 return;
aoqi@0 282
aoqi@0 283
aoqi@0 284 #define METHOD istate->method()
aoqi@0 285 #define GET_METHOD_COUNTERS(res) \
aoqi@0 286 res = METHOD->method_counters(); \
aoqi@0 287 if (res == NULL) { \
aoqi@0 288 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
aoqi@0 289 }
aoqi@0 290
aoqi@0 291 #define OSR_REQUEST(res, branch_pc) \
aoqi@0 292 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
aoqi@0 293 /*
aoqi@0 294 * For those opcodes that need to have a GC point on a backwards branch
aoqi@0 295 */
aoqi@0 296
aoqi@0 297 // Backedge counting is kind of strange. The asm interpreter will increment
aoqi@0 298 // the backedge counter as a separate counter but it does its comparisons
aoqi@0 299 // to the sum (scaled) of invocation counter and backedge count to make
aoqi@0 300 // a decision. Seems kind of odd to sum them together like that.
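// Hedged sketch of the decision described above (names and threshold are
// illustrative, not the actual InvocationCounter implementation): the OSR test
// is effectively against the combined invocation + backedge count.
#if 0
static bool reached_backward_branch_limit(unsigned invocation_count,
                                          unsigned backedge_count,
                                          unsigned limit) {
  return invocation_count + backedge_count >= limit;
}
#endif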
aoqi@0 301
aoqi@0 302 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
aoqi@0 303
aoqi@0 304
aoqi@0 305 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
aoqi@0 306 if ((skip) <= 0) { \
aoqi@0 307 MethodCounters* mcs; \
aoqi@0 308 GET_METHOD_COUNTERS(mcs); \
aoqi@0 309 if (UseLoopCounter) { \
aoqi@0 310 bool do_OSR = UseOnStackReplacement; \
aoqi@0 311 mcs->backedge_counter()->increment(); \
aoqi@0 312 if (ProfileInterpreter) { \
aoqi@0 313 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
aoqi@0 314 /* Check for overflow against MDO count. */ \
aoqi@0 315 do_OSR = do_OSR \
aoqi@0 316 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
aoqi@0 317 /* When ProfileInterpreter is on, the backedge_count comes */ \
aoqi@0 318 /* from the methodDataOop, which value does not get reset on */ \
aoqi@0 319 /* the call to frequency_counter_overflow(). To avoid */ \
aoqi@0 320 /* excessive calls to the overflow routine while the method is */ \
aoqi@0 321 /* being compiled, add a second test to make sure the overflow */ \
aoqi@0 322 /* function is called only once every overflow_frequency. */ \
aoqi@0 323 && (!(mdo_last_branch_taken_count & 1023)); \
aoqi@0 324 } else { \
aoqi@0 325 /* check for overflow of backedge counter */ \
aoqi@0 326 do_OSR = do_OSR \
aoqi@0 327 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
aoqi@0 328 } \
aoqi@0 329 if (do_OSR) { \
aoqi@0 330 nmethod* osr_nmethod; \
aoqi@0 331 OSR_REQUEST(osr_nmethod, branch_pc); \
aoqi@0 332 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
aoqi@0 333 intptr_t* buf; \
aoqi@0 334 /* Call OSR migration with last java frame only, no checks. */ \
aoqi@0 335 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
aoqi@0 336 istate->set_msg(do_osr); \
aoqi@0 337 istate->set_osr_buf((address)buf); \
aoqi@0 338 istate->set_osr_entry(osr_nmethod->osr_entry()); \
aoqi@0 339 return; \
aoqi@0 340 } \
aoqi@0 341 } \
aoqi@0 342 } /* UseCompiler ... */ \
aoqi@0 343 SAFEPOINT; \
aoqi@0 344 }
aoqi@0 345
aoqi@0 346 /*
aoqi@0 347 * For those opcodes that need to have a GC point on a backwards branch
aoqi@0 348 */
aoqi@0 349
aoqi@0 350 /*
aoqi@0 351 * Macros for caching and flushing the interpreter state. Some local
aoqi@0 352 * variables need to be flushed out to the frame before we do certain
aoqi@0 353 * things (like pushing frames or becoming gc safe) and some need to
aoqi@0 354 * be recached later (like after popping a frame). We could use one
aoqi@0 355 * macro to cache or decache everything, but this would be less than
aoqi@0 356 * optimal because we don't always need to cache or decache everything
aoqi@0 357 * because some things we know are already cached or decached.
aoqi@0 358 */
aoqi@0 359 #undef DECACHE_TOS
aoqi@0 360 #undef CACHE_TOS
aoqi@0 361 #undef CACHE_PREV_TOS
aoqi@0 362 #define DECACHE_TOS() istate->set_stack(topOfStack);
aoqi@0 363
aoqi@0 364 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
aoqi@0 365
aoqi@0 366 #undef DECACHE_PC
aoqi@0 367 #undef CACHE_PC
aoqi@0 368 #define DECACHE_PC() istate->set_bcp(pc);
aoqi@0 369 #define CACHE_PC() pc = istate->bcp();
aoqi@0 370 #define CACHE_CP() cp = istate->constants();
aoqi@0 371 #define CACHE_LOCALS() locals = istate->locals();
aoqi@0 372 #undef CACHE_FRAME
aoqi@0 373 #define CACHE_FRAME()
aoqi@0 374
aoqi@0 375 // BCI() returns the current bytecode-index.
aoqi@0 376 #undef BCI
aoqi@0 377 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
aoqi@0 378
aoqi@0 379 /*
aoqi@0 380 * CHECK_NULL - Macro for throwing a NullPointerException if the object
aoqi@0 381 * passed is a null ref.
aoqi@0 382 * On some architectures/platforms it should be possible to do this implicitly
aoqi@0 383 */
aoqi@0 384 #undef CHECK_NULL
aoqi@0 385 #define CHECK_NULL(obj_) \
aoqi@0 386 if ((obj_) == NULL) { \
aoqi@0 387 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
aoqi@0 388 } \
aoqi@0 389 VERIFY_OOP(obj_)
aoqi@0 390
aoqi@0 391 #define VMdoubleConstZero() 0.0
aoqi@0 392 #define VMdoubleConstOne() 1.0
aoqi@0 393 #define VMlongConstZero() (max_jlong-max_jlong)
aoqi@0 394 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
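// Note: the subtraction/addition trick above presumably exists to produce
// jlong-typed constants without assuming a 64-bit literal suffix.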
aoqi@0 395
aoqi@0 396 /*
aoqi@0 397 * Alignment
aoqi@0 398 */
aoqi@0 399 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
aoqi@0 400
aoqi@0 401 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
aoqi@0 402 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
aoqi@0 403
aoqi@0 404 // Reload interpreter state after calling the VM or a possible GC
aoqi@0 405 #define CACHE_STATE() \
aoqi@0 406 CACHE_TOS(); \
aoqi@0 407 CACHE_PC(); \
aoqi@0 408 CACHE_CP(); \
aoqi@0 409 CACHE_LOCALS();
aoqi@0 410
aoqi@0 411 // Call the VM with last java frame only.
aoqi@0 412 #define CALL_VM_NAKED_LJF(func) \
aoqi@0 413 DECACHE_STATE(); \
aoqi@0 414 SET_LAST_JAVA_FRAME(); \
aoqi@0 415 func; \
aoqi@0 416 RESET_LAST_JAVA_FRAME(); \
aoqi@0 417 CACHE_STATE();
aoqi@0 418
aoqi@0 419 // Call the VM. Don't check for pending exceptions.
aoqi@0 420 #define CALL_VM_NOCHECK(func) \
aoqi@0 421 CALL_VM_NAKED_LJF(func) \
aoqi@0 422 if (THREAD->pop_frame_pending() && \
aoqi@0 423 !THREAD->pop_frame_in_process()) { \
aoqi@0 424 goto handle_Pop_Frame; \
aoqi@0 425 } \
aoqi@0 426 if (THREAD->jvmti_thread_state() && \
aoqi@0 427 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
aoqi@0 428 goto handle_Early_Return; \
aoqi@0 429 }
aoqi@0 430
aoqi@0 431 // Call the VM and check for pending exceptions
aoqi@0 432 #define CALL_VM(func, label) { \
aoqi@0 433 CALL_VM_NOCHECK(func); \
aoqi@0 434 if (THREAD->has_pending_exception()) goto label; \
aoqi@0 435 }
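// Illustrative use (taken verbatim from the monitor-enter paths below): entering
// the VM from a handler flushes/reloads interpreter state and routes any pending
// exception to the given label.
#if 0
  CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
#endif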
aoqi@0 436
aoqi@0 437 /*
aoqi@0 438 * BytecodeInterpreter::run(interpreterState istate)
aoqi@0 439 * BytecodeInterpreter::runWithChecks(interpreterState istate)
aoqi@0 440 *
aoqi@0 441 * The real deal. This is where byte codes actually get interpreted.
aoqi@0 442 * Basically it's a big while loop that iterates until we return from
aoqi@0 443 * the method passed in.
aoqi@0 444 *
aoqi@0 445 * The runWithChecks is used if JVMTI is enabled.
aoqi@0 446 *
aoqi@0 447 */
aoqi@0 448 #if defined(VM_JVMTI)
aoqi@0 449 void
aoqi@0 450 BytecodeInterpreter::runWithChecks(interpreterState istate) {
aoqi@0 451 #else
aoqi@0 452 void
aoqi@0 453 BytecodeInterpreter::run(interpreterState istate) {
aoqi@0 454 #endif
aoqi@0 455
aoqi@0 456 // In order to simplify some tests based on switches set at runtime
aoqi@0 457 // we invoke the interpreter a single time after switches are enabled
aoqi@0 458 // and set simpler-to-test variables rather than method calls or complex
aoqi@0 459 // boolean expressions.
aoqi@0 460
aoqi@0 461 static int initialized = 0;
aoqi@0 462 static int checkit = 0;
aoqi@0 463 static intptr_t* c_addr = NULL;
aoqi@0 464 static intptr_t c_value;
aoqi@0 465
aoqi@0 466 if (checkit && *c_addr != c_value) {
aoqi@0 467 os::breakpoint();
aoqi@0 468 }
aoqi@0 469 #ifdef VM_JVMTI
aoqi@0 470 static bool _jvmti_interp_events = 0;
aoqi@0 471 #endif
aoqi@0 472
aoqi@0 473 static int _compiling; // (UseCompiler || CountCompiledCalls)
aoqi@0 474
aoqi@0 475 #ifdef ASSERT
aoqi@0 476 if (istate->_msg != initialize) {
aoqi@0 477 // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
aoqi@0 478 // because in that case, EnableInvokeDynamic is true by default but will be later switched off
aoqi@0 479 // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
aoqi@0 480 // for the old JSR292 implementation.
aoqi@0 481 // This leads to a situation where 'istate->_stack_limit' always accounts for
aoqi@0 482 // methodOopDesc::extra_stack_entries() because it is computed in
aoqi@0 483 // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
aoqi@0 484 // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
aoqi@0 485 // account for extra_stack_entries() anymore because at the time when it is called
aoqi@0 486 // EnableInvokeDynamic was already set to false.
aoqi@0 487 // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
aoqi@0 488 // switched off because of the wrong classes.
aoqi@0 489 if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
aoqi@0 490 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
aoqi@0 491 } else {
aoqi@0 492 const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
aoqi@0 493 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
aoqi@0 494 + 1), "bad stack limit");
aoqi@0 495 }
aoqi@0 496 #ifndef SHARK
aoqi@0 497 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
aoqi@0 498 #endif // !SHARK
aoqi@0 499 }
aoqi@0 500 // Verify linkages.
aoqi@0 501 interpreterState l = istate;
aoqi@0 502 do {
aoqi@0 503 assert(l == l->_self_link, "bad link");
aoqi@0 504 l = l->_prev_link;
aoqi@0 505 } while (l != NULL);
aoqi@0 506 // Screwups with stack management usually cause us to overwrite istate;
aoqi@0 507 // save a copy so we can verify it.
aoqi@0 508 interpreterState orig = istate;
aoqi@0 509 #endif
aoqi@0 510
aoqi@0 511 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
aoqi@0 512 register address pc = istate->bcp();
aoqi@0 513 register jubyte opcode;
aoqi@0 514 register intptr_t* locals = istate->locals();
aoqi@0 515 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
aoqi@0 516 #ifdef LOTS_OF_REGS
aoqi@0 517 register JavaThread* THREAD = istate->thread();
aoqi@0 518 #else
aoqi@0 519 #undef THREAD
aoqi@0 520 #define THREAD istate->thread()
aoqi@0 521 #endif
aoqi@0 522
aoqi@0 523 #ifdef USELABELS
aoqi@0 524 const static void* const opclabels_data[256] = {
aoqi@0 525 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
aoqi@0 526 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
aoqi@0 527 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
aoqi@0 528 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
aoqi@0 529
aoqi@0 530 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
aoqi@0 531 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
aoqi@0 532 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1,
aoqi@0 533 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
aoqi@0 534
aoqi@0 535 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
aoqi@0 536 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
aoqi@0 537 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
aoqi@0 538 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
aoqi@0 539
aoqi@0 540 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
aoqi@0 541 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
aoqi@0 542 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
aoqi@0 543 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
aoqi@0 544
aoqi@0 545 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
aoqi@0 546 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
aoqi@0 547 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
aoqi@0 548 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
aoqi@0 549
aoqi@0 550 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
aoqi@0 551 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
aoqi@0 552 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
aoqi@0 553 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
aoqi@0 554
aoqi@0 555 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
aoqi@0 556 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
aoqi@0 557 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
aoqi@0 558 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
aoqi@0 559
aoqi@0 560 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
aoqi@0 561 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
aoqi@0 562 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
aoqi@0 563 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
aoqi@0 564
aoqi@0 565 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
aoqi@0 566 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
aoqi@0 567 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
aoqi@0 568 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
aoqi@0 569
aoqi@0 570 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
aoqi@0 571 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
aoqi@0 572 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
aoqi@0 573 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
aoqi@0 574
aoqi@0 575 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
aoqi@0 576 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
aoqi@0 577 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch,
aoqi@0 578 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
aoqi@0 579
aoqi@0 580 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
aoqi@0 581 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
aoqi@0 582 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
aoqi@0 583 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
aoqi@0 584
aoqi@0 585 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
aoqi@0 586 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
aoqi@0 587 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default,
aoqi@0 588 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 589
aoqi@0 590 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 591 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 592 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 593 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 594
aoqi@0 595 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aph@8429 596 /* 0xE4 */ &&opc_default, &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w,
aph@8429 597 /* 0xE8 */ &&opc_return_register_finalizer,
aph@8429 598 &&opc_invokehandle, &&opc_default, &&opc_default,
aoqi@0 599 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 600
aoqi@0 601 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 602 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 603 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
aoqi@0 604 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
aoqi@0 605 };
aoqi@0 606 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
aoqi@0 607 #endif /* USELABELS */
aoqi@0 608
aoqi@0 609 #ifdef ASSERT
aoqi@0 610 // this will trigger a VERIFY_OOP on entry
aoqi@0 611 if (istate->msg() != initialize && ! METHOD->is_static()) {
aoqi@0 612 oop rcvr = LOCALS_OBJECT(0);
aoqi@0 613 VERIFY_OOP(rcvr);
aoqi@0 614 }
aoqi@0 615 #endif
aoqi@0 616 // #define HACK
aoqi@0 617 #ifdef HACK
aoqi@0 618 bool interesting = false;
aoqi@0 619 #endif // HACK
aoqi@0 620
aoqi@0 621 /* QQQ this should be a stack method so we don't know actual direction */
aoqi@0 622 guarantee(istate->msg() == initialize ||
aoqi@0 623 topOfStack >= istate->stack_limit() &&
aoqi@0 624 topOfStack < istate->stack_base(),
aoqi@0 625 "Stack top out of range");
aoqi@0 626
aoqi@0 627 #ifdef CC_INTERP_PROFILE
aoqi@0 628 // MethodData's last branch taken count.
aoqi@0 629 uint mdo_last_branch_taken_count = 0;
aoqi@0 630 #else
aoqi@0 631 const uint mdo_last_branch_taken_count = 0;
aoqi@0 632 #endif
aoqi@0 633
aoqi@0 634 switch (istate->msg()) {
aoqi@0 635 case initialize: {
aoqi@0 636 if (initialized++) ShouldNotReachHere(); // Only one initialize call.
aoqi@0 637 _compiling = (UseCompiler || CountCompiledCalls);
aoqi@0 638 #ifdef VM_JVMTI
aoqi@0 639 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
aoqi@0 640 #endif
aoqi@0 641 return;
aoqi@0 642 }
aoqi@0 643 break;
aoqi@0 644 case method_entry: {
aoqi@0 645 THREAD->set_do_not_unlock();
aoqi@0 646 // count invocations
aoqi@0 647 assert(initialized, "Interpreter not initialized");
aoqi@0 648 if (_compiling) {
aoqi@0 649 MethodCounters* mcs;
aoqi@0 650 GET_METHOD_COUNTERS(mcs);
aoqi@0 651 if (ProfileInterpreter) {
aoqi@0 652 METHOD->increment_interpreter_invocation_count(THREAD);
aoqi@0 653 }
aoqi@0 654 mcs->invocation_counter()->increment();
aoqi@0 655 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
aoqi@0 656 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
aoqi@0 657 // We no longer retry on a counter overflow.
aoqi@0 658 }
aoqi@0 659 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 660 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 661 SAFEPOINT;
aoqi@0 662 }
aoqi@0 663
aoqi@0 664 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
aoqi@0 665 // initialize
aoqi@0 666 os::breakpoint();
aoqi@0 667 }
aoqi@0 668
aoqi@0 669 #ifdef HACK
aoqi@0 670 {
aoqi@0 671 ResourceMark rm;
aoqi@0 672 char *method_name = istate->method()->name_and_sig_as_C_string();
aoqi@0 673 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
aoqi@0 674 tty->print_cr("entering: depth %d bci: %d",
aoqi@0 675 (istate->_stack_base - istate->_stack),
aoqi@0 676 istate->_bcp - istate->_method->code_base());
aoqi@0 677 interesting = true;
aoqi@0 678 }
aoqi@0 679 }
aoqi@0 680 #endif // HACK
aoqi@0 681
aoqi@0 682 // Lock method if synchronized.
aoqi@0 683 if (METHOD->is_synchronized()) {
aoqi@0 684 // oop rcvr = locals[0].j.r;
aoqi@0 685 oop rcvr;
aoqi@0 686 if (METHOD->is_static()) {
aoqi@0 687 rcvr = METHOD->constants()->pool_holder()->java_mirror();
aoqi@0 688 } else {
aoqi@0 689 rcvr = LOCALS_OBJECT(0);
aoqi@0 690 VERIFY_OOP(rcvr);
aoqi@0 691 }
aoqi@0 692 // The initial monitor is ours for the taking.
aoqi@0 693 // Monitor not filled in frame manager any longer as this caused a race condition with biased locking.
aoqi@0 694 BasicObjectLock* mon = &istate->monitor_base()[-1];
aoqi@0 695 mon->set_obj(rcvr);
aoqi@0 696 bool success = false;
aoqi@0 697 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
aoqi@0 698 markOop mark = rcvr->mark();
aoqi@0 699 intptr_t hash = (intptr_t) markOopDesc::no_hash;
aoqi@0 700 // Implies UseBiasedLocking.
aoqi@0 701 if (mark->has_bias_pattern()) {
aoqi@0 702 uintptr_t thread_ident;
aoqi@0 703 uintptr_t anticipated_bias_locking_value;
aoqi@0 704 thread_ident = (uintptr_t)istate->thread();
aoqi@0 705 anticipated_bias_locking_value =
aoqi@0 706 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
aoqi@0 707 ~((uintptr_t) markOopDesc::age_mask_in_place);
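      // (Descriptive note: if this XOR/mask comes out zero, the mark word already
      //  carries this thread's bias with the current epoch, age bits ignored;
      //  the branches below handle the remaining cases.)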
aoqi@0 708
aoqi@0 709 if (anticipated_bias_locking_value == 0) {
aoqi@0 710 // Already biased towards this thread, nothing to do.
aoqi@0 711 if (PrintBiasedLockingStatistics) {
aoqi@0 712 (* BiasedLocking::biased_lock_entry_count_addr())++;
aoqi@0 713 }
aoqi@0 714 success = true;
aoqi@0 715 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
aoqi@0 716 // Try to revoke bias.
aoqi@0 717 markOop header = rcvr->klass()->prototype_header();
aoqi@0 718 if (hash != markOopDesc::no_hash) {
aoqi@0 719 header = header->copy_set_hash(hash);
aoqi@0 720 }
aoqi@0 721 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
aoqi@0 722 if (PrintBiasedLockingStatistics)
aoqi@0 723 (*BiasedLocking::revoked_lock_entry_count_addr())++;
aoqi@0 724 }
aoqi@0 725 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
aoqi@0 726 // Try to rebias.
aoqi@0 727 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
aoqi@0 728 if (hash != markOopDesc::no_hash) {
aoqi@0 729 new_header = new_header->copy_set_hash(hash);
aoqi@0 730 }
aoqi@0 731 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
aoqi@0 732 if (PrintBiasedLockingStatistics) {
aoqi@0 733 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
aoqi@0 734 }
aoqi@0 735 } else {
aoqi@0 736 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
aoqi@0 737 }
aoqi@0 738 success = true;
aoqi@0 739 } else {
aoqi@0 740 // Try to bias towards thread in case object is anonymously biased.
aoqi@0 741 markOop header = (markOop) ((uintptr_t) mark &
aoqi@0 742 ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
aoqi@0 743 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
aoqi@0 744 if (hash != markOopDesc::no_hash) {
aoqi@0 745 header = header->copy_set_hash(hash);
aoqi@0 746 }
aoqi@0 747 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
aoqi@0 748 // Debugging hint.
aoqi@0 749 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
aoqi@0 750 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
aoqi@0 751 if (PrintBiasedLockingStatistics) {
aoqi@0 752 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
aoqi@0 753 }
aoqi@0 754 } else {
aoqi@0 755 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
aoqi@0 756 }
aoqi@0 757 success = true;
aoqi@0 758 }
aoqi@0 759 }
aoqi@0 760
aoqi@0 761 // Traditional lightweight locking.
aoqi@0 762 if (!success) {
aoqi@0 763 markOop displaced = rcvr->mark()->set_unlocked();
aoqi@0 764 mon->lock()->set_displaced_header(displaced);
aoqi@0 765 bool call_vm = UseHeavyMonitors;
aoqi@0 766 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
aoqi@0 767 // Is it simple recursive case?
aoqi@0 768 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
aoqi@0 769 mon->lock()->set_displaced_header(NULL);
aoqi@0 770 } else {
aoqi@0 771 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
aoqi@0 772 }
aoqi@0 773 }
aoqi@0 774 }
aoqi@0 775 }
aoqi@0 776 THREAD->clr_do_not_unlock();
aoqi@0 777
aoqi@0 778 // Notify jvmti
aoqi@0 779 #ifdef VM_JVMTI
aoqi@0 780 if (_jvmti_interp_events) {
aoqi@0 781 // Whenever JVMTI puts a thread in interp_only_mode, method
aoqi@0 782 // entry/exit events are sent for that thread to track stack depth.
aoqi@0 783 if (THREAD->is_interp_only_mode()) {
aoqi@0 784 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
aoqi@0 785 handle_exception);
aoqi@0 786 }
aoqi@0 787 }
aoqi@0 788 #endif /* VM_JVMTI */
aoqi@0 789
aoqi@0 790 goto run;
aoqi@0 791 }
aoqi@0 792
aoqi@0 793 case popping_frame: {
aoqi@0 794 // returned from a java call to pop the frame, restart the call
aoqi@0 795 // clear the message so we don't confuse ourselves later
aoqi@0 796 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
aoqi@0 797 istate->set_msg(no_request);
aoqi@0 798 if (_compiling) {
aoqi@0 799 // Set MDX back to the ProfileData of the invoke bytecode that will be
aoqi@0 800 // restarted.
aoqi@0 801 SET_MDX(NULL);
aoqi@0 802 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 803 }
aoqi@0 804 THREAD->clr_pop_frame_in_process();
aoqi@0 805 goto run;
aoqi@0 806 }
aoqi@0 807
aoqi@0 808 case method_resume: {
aoqi@0 809 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
aoqi@0 810 // resume
aoqi@0 811 os::breakpoint();
aoqi@0 812 }
aoqi@0 813 #ifdef HACK
aoqi@0 814 {
aoqi@0 815 ResourceMark rm;
aoqi@0 816 char *method_name = istate->method()->name_and_sig_as_C_string();
aoqi@0 817 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
aoqi@0 818 tty->print_cr("resume: depth %d bci: %d",
aoqi@0 819 (istate->_stack_base - istate->_stack) ,
aoqi@0 820 istate->_bcp - istate->_method->code_base());
aoqi@0 821 interesting = true;
aoqi@0 822 }
aoqi@0 823 }
aoqi@0 824 #endif // HACK
aoqi@0 825 // returned from a java call, continue executing.
aoqi@0 826 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
aoqi@0 827 goto handle_Pop_Frame;
aoqi@0 828 }
aoqi@0 829 if (THREAD->jvmti_thread_state() &&
aoqi@0 830 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
aoqi@0 831 goto handle_Early_Return;
aoqi@0 832 }
aoqi@0 833
aoqi@0 834 if (THREAD->has_pending_exception()) goto handle_exception;
aoqi@0 835 // Update the pc by the saved amount of the invoke bytecode size
aoqi@0 836 UPDATE_PC(istate->bcp_advance());
aoqi@0 837
aoqi@0 838 if (_compiling) {
aoqi@0 839 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 840 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 841 }
aoqi@0 842 goto run;
aoqi@0 843 }
aoqi@0 844
aoqi@0 845 case deopt_resume2: {
aoqi@0 846 // Returned from an opcode that will reexecute. Deopt was
aoqi@0 847 // a result of a PopFrame request.
aoqi@0 848 //
aoqi@0 849
aoqi@0 850 if (_compiling) {
aoqi@0 851 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 852 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 853 }
aoqi@0 854 goto run;
aoqi@0 855 }
aoqi@0 856
aoqi@0 857 case deopt_resume: {
aoqi@0 858 // Returned from an opcode that has completed. The stack has
aoqi@0 859 // the result; all we need to do is skip across the bytecode
aoqi@0 860 // and continue (assuming there is no exception pending)
aoqi@0 861 //
aoqi@0 862 // compute continuation length
aoqi@0 863 //
aoqi@0 864 // Note: it is possible to deopt at a return_register_finalizer opcode
aoqi@0 865 // because this requires entering the vm to do the registering. While the
aoqi@0 866 // opcode is complete we can't advance because there are no more opcodes,
aoqi@0 867 // much like trying to deopt at a poll return. In that case we simply
aoqi@0 868 // get out of here
aoqi@0 869 //
aoqi@0 870 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
aoqi@0 871 // this will do the right thing even if an exception is pending.
aoqi@0 872 goto handle_return;
aoqi@0 873 }
aoqi@0 874 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
aoqi@0 875 if (THREAD->has_pending_exception()) goto handle_exception;
aoqi@0 876
aoqi@0 877 if (_compiling) {
aoqi@0 878 // Get or create profile data. Check for pending (async) exceptions.
aoqi@0 879 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
aoqi@0 880 }
aoqi@0 881 goto run;
aoqi@0 882 }
aoqi@0 883 case got_monitors: {
aoqi@0 884 // continue locking now that we have a monitor to use
aoqi@0 885 // we expect to find a newly allocated monitor at the "top" of the monitor stack.
aoqi@0 886 oop lockee = STACK_OBJECT(-1);
aoqi@0 887 VERIFY_OOP(lockee);
aoqi@0 888 // dereferencing lockee ought to provoke an implicit null check
aoqi@0 889 // find a free monitor
aoqi@0 890 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
aoqi@0 891 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
aoqi@0 892 entry->set_obj(lockee);
aoqi@0 893 bool success = false;
aoqi@0 894 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
aoqi@0 895
aoqi@0 896 markOop mark = lockee->mark();
aoqi@0 897 intptr_t hash = (intptr_t) markOopDesc::no_hash;
aoqi@0 898 // implies UseBiasedLocking
aoqi@0 899 if (mark->has_bias_pattern()) {
aoqi@0 900 uintptr_t thread_ident;
aoqi@0 901 uintptr_t anticipated_bias_locking_value;
aoqi@0 902 thread_ident = (uintptr_t)istate->thread();
aoqi@0 903 anticipated_bias_locking_value =
aoqi@0 904 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
aoqi@0 905 ~((uintptr_t) markOopDesc::age_mask_in_place);
aoqi@0 906
aoqi@0 907 if (anticipated_bias_locking_value == 0) {
aoqi@0 908 // already biased towards this thread, nothing to do
aoqi@0 909 if (PrintBiasedLockingStatistics) {
aoqi@0 910 (* BiasedLocking::biased_lock_entry_count_addr())++;
aoqi@0 911 }
aoqi@0 912 success = true;
aoqi@0 913 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
aoqi@0 914 // try revoke bias
aoqi@0 915 markOop header = lockee->klass()->prototype_header();
aoqi@0 916 if (hash != markOopDesc::no_hash) {
aoqi@0 917 header = header->copy_set_hash(hash);
aoqi@0 918 }
aoqi@0 919 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
aoqi@0 920 if (PrintBiasedLockingStatistics) {
aoqi@0 921 (*BiasedLocking::revoked_lock_entry_count_addr())++;
aoqi@0 922 }
aoqi@0 923 }
aoqi@0 924 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
aoqi@0 925 // try rebias
aoqi@0 926 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
aoqi@0 927 if (hash != markOopDesc::no_hash) {
aoqi@0 928 new_header = new_header->copy_set_hash(hash);
aoqi@0 929 }
aoqi@0 930 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
aoqi@0 931 if (PrintBiasedLockingStatistics) {
aoqi@0 932 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
aoqi@0 933 }
aoqi@0 934 } else {
aoqi@0 935 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 936 }
aoqi@0 937 success = true;
aoqi@0 938 } else {
aoqi@0 939 // try to bias towards thread in case object is anonymously biased
aoqi@0 940 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
aoqi@0 941 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
aoqi@0 942 if (hash != markOopDesc::no_hash) {
aoqi@0 943 header = header->copy_set_hash(hash);
aoqi@0 944 }
aoqi@0 945 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
aoqi@0 946 // debugging hint
aoqi@0 947 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
aoqi@0 948 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
aoqi@0 949 if (PrintBiasedLockingStatistics) {
aoqi@0 950 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
aoqi@0 951 }
aoqi@0 952 } else {
aoqi@0 953 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 954 }
aoqi@0 955 success = true;
aoqi@0 956 }
aoqi@0 957 }
aoqi@0 958
aoqi@0 959 // traditional lightweight locking
aoqi@0 960 if (!success) {
aoqi@0 961 markOop displaced = lockee->mark()->set_unlocked();
aoqi@0 962 entry->lock()->set_displaced_header(displaced);
aoqi@0 963 bool call_vm = UseHeavyMonitors;
aoqi@0 964 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
aoqi@0 965 // Is it simple recursive case?
aoqi@0 966 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
aoqi@0 967 entry->lock()->set_displaced_header(NULL);
aoqi@0 968 } else {
aoqi@0 969 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 970 }
aoqi@0 971 }
aoqi@0 972 }
aoqi@0 973 UPDATE_PC_AND_TOS(1, -1);
aoqi@0 974 goto run;
aoqi@0 975 }
aoqi@0 976 default: {
aoqi@0 977 fatal("Unexpected message from frame manager");
aoqi@0 978 }
aoqi@0 979 }
aoqi@0 980
aoqi@0 981 run:
aoqi@0 982
aoqi@0 983 DO_UPDATE_INSTRUCTION_COUNT(*pc)
aoqi@0 984 DEBUGGER_SINGLE_STEP_NOTIFY();
aoqi@0 985 #ifdef PREFETCH_OPCCODE
aoqi@0 986 opcode = *pc; /* prefetch first opcode */
aoqi@0 987 #endif
aoqi@0 988
aoqi@0 989 #ifndef USELABELS
aoqi@0 990 while (1)
aoqi@0 991 #endif
aoqi@0 992 {
aoqi@0 993 #ifndef PREFETCH_OPCCODE
aoqi@0 994 opcode = *pc;
aoqi@0 995 #endif
aoqi@0 996 // Seems like this happens twice per opcode. At worst this is only
aoqi@0 997 // needed at entry to the loop.
aoqi@0 998 // DEBUGGER_SINGLE_STEP_NOTIFY();
aoqi@0 999 /* Using these labels avoids double breakpoints when quickening and
aoqi@0 1000 * when returning from transition frames.
aoqi@0 1001 */
aoqi@0 1002 opcode_switch:
aoqi@0 1003 assert(istate == orig, "Corrupted istate");
aoqi@0 1004 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
aoqi@0 1005 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
aoqi@0 1006 assert(topOfStack < istate->stack_base(), "Stack underrun");
aoqi@0 1007
aoqi@0 1008 #ifdef USELABELS
aoqi@0 1009 DISPATCH(opcode);
aoqi@0 1010 #else
aoqi@0 1011 switch (opcode)
aoqi@0 1012 #endif
aoqi@0 1013 {
aoqi@0 1014 CASE(_nop):
aoqi@0 1015 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1016
aoqi@0 1017 /* Push miscellaneous constants onto the stack. */
aoqi@0 1018
aoqi@0 1019 CASE(_aconst_null):
aoqi@0 1020 SET_STACK_OBJECT(NULL, 0);
aoqi@0 1021 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1022
aoqi@0 1023 #undef OPC_CONST_n
aoqi@0 1024 #define OPC_CONST_n(opcode, const_type, value) \
aoqi@0 1025 CASE(opcode): \
aoqi@0 1026 SET_STACK_ ## const_type(value, 0); \
aoqi@0 1027 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
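// Illustrative expansion: OPC_CONST_n(_iconst_m1, INT, -1) pastes into roughly
// the following handler (the same pattern applies to the other constants below).
#if 0
  CASE(_iconst_m1):
      SET_STACK_INT(-1, 0);
      UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
#endif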
aoqi@0 1028
aoqi@0 1029 OPC_CONST_n(_iconst_m1, INT, -1);
aoqi@0 1030 OPC_CONST_n(_iconst_0, INT, 0);
aoqi@0 1031 OPC_CONST_n(_iconst_1, INT, 1);
aoqi@0 1032 OPC_CONST_n(_iconst_2, INT, 2);
aoqi@0 1033 OPC_CONST_n(_iconst_3, INT, 3);
aoqi@0 1034 OPC_CONST_n(_iconst_4, INT, 4);
aoqi@0 1035 OPC_CONST_n(_iconst_5, INT, 5);
aoqi@0 1036 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
aoqi@0 1037 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
aoqi@0 1038 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
aoqi@0 1039
aoqi@0 1040 #undef OPC_CONST2_n
aoqi@0 1041 #define OPC_CONST2_n(opcname, value, key, kind) \
aoqi@0 1042 CASE(_##opcname): \
aoqi@0 1043 { \
aoqi@0 1044 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
aoqi@0 1045 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
aoqi@0 1046 }
aoqi@0 1047 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
aoqi@0 1048 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
aoqi@0 1049 OPC_CONST2_n(lconst_0, Zero, long, LONG);
aoqi@0 1050 OPC_CONST2_n(lconst_1, One, long, LONG);
aoqi@0 1051
aoqi@0 1052 /* Load constant from constant pool: */
aoqi@0 1053
aoqi@0 1054 /* Push a 1-byte signed integer value onto the stack. */
aoqi@0 1055 CASE(_bipush):
aoqi@0 1056 SET_STACK_INT((jbyte)(pc[1]), 0);
aoqi@0 1057 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
aoqi@0 1058
aoqi@0 1059 /* Push a 2-byte signed integer constant onto the stack. */
aoqi@0 1060 CASE(_sipush):
aoqi@0 1061 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
aoqi@0 1062 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
aoqi@0 1063
aoqi@0 1064 /* load from local variable */
aoqi@0 1065
aoqi@0 1066 CASE(_aload):
aoqi@0 1067 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
aoqi@0 1068 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
aoqi@0 1069 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
aoqi@0 1070
aoqi@0 1071 CASE(_iload):
aoqi@0 1072 CASE(_fload):
aoqi@0 1073 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
aoqi@0 1074 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
aoqi@0 1075
aoqi@0 1076 CASE(_lload):
aoqi@0 1077 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
aoqi@0 1078 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
aoqi@0 1079
aoqi@0 1080 CASE(_dload):
aoqi@0 1081 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
aoqi@0 1082 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
aoqi@0 1083
aoqi@0 1084 #undef OPC_LOAD_n
aoqi@0 1085 #define OPC_LOAD_n(num) \
aoqi@0 1086 CASE(_aload_##num): \
aoqi@0 1087 VERIFY_OOP(LOCALS_OBJECT(num)); \
aoqi@0 1088 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
aoqi@0 1089 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
aoqi@0 1090 \
aoqi@0 1091 CASE(_iload_##num): \
aoqi@0 1092 CASE(_fload_##num): \
aoqi@0 1093 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
aoqi@0 1094 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
aoqi@0 1095 \
aoqi@0 1096 CASE(_lload_##num): \
aoqi@0 1097 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
aoqi@0 1098 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
aoqi@0 1099 CASE(_dload_##num): \
aoqi@0 1100 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
aoqi@0 1101 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1102
aoqi@0 1103 OPC_LOAD_n(0);
aoqi@0 1104 OPC_LOAD_n(1);
aoqi@0 1105 OPC_LOAD_n(2);
aoqi@0 1106 OPC_LOAD_n(3);
aoqi@0 1107
aoqi@0 1108 /* store to a local variable */
aoqi@0 1109
aoqi@0 1110 CASE(_astore):
aoqi@0 1111 astore(topOfStack, -1, locals, pc[1]);
aoqi@0 1112 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
aoqi@0 1113
aoqi@0 1114 CASE(_istore):
aoqi@0 1115 CASE(_fstore):
aoqi@0 1116 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
aoqi@0 1117 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
aoqi@0 1118
aoqi@0 1119 CASE(_lstore):
aoqi@0 1120 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
aoqi@0 1121 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
aoqi@0 1122
aoqi@0 1123 CASE(_dstore):
aoqi@0 1124 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
aoqi@0 1125 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
aoqi@0 1126
aoqi@0 1127 CASE(_wide): {
aoqi@0 1128 uint16_t reg = Bytes::get_Java_u2(pc + 2);
aoqi@0 1129
aoqi@0 1130 opcode = pc[1];
aoqi@0 1131
aoqi@0 1132 // Wide and its sub-bytecode are counted as separate instructions. If we
aoqi@0 1133 // don't account for this here, the bytecode trace skips the next bytecode.
aoqi@0 1134 DO_UPDATE_INSTRUCTION_COUNT(opcode);
aoqi@0 1135
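      // Wide format: 0xC4, then the widened opcode, then a 2-byte local index
      // (plus a 2-byte immediate for iinc); hence the pc advances of 4 and 6 below.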
aoqi@0 1136 switch(opcode) {
aoqi@0 1137 case Bytecodes::_aload:
aoqi@0 1138 VERIFY_OOP(LOCALS_OBJECT(reg));
aoqi@0 1139 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
aoqi@0 1140 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
aoqi@0 1141
aoqi@0 1142 case Bytecodes::_iload:
aoqi@0 1143 case Bytecodes::_fload:
aoqi@0 1144 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
aoqi@0 1145 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
aoqi@0 1146
aoqi@0 1147 case Bytecodes::_lload:
aoqi@0 1148 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
aoqi@0 1149 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
aoqi@0 1150
aoqi@0 1151 case Bytecodes::_dload:
aoqi@0 1152 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
aoqi@0 1153 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
aoqi@0 1154
aoqi@0 1155 case Bytecodes::_astore:
aoqi@0 1156 astore(topOfStack, -1, locals, reg);
aoqi@0 1157 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
aoqi@0 1158
aoqi@0 1159 case Bytecodes::_istore:
aoqi@0 1160 case Bytecodes::_fstore:
aoqi@0 1161 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
aoqi@0 1162 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
aoqi@0 1163
aoqi@0 1164 case Bytecodes::_lstore:
aoqi@0 1165 SET_LOCALS_LONG(STACK_LONG(-1), reg);
aoqi@0 1166 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
aoqi@0 1167
aoqi@0 1168 case Bytecodes::_dstore:
aoqi@0 1169 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
aoqi@0 1170 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
aoqi@0 1171
aoqi@0 1172 case Bytecodes::_iinc: {
aoqi@0 1173 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
aoqi@0 1174 // Be nice to see what this generates.... QQQ
aoqi@0 1175 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
aoqi@0 1176 UPDATE_PC_AND_CONTINUE(6);
aoqi@0 1177 }
aoqi@0 1178 case Bytecodes::_ret:
aoqi@0 1179 // Profile ret.
aoqi@0 1180 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
aoqi@0 1181 // Now, update the pc.
aoqi@0 1182 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
aoqi@0 1183 UPDATE_PC_AND_CONTINUE(0);
aoqi@0 1184 default:
aoqi@0 1185 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
aoqi@0 1186 }
aoqi@0 1187 }
aoqi@0 1188
aoqi@0 1189
aoqi@0 1190 #undef OPC_STORE_n
aoqi@0 1191 #define OPC_STORE_n(num) \
aoqi@0 1192 CASE(_astore_##num): \
aoqi@0 1193 astore(topOfStack, -1, locals, num); \
aoqi@0 1194 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1195 CASE(_istore_##num): \
aoqi@0 1196 CASE(_fstore_##num): \
aoqi@0 1197 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
aoqi@0 1198 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1199
aoqi@0 1200 OPC_STORE_n(0);
aoqi@0 1201 OPC_STORE_n(1);
aoqi@0 1202 OPC_STORE_n(2);
aoqi@0 1203 OPC_STORE_n(3);
aoqi@0 1204
aoqi@0 1205 #undef OPC_DSTORE_n
aoqi@0 1206 #define OPC_DSTORE_n(num) \
aoqi@0 1207 CASE(_dstore_##num): \
aoqi@0 1208 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
aoqi@0 1209 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
aoqi@0 1210 CASE(_lstore_##num): \
aoqi@0 1211 SET_LOCALS_LONG(STACK_LONG(-1), num); \
aoqi@0 1212 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
aoqi@0 1213
aoqi@0 1214 OPC_DSTORE_n(0);
aoqi@0 1215 OPC_DSTORE_n(1);
aoqi@0 1216 OPC_DSTORE_n(2);
aoqi@0 1217 OPC_DSTORE_n(3);
aoqi@0 1218
aoqi@0 1219 /* stack pop, dup, and insert opcodes */
aoqi@0 1220
aoqi@0 1221
aoqi@0 1222 CASE(_pop): /* Discard the top item on the stack */
aoqi@0 1223 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1224
aoqi@0 1225
aoqi@0 1226 CASE(_pop2): /* Discard the top 2 items on the stack */
aoqi@0 1227 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
aoqi@0 1228
aoqi@0 1229
aoqi@0 1230 CASE(_dup): /* Duplicate the top item on the stack */
aoqi@0 1231 dup(topOfStack);
aoqi@0 1232 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1233
aoqi@0 1234 CASE(_dup2): /* Duplicate the top 2 items on the stack */
aoqi@0 1235 dup2(topOfStack);
aoqi@0 1236 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1237
aoqi@0 1238 CASE(_dup_x1): /* insert top word two down */
aoqi@0 1239 dup_x1(topOfStack);
aoqi@0 1240 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1241
aoqi@0 1242 CASE(_dup_x2): /* insert top word three down */
aoqi@0 1243 dup_x2(topOfStack);
aoqi@0 1244 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1245
aoqi@0 1246 CASE(_dup2_x1): /* insert top 2 slots three down */
aoqi@0 1247 dup2_x1(topOfStack);
aoqi@0 1248 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1249
aoqi@0 1250 CASE(_dup2_x2): /* insert top 2 slots four down */
aoqi@0 1251 dup2_x2(topOfStack);
aoqi@0 1252 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1253
aoqi@0 1254 CASE(_swap): { /* swap top two elements on the stack */
aoqi@0 1255 swap(topOfStack);
aoqi@0 1256 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1257 }
aoqi@0 1258
aoqi@0 1259 /* Perform various binary integer operations */
aoqi@0 1260
aoqi@0 1261 #undef OPC_INT_BINARY
aoqi@0 1262 #define OPC_INT_BINARY(opcname, opname, test) \
aoqi@0 1263 CASE(_i##opcname): \
aoqi@0 1264 if (test && (STACK_INT(-1) == 0)) { \
aoqi@0 1265 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
aoqi@0 1266 "/ by zero", note_div0Check_trap); \
aoqi@0 1267 } \
aoqi@0 1268 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
aoqi@0 1269 STACK_INT(-1)), \
aoqi@0 1270 -2); \
aoqi@0 1271 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1272 CASE(_l##opcname): \
aoqi@0 1273 { \
aoqi@0 1274 if (test) { \
aoqi@0 1275 jlong l1 = STACK_LONG(-1); \
aoqi@0 1276 if (VMlongEqz(l1)) { \
aoqi@0 1277 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
aoqi@0 1278 "/ by long zero", note_div0Check_trap); \
aoqi@0 1279 } \
aoqi@0 1280 } \
aoqi@0 1281 /* First long at (-1,-2) next long at (-3,-4) */ \
aoqi@0 1282 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
aoqi@0 1283 STACK_LONG(-1)), \
aoqi@0 1284 -3); \
aoqi@0 1285 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
aoqi@0 1286 }
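// Illustrative expansion (int case of OPC_INT_BINARY(div, Div, 1)): the test
// argument folds into a divide-by-zero check, roughly:
#if 0
      CASE(_idiv):
         if (1 && (STACK_INT(-1) == 0)) {
           VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(),
                         "/ by zero", note_div0Check_trap);
         }
         SET_STACK_INT(VMintDiv(STACK_INT(-2), STACK_INT(-1)), -2);
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
#endif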
aoqi@0 1287
aoqi@0 1288 OPC_INT_BINARY(add, Add, 0);
aoqi@0 1289 OPC_INT_BINARY(sub, Sub, 0);
aoqi@0 1290 OPC_INT_BINARY(mul, Mul, 0);
aoqi@0 1291 OPC_INT_BINARY(and, And, 0);
aoqi@0 1292 OPC_INT_BINARY(or, Or, 0);
aoqi@0 1293 OPC_INT_BINARY(xor, Xor, 0);
aoqi@0 1294 OPC_INT_BINARY(div, Div, 1);
aoqi@0 1295 OPC_INT_BINARY(rem, Rem, 1);
aoqi@0 1296
aoqi@0 1297
aoqi@0 1298 /* Perform various binary floating number operations */
aoqi@0 1299 /* On some machine/platforms/compilers div zero check can be implicit */
aoqi@0 1300
aoqi@0 1301 #undef OPC_FLOAT_BINARY
aoqi@0 1302 #define OPC_FLOAT_BINARY(opcname, opname) \
aoqi@0 1303 CASE(_d##opcname): { \
aoqi@0 1304 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
aoqi@0 1305 STACK_DOUBLE(-1)), \
aoqi@0 1306 -3); \
aoqi@0 1307 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
aoqi@0 1308 } \
aoqi@0 1309 CASE(_f##opcname): \
aoqi@0 1310 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
aoqi@0 1311 STACK_FLOAT(-1)), \
aoqi@0 1312 -2); \
aoqi@0 1313 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1314
aoqi@0 1315
aoqi@0 1316 OPC_FLOAT_BINARY(add, Add);
aoqi@0 1317 OPC_FLOAT_BINARY(sub, Sub);
aoqi@0 1318 OPC_FLOAT_BINARY(mul, Mul);
aoqi@0 1319 OPC_FLOAT_BINARY(div, Div);
aoqi@0 1320 OPC_FLOAT_BINARY(rem, Rem);
aoqi@0 1321
aoqi@0 1322 /* Shift operations
aoqi@0 1323 * Shift left int and long: ishl, lshl
aoqi@0 1324 * Logical shift right int and long w/zero extension: iushr, lushr
aoqi@0 1325 * Arithmetic shift right int and long w/sign extension: ishr, lshr
aoqi@0 1326 */
aoqi@0 1327
aoqi@0 1328 #undef OPC_SHIFT_BINARY
aoqi@0 1329 #define OPC_SHIFT_BINARY(opcname, opname) \
aoqi@0 1330 CASE(_i##opcname): \
aoqi@0 1331 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
aoqi@0 1332 STACK_INT(-1)), \
aoqi@0 1333 -2); \
aoqi@0 1334 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1335 CASE(_l##opcname): \
aoqi@0 1336 { \
aoqi@0 1337 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
aoqi@0 1338 STACK_INT(-1)), \
aoqi@0 1339 -2); \
aoqi@0 1340 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1341 }
aoqi@0 1342
aoqi@0 1343 OPC_SHIFT_BINARY(shl, Shl);
aoqi@0 1344 OPC_SHIFT_BINARY(shr, Shr);
aoqi@0 1345 OPC_SHIFT_BINARY(ushr, Ushr);
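/* Only the low-order bits of the shift count are significant (5 bits for ints,
 * 6 bits for longs, per the JVM spec); the VMintShl/VMlongShl-style helpers
 * are expected to mask the count, e.g. an int shift by 33 behaves like a
 * shift by 1.
 */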
aoqi@0 1346
aoqi@0 1347 /* Increment local variable by constant */
aoqi@0 1348 CASE(_iinc):
aoqi@0 1349 {
aoqi@0 1350 // locals[pc[1]].j.i += (jbyte)(pc[2]);
aoqi@0 1351 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
aoqi@0 1352 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 1353 }
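/* For example, javac typically compiles "i += 7" (with i in local slot 1) to
 *   iinc 1 7
 * so pc[1] is the local index and pc[2] the signed 8-bit increment.
 */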
aoqi@0 1354
aoqi@0 1355 /* negate the value on the top of the stack */
aoqi@0 1356
aoqi@0 1357 CASE(_ineg):
aoqi@0 1358 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
aoqi@0 1359 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1360
aoqi@0 1361 CASE(_fneg):
aoqi@0 1362 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
aoqi@0 1363 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1364
aoqi@0 1365 CASE(_lneg):
aoqi@0 1366 {
aoqi@0 1367 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
aoqi@0 1368 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1369 }
aoqi@0 1370
aoqi@0 1371 CASE(_dneg):
aoqi@0 1372 {
aoqi@0 1373 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
aoqi@0 1374 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1375 }
aoqi@0 1376
aoqi@0 1377 /* Conversion operations */
aoqi@0 1378
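/* longs and doubles occupy two expression-stack slots, so the widening
 * conversions below (i2l, i2d, f2l, f2d) grow the stack by one slot and the
 * narrowing ones (l2i, l2f, d2i, d2f) shrink it by one; the MORE_STACK /
 * SET_STACK_* pairs juggle the slots accordingly.
 */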
aoqi@0 1379 CASE(_i2f): /* convert top of stack int to float */
aoqi@0 1380 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
aoqi@0 1381 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1382
aoqi@0 1383 CASE(_i2l): /* convert top of stack int to long */
aoqi@0 1384 {
aoqi@0 1385 // this is ugly QQQ
aoqi@0 1386 jlong r = VMint2Long(STACK_INT(-1));
aoqi@0 1387 MORE_STACK(-1); // Pop
aoqi@0 1388 SET_STACK_LONG(r, 1);
aoqi@0 1389
aoqi@0 1390 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1391 }
aoqi@0 1392
aoqi@0 1393 CASE(_i2d): /* convert top of stack int to double */
aoqi@0 1394 {
aoqi@0 1395 // this is ugly QQQ (why cast to jlong?? )
aoqi@0 1396 jdouble r = (jlong)STACK_INT(-1);
aoqi@0 1397 MORE_STACK(-1); // Pop
aoqi@0 1398 SET_STACK_DOUBLE(r, 1);
aoqi@0 1399
aoqi@0 1400 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1401 }
aoqi@0 1402
aoqi@0 1403 CASE(_l2i): /* convert top of stack long to int */
aoqi@0 1404 {
aoqi@0 1405 jint r = VMlong2Int(STACK_LONG(-1));
aoqi@0 1406 MORE_STACK(-2); // Pop
aoqi@0 1407 SET_STACK_INT(r, 0);
aoqi@0 1408 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1409 }
aoqi@0 1410
aoqi@0 1411 CASE(_l2f): /* convert top of stack long to float */
aoqi@0 1412 {
aoqi@0 1413 jlong r = STACK_LONG(-1);
aoqi@0 1414 MORE_STACK(-2); // Pop
aoqi@0 1415 SET_STACK_FLOAT(VMlong2Float(r), 0);
aoqi@0 1416 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1417 }
aoqi@0 1418
aoqi@0 1419 CASE(_l2d): /* convert top of stack long to double */
aoqi@0 1420 {
aoqi@0 1421 jlong r = STACK_LONG(-1);
aoqi@0 1422 MORE_STACK(-2); // Pop
aoqi@0 1423 SET_STACK_DOUBLE(VMlong2Double(r), 1);
aoqi@0 1424 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1425 }
aoqi@0 1426
aoqi@0 1427 CASE(_f2i): /* Convert top of stack float to int */
aoqi@0 1428 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
aoqi@0 1429 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1430
aoqi@0 1431 CASE(_f2l): /* convert top of stack float to long */
aoqi@0 1432 {
aoqi@0 1433 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
aoqi@0 1434 MORE_STACK(-1); // POP
aoqi@0 1435 SET_STACK_LONG(r, 1);
aoqi@0 1436 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1437 }
aoqi@0 1438
aoqi@0 1439 CASE(_f2d): /* convert top of stack float to double */
aoqi@0 1440 {
aoqi@0 1441 jfloat f;
aoqi@0 1442 jdouble r;
aoqi@0 1443 f = STACK_FLOAT(-1);
aoqi@0 1444 r = (jdouble) f;
aoqi@0 1445 MORE_STACK(-1); // POP
aoqi@0 1446 SET_STACK_DOUBLE(r, 1);
aoqi@0 1447 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1448 }
aoqi@0 1449
aoqi@0 1450 CASE(_d2i): /* convert top of stack double to int */
aoqi@0 1451 {
aoqi@0 1452 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
aoqi@0 1453 MORE_STACK(-2);
aoqi@0 1454 SET_STACK_INT(r1, 0);
aoqi@0 1455 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1456 }
aoqi@0 1457
aoqi@0 1458 CASE(_d2f): /* convert top of stack double to float */
aoqi@0 1459 {
aoqi@0 1460 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
aoqi@0 1461 MORE_STACK(-2);
aoqi@0 1462 SET_STACK_FLOAT(r1, 0);
aoqi@0 1463 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1464 }
aoqi@0 1465
aoqi@0 1466 CASE(_d2l): /* convert top of stack double to long */
aoqi@0 1467 {
aoqi@0 1468 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
aoqi@0 1469 MORE_STACK(-2);
aoqi@0 1470 SET_STACK_LONG(r1, 1);
aoqi@0 1471 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
aoqi@0 1472 }
aoqi@0 1473
aoqi@0 1474 CASE(_i2b):
aoqi@0 1475 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
aoqi@0 1476 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1477
aoqi@0 1478 CASE(_i2c):
aoqi@0 1479 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
aoqi@0 1480 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1481
aoqi@0 1482 CASE(_i2s):
aoqi@0 1483 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
aoqi@0 1484 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1485
aoqi@0 1486 /* comparison operators */
aoqi@0 1487
aoqi@0 1488
aoqi@0 1489 #define COMPARISON_OP(name, comparison) \
aoqi@0 1490 CASE(_if_icmp##name): { \
aoqi@0 1491 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
aoqi@0 1492 int skip = cmp \
aoqi@0 1493 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1494 address branch_pc = pc; \
aoqi@0 1495 /* Profile branch. */ \
aoqi@0 1496 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1497 UPDATE_PC_AND_TOS(skip, -2); \
aoqi@0 1498 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1499 CONTINUE; \
aoqi@0 1500 } \
aoqi@0 1501 CASE(_if##name): { \
aoqi@0 1502 const bool cmp = (STACK_INT(-1) comparison 0); \
aoqi@0 1503 int skip = cmp \
aoqi@0 1504 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1505 address branch_pc = pc; \
aoqi@0 1506 /* Profile branch. */ \
aoqi@0 1507 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1508 UPDATE_PC_AND_TOS(skip, -1); \
aoqi@0 1509 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1510 CONTINUE; \
aoqi@0 1511 }
aoqi@0 1512
aoqi@0 1513 #define COMPARISON_OP2(name, comparison) \
aoqi@0 1514 COMPARISON_OP(name, comparison) \
aoqi@0 1515 CASE(_if_acmp##name): { \
aoqi@0 1516 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
aoqi@0 1517 int skip = cmp \
aoqi@0 1518 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1519 address branch_pc = pc; \
aoqi@0 1520 /* Profile branch. */ \
aoqi@0 1521 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1522 UPDATE_PC_AND_TOS(skip, -2); \
aoqi@0 1523 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1524 CONTINUE; \
aoqi@0 1525 }
aoqi@0 1526
aoqi@0 1527 #define NULL_COMPARISON_NOT_OP(name) \
aoqi@0 1528 CASE(_if##name): { \
aoqi@0 1529 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
aoqi@0 1530 int skip = cmp \
aoqi@0 1531 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1532 address branch_pc = pc; \
aoqi@0 1533 /* Profile branch. */ \
aoqi@0 1534 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1535 UPDATE_PC_AND_TOS(skip, -1); \
aoqi@0 1536 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1537 CONTINUE; \
aoqi@0 1538 }
aoqi@0 1539
aoqi@0 1540 #define NULL_COMPARISON_OP(name) \
aoqi@0 1541 CASE(_if##name): { \
aoqi@0 1542 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
aoqi@0 1543 int skip = cmp \
aoqi@0 1544 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
aoqi@0 1545 address branch_pc = pc; \
aoqi@0 1546 /* Profile branch. */ \
aoqi@0 1547 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
aoqi@0 1548 UPDATE_PC_AND_TOS(skip, -1); \
aoqi@0 1549 DO_BACKEDGE_CHECKS(skip, branch_pc); \
aoqi@0 1550 CONTINUE; \
aoqi@0 1551 }
aoqi@0 1552 COMPARISON_OP(lt, <);
aoqi@0 1553 COMPARISON_OP(gt, >);
aoqi@0 1554 COMPARISON_OP(le, <=);
aoqi@0 1555 COMPARISON_OP(ge, >=);
aoqi@0 1556 COMPARISON_OP2(eq, ==); /* include ref comparison */
aoqi@0 1557 COMPARISON_OP2(ne, !=); /* include ref comparison */
aoqi@0 1558 NULL_COMPARISON_OP(null);
aoqi@0 1559 NULL_COMPARISON_NOT_OP(nonnull);
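/* In all of the branch macros above, "skip" is either the signed 16-bit branch
 * offset read from the bytecode stream (branch taken) or 3, the length of the
 * if<cond> / if_icmp<cond> / if_acmp<cond> instruction (fall through).
 */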
aoqi@0 1560
aoqi@0 1561 /* Goto pc at specified offset in switch table. */
aoqi@0 1562
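/* tableswitch layout after the 0-3 padding bytes skipped by VMalignWordUp:
 * default offset, low, high, then (high - low + 1) jump offsets; lpc[key + 3]
 * below indexes into that jump table.
 */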
aoqi@0 1563 CASE(_tableswitch): {
aoqi@0 1564 jint* lpc = (jint*)VMalignWordUp(pc+1);
aoqi@0 1565 int32_t key = STACK_INT(-1);
aoqi@0 1566 int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
aoqi@0 1567 int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
aoqi@0 1568 int32_t skip;
aoqi@0 1569 key -= low;
aoqi@0 1570 if (((uint32_t) key > (uint32_t)(high - low))) {
aoqi@0 1571 key = -1;
aoqi@0 1572 skip = Bytes::get_Java_u4((address)&lpc[0]);
aoqi@0 1573 } else {
aoqi@0 1574 skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
aoqi@0 1575 }
aoqi@0 1576 // Profile switch.
aoqi@0 1577 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
aoqi@0 1578 // Does this really need a full backedge check (osr)?
aoqi@0 1579 address branch_pc = pc;
aoqi@0 1580 UPDATE_PC_AND_TOS(skip, -1);
aoqi@0 1581 DO_BACKEDGE_CHECKS(skip, branch_pc);
aoqi@0 1582 CONTINUE;
aoqi@0 1583 }
aoqi@0 1584
aoqi@0 1585 /* Goto pc whose table entry matches specified key. */
aoqi@0 1586
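/* lookupswitch layout after the alignment padding: default offset, npairs,
 * then npairs of (match, offset) pairs. The pairs are sorted by match value,
 * but this interpreter simply scans them linearly.
 */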
aoqi@0 1587 CASE(_lookupswitch): {
aoqi@0 1588 jint* lpc = (jint*)VMalignWordUp(pc+1);
aoqi@0 1589 int32_t key = STACK_INT(-1);
aoqi@0 1590 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
aoqi@0 1591 // Remember index.
aoqi@0 1592 int index = -1;
aoqi@0 1593 int newindex = 0;
aoqi@0 1594 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
aoqi@0 1595 while (--npairs >= 0) {
aoqi@0 1596 lpc += 2;
aoqi@0 1597 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
aoqi@0 1598 skip = Bytes::get_Java_u4((address)&lpc[1]);
aoqi@0 1599 index = newindex;
aoqi@0 1600 break;
aoqi@0 1601 }
aoqi@0 1602 newindex += 1;
aoqi@0 1603 }
aoqi@0 1604 // Profile switch.
aoqi@0 1605 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
aoqi@0 1606 address branch_pc = pc;
aoqi@0 1607 UPDATE_PC_AND_TOS(skip, -1);
aoqi@0 1608 DO_BACKEDGE_CHECKS(skip, branch_pc);
aoqi@0 1609 CONTINUE;
aoqi@0 1610 }
aoqi@0 1611
aoqi@0 1612 CASE(_fcmpl):
aoqi@0 1613 CASE(_fcmpg):
aoqi@0 1614 {
aoqi@0 1615 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
aoqi@0 1616 STACK_FLOAT(-1),
aoqi@0 1617 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
aoqi@0 1618 -2);
aoqi@0 1619 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1620 }
aoqi@0 1621
aoqi@0 1622 CASE(_dcmpl):
aoqi@0 1623 CASE(_dcmpg):
aoqi@0 1624 {
aoqi@0 1625 int r = VMdoubleCompare(STACK_DOUBLE(-3),
aoqi@0 1626 STACK_DOUBLE(-1),
aoqi@0 1627 (opcode == Bytecodes::_dcmpl ? -1 : 1));
aoqi@0 1628 MORE_STACK(-4); // Pop
aoqi@0 1629 SET_STACK_INT(r, 0);
aoqi@0 1630 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1631 }
aoqi@0 1632
aoqi@0 1633 CASE(_lcmp):
aoqi@0 1634 {
aoqi@0 1635 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
aoqi@0 1636 MORE_STACK(-4);
aoqi@0 1637 SET_STACK_INT(r, 0);
aoqi@0 1638 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
aoqi@0 1639 }
aoqi@0 1640
aoqi@0 1641
aoqi@0 1642 /* Return from a method */
aoqi@0 1643
aoqi@0 1644 CASE(_areturn):
aoqi@0 1645 CASE(_ireturn):
aoqi@0 1646 CASE(_freturn):
aoqi@0 1647 {
aoqi@0 1648 // Allow a safepoint before returning to frame manager.
aoqi@0 1649 SAFEPOINT;
aoqi@0 1650
aoqi@0 1651 goto handle_return;
aoqi@0 1652 }
aoqi@0 1653
aoqi@0 1654 CASE(_lreturn):
aoqi@0 1655 CASE(_dreturn):
aoqi@0 1656 {
aoqi@0 1657 // Allow a safepoint before returning to frame manager.
aoqi@0 1658 SAFEPOINT;
aoqi@0 1659 goto handle_return;
aoqi@0 1660 }
aoqi@0 1661
aoqi@0 1662 CASE(_return_register_finalizer): {
aoqi@0 1663
aoqi@0 1664 oop rcvr = LOCALS_OBJECT(0);
aoqi@0 1665 VERIFY_OOP(rcvr);
aoqi@0 1666 if (rcvr->klass()->has_finalizer()) {
aoqi@0 1667 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
aoqi@0 1668 }
aoqi@0 1669 goto handle_return;
aoqi@0 1670 }
aoqi@0 1671 CASE(_return): {
aoqi@0 1672
aoqi@0 1673 // Allow a safepoint before returning to frame manager.
aoqi@0 1674 SAFEPOINT;
aoqi@0 1675 goto handle_return;
aoqi@0 1676 }
aoqi@0 1677
aoqi@0 1678 /* Array access byte-codes */
aoqi@0 1679
aoqi@0 1680 /* Every array access byte-code starts out like this */
aoqi@0 1681 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
aoqi@0 1682 #define ARRAY_INTRO(arrayOff) \
aoqi@0 1683 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
aoqi@0 1684 jint index = STACK_INT(arrayOff + 1); \
aoqi@0 1685 char message[jintAsStringSize]; \
aoqi@0 1686 CHECK_NULL(arrObj); \
aoqi@0 1687 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
aoqi@0 1688 sprintf(message, "%d", index); \
aoqi@0 1689 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
aoqi@0 1690 message, note_rangeCheck_trap); \
aoqi@0 1691 }
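/* Note the single unsigned comparison above: casting index to uint32_t makes a
 * negative index look like a huge value, so one test covers both "index < 0"
 * and "index >= length".
 */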
aoqi@0 1692
aoqi@0 1693 /* 32-bit loads. These handle conversion from < 32-bit types */
aoqi@0 1694 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
aoqi@0 1695 { \
aoqi@0 1696 ARRAY_INTRO(-2); \
aoqi@0 1697 (void)extra; \
aoqi@0 1698 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
aoqi@0 1699 -2); \
aoqi@0 1700 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
aoqi@0 1701 }
aoqi@0 1702
aoqi@0 1703 /* 64-bit loads */
aoqi@0 1704 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
aoqi@0 1705 { \
aoqi@0 1706 ARRAY_INTRO(-2); \
aoqi@0 1707 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
aoqi@0 1708 (void)extra; \
aoqi@0 1709 UPDATE_PC_AND_CONTINUE(1); \
aoqi@0 1710 }
aoqi@0 1711
aoqi@0 1712 CASE(_iaload):
aoqi@0 1713 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
aoqi@0 1714 CASE(_faload):
aoqi@0 1715 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
aoqi@0 1716 CASE(_aaload): {
aoqi@0 1717 ARRAY_INTRO(-2);
aoqi@0 1718 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
aoqi@0 1719 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1720 }
aoqi@0 1721 CASE(_baload):
aoqi@0 1722 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
aoqi@0 1723 CASE(_caload):
aoqi@0 1724 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
aoqi@0 1725 CASE(_saload):
aoqi@0 1726 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
aoqi@0 1727 CASE(_laload):
aoqi@0 1728 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
aoqi@0 1729 CASE(_daload):
aoqi@0 1730 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
aoqi@0 1731
aoqi@0 1732 /* 32-bit stores. These handle conversion to < 32-bit types */
aoqi@0 1733 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
aoqi@0 1734 { \
aoqi@0 1735 ARRAY_INTRO(-3); \
aoqi@0 1736 (void)extra; \
aoqi@0 1737 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
aoqi@0 1738 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
aoqi@0 1739 }
aoqi@0 1740
aoqi@0 1741 /* 64-bit stores */
aoqi@0 1742 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
aoqi@0 1743 { \
aoqi@0 1744 ARRAY_INTRO(-4); \
aoqi@0 1745 (void)extra; \
aoqi@0 1746 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
aoqi@0 1747 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
aoqi@0 1748 }
aoqi@0 1749
aoqi@0 1750 CASE(_iastore):
aoqi@0 1751 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
aoqi@0 1752 CASE(_fastore):
aoqi@0 1753 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
aoqi@0 1754 /*
aoqi@0 1755 * This one looks different because of the assignability check
aoqi@0 1756 */
aoqi@0 1757 CASE(_aastore): {
aoqi@0 1758 oop rhsObject = STACK_OBJECT(-1);
aoqi@0 1759 VERIFY_OOP(rhsObject);
aoqi@0 1760 ARRAY_INTRO( -3);
aoqi@0 1761 // arrObj, index are set
aoqi@0 1762 if (rhsObject != NULL) {
aoqi@0 1763 /* Check assignability of rhsObject into arrObj */
aoqi@0 1764 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
aoqi@0 1765 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
aoqi@0 1766 //
aoqi@0 1767 // Check for compatibility. This check must not GC!!
aoqi@0 1768 // Seems way more expensive now that we must dispatch
aoqi@0 1769 //
aoqi@0 1770 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
aoqi@0 1771 // Decrement counter if subtype check failed.
aoqi@0 1772 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
aoqi@0 1773 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
aoqi@0 1774 }
aoqi@0 1775 // Profile checkcast with null_seen and receiver.
aoqi@0 1776 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
aoqi@0 1777 } else {
aoqi@0 1778 // Profile checkcast with null_seen and receiver.
aoqi@0 1779 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
aoqi@0 1780 }
aoqi@0 1781 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
aoqi@0 1782 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
aoqi@0 1783 }
kevinw@8368 1784 CASE(_bastore): {
kevinw@8368 1785 ARRAY_INTRO(-3);
kevinw@8368 1786 int item = STACK_INT(-1);
kevinw@8368 1787 // if it is a T_BOOLEAN array, mask the stored value to 0/1
kevinw@8368 1788 if (arrObj->klass() == Universe::boolArrayKlassObj()) {
kevinw@8368 1789 item &= 1;
kevinw@8368 1790 } else {
kevinw@8368 1791 assert(arrObj->klass() == Universe::byteArrayKlassObj(),
kevinw@8368 1792 "should be byte array otherwise");
kevinw@8368 1793 }
kevinw@8368 1794 ((typeArrayOop)arrObj)->byte_at_put(index, item);
kevinw@8368 1795 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
kevinw@8368 1796 }
aoqi@0 1797 CASE(_castore):
aoqi@0 1798 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
aoqi@0 1799 CASE(_sastore):
aoqi@0 1800 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
aoqi@0 1801 CASE(_lastore):
aoqi@0 1802 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
aoqi@0 1803 CASE(_dastore):
aoqi@0 1804 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
aoqi@0 1805
aoqi@0 1806 CASE(_arraylength):
aoqi@0 1807 {
aoqi@0 1808 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
aoqi@0 1809 CHECK_NULL(ary);
aoqi@0 1810 SET_STACK_INT(ary->length(), -1);
aoqi@0 1811 UPDATE_PC_AND_CONTINUE(1);
aoqi@0 1812 }
aoqi@0 1813
aoqi@0 1814 /* monitorenter and monitorexit for locking/unlocking an object */
aoqi@0 1815
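/* Rough fast-path order for monitorenter below: (1) with UseBiasedLocking, try
 * to keep or acquire a bias in the mark word, (2) otherwise try lightweight
 * locking by CAS-ing a pointer to the on-stack BasicObjectLock into the mark
 * word, (3) fall back to InterpreterRuntime::monitorenter for the contended or
 * inflated case.
 */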
aoqi@0 1816 CASE(_monitorenter): {
aoqi@0 1817 oop lockee = STACK_OBJECT(-1);
aoqi@0 1818 // dereferencing lockee ought to provoke an implicit null check
aoqi@0 1819 CHECK_NULL(lockee);
aoqi@0 1820 // find a free monitor or one already allocated for this object
aoqi@0 1821 // if we find a matching object then we need a new monitor
aoqi@0 1822 // since this is a recursive enter
aoqi@0 1823 BasicObjectLock* limit = istate->monitor_base();
aoqi@0 1824 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
aoqi@0 1825 BasicObjectLock* entry = NULL;
aoqi@0 1826 while (most_recent != limit ) {
aoqi@0 1827 if (most_recent->obj() == NULL) entry = most_recent;
aoqi@0 1828 else if (most_recent->obj() == lockee) break;
aoqi@0 1829 most_recent++;
aoqi@0 1830 }
aoqi@0 1831 if (entry != NULL) {
aoqi@0 1832 entry->set_obj(lockee);
aoqi@0 1833 int success = false;
aoqi@0 1834 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
aoqi@0 1835
aoqi@0 1836 markOop mark = lockee->mark();
aoqi@0 1837 intptr_t hash = (intptr_t) markOopDesc::no_hash;
aoqi@0 1838 // implies UseBiasedLocking
aoqi@0 1839 if (mark->has_bias_pattern()) {
aoqi@0 1840 uintptr_t thread_ident;
aoqi@0 1841 uintptr_t anticipated_bias_locking_value;
aoqi@0 1842 thread_ident = (uintptr_t)istate->thread();
aoqi@0 1843 anticipated_bias_locking_value =
aoqi@0 1844 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
aoqi@0 1845 ~((uintptr_t) markOopDesc::age_mask_in_place);
aoqi@0 1846
aoqi@0 1847 if (anticipated_bias_locking_value == 0) {
aoqi@0 1848 // already biased towards this thread, nothing to do
aoqi@0 1849 if (PrintBiasedLockingStatistics) {
aoqi@0 1850 (* BiasedLocking::biased_lock_entry_count_addr())++;
aoqi@0 1851 }
aoqi@0 1852 success = true;
aoqi@0 1853 }
aoqi@0 1854 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
aoqi@0 1855 // try revoke bias
aoqi@0 1856 markOop header = lockee->klass()->prototype_header();
aoqi@0 1857 if (hash != markOopDesc::no_hash) {
aoqi@0 1858 header = header->copy_set_hash(hash);
aoqi@0 1859 }
aoqi@0 1860 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
aoqi@0 1861 if (PrintBiasedLockingStatistics)
aoqi@0 1862 (*BiasedLocking::revoked_lock_entry_count_addr())++;
aoqi@0 1863 }
aoqi@0 1864 }
aoqi@0 1865 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
aoqi@0 1866 // try rebias
aoqi@0 1867 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
aoqi@0 1868 if (hash != markOopDesc::no_hash) {
aoqi@0 1869 new_header = new_header->copy_set_hash(hash);
aoqi@0 1870 }
aoqi@0 1871 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
aoqi@0 1872 if (PrintBiasedLockingStatistics)
aoqi@0 1873 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
aoqi@0 1874 }
aoqi@0 1875 else {
aoqi@0 1876 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1877 }
aoqi@0 1878 success = true;
aoqi@0 1879 }
aoqi@0 1880 else {
aoqi@0 1881 // try to bias towards thread in case object is anonymously biased
aoqi@0 1882 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
aoqi@0 1883 (uintptr_t)markOopDesc::age_mask_in_place |
aoqi@0 1884 epoch_mask_in_place));
aoqi@0 1885 if (hash != markOopDesc::no_hash) {
aoqi@0 1886 header = header->copy_set_hash(hash);
aoqi@0 1887 }
aoqi@0 1888 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
aoqi@0 1889 // debugging hint
aoqi@0 1890 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
aoqi@0 1891 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
aoqi@0 1892 if (PrintBiasedLockingStatistics)
aoqi@0 1893 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
aoqi@0 1894 }
aoqi@0 1895 else {
aoqi@0 1896 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1897 }
aoqi@0 1898 success = true;
aoqi@0 1899 }
aoqi@0 1900 }
aoqi@0 1901
aoqi@0 1902 // traditional lightweight locking
aoqi@0 1903 if (!success) {
aoqi@0 1904 markOop displaced = lockee->mark()->set_unlocked();
aoqi@0 1905 entry->lock()->set_displaced_header(displaced);
aoqi@0 1906 bool call_vm = UseHeavyMonitors;
aoqi@0 1907 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
aoqi@0 1908 // Is it simple recursive case?
aoqi@0 1909 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
aoqi@0 1910 entry->lock()->set_displaced_header(NULL);
aoqi@0 1911 } else {
aoqi@0 1912 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
aoqi@0 1913 }
aoqi@0 1914 }
aoqi@0 1915 }
aoqi@0 1916 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1917 } else {
aoqi@0 1918 istate->set_msg(more_monitors);
aoqi@0 1919 UPDATE_PC_AND_RETURN(0); // Re-execute
aoqi@0 1920 }
aoqi@0 1921 }
aoqi@0 1922
aoqi@0 1923 CASE(_monitorexit): {
aoqi@0 1924 oop lockee = STACK_OBJECT(-1);
aoqi@0 1925 CHECK_NULL(lockee);
aoqi@0 1926 // dereferencing lockee ought to provoke an implicit null check
aoqi@0 1927 // find our monitor slot
aoqi@0 1928 BasicObjectLock* limit = istate->monitor_base();
aoqi@0 1929 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
aoqi@0 1930 while (most_recent != limit ) {
aoqi@0 1931 if ((most_recent)->obj() == lockee) {
aoqi@0 1932 BasicLock* lock = most_recent->lock();
aoqi@0 1933 markOop header = lock->displaced_header();
aoqi@0 1934 most_recent->set_obj(NULL);
aoqi@0 1935 if (!lockee->mark()->has_bias_pattern()) {
aoqi@0 1936 bool call_vm = UseHeavyMonitors;
aoqi@0 1937 // If it isn't recursive we must either swap the old header or call the runtime
aoqi@0 1938 if (header != NULL || call_vm) {
aoqi@0 1939 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
aoqi@0 1940 // restore object for the slow case
aoqi@0 1941 most_recent->set_obj(lockee);
aoqi@0 1942 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
aoqi@0 1943 }
aoqi@0 1944 }
aoqi@0 1945 }
aoqi@0 1946 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
aoqi@0 1947 }
aoqi@0 1948 most_recent++;
aoqi@0 1949 }
aoqi@0 1950 // Need to throw illegal monitor state exception
aoqi@0 1951 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
aoqi@0 1952 ShouldNotReachHere();
aoqi@0 1953 }
aoqi@0 1954
aoqi@0 1955 /* All of the non-quick opcodes. */
aoqi@0 1956
aoqi@0 1957 /* Set clobbersCpIndex true if the quickened opcode clobbers the
aoqi@0 1958 * constant pool index in the instruction.
aoqi@0 1959 */
aoqi@0 1960 CASE(_getfield):
aoqi@0 1961 CASE(_getstatic):
aoqi@0 1962 {
aoqi@0 1963 u2 index;
aoqi@0 1964 ConstantPoolCacheEntry* cache;
aoqi@0 1965 index = Bytes::get_native_u2(pc+1);
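// get_native_u2 above: these bytecodes are rewritten so that the two bytes
// after the opcode hold a constant pool cache index in native byte order,
// not a Java-order constant pool index.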
aoqi@0 1966
aoqi@0 1967 // QQQ Need to make this as inlined as possible. Probably need to
aoqi@0 1968 // split all the bytecode cases out so c++ compiler has a chance
aoqi@0 1969 // for constant prop to fold everything possible away.
aoqi@0 1970
aoqi@0 1971 cache = cp->entry_at(index);
aoqi@0 1972 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 1973 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
aoqi@0 1974 handle_exception);
aoqi@0 1975 cache = cp->entry_at(index);
aoqi@0 1976 }
aoqi@0 1977
aoqi@0 1978 #ifdef VM_JVMTI
aoqi@0 1979 if (_jvmti_interp_events) {
aoqi@0 1980 int *count_addr;
aoqi@0 1981 oop obj;
aoqi@0 1982 // Check to see if a field access watch has been set
aoqi@0 1983 // before we take the time to call into the VM.
aoqi@0 1984 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
aoqi@0 1985 if ( *count_addr > 0 ) {
aoqi@0 1986 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
aoqi@0 1987 obj = (oop)NULL;
aoqi@0 1988 } else {
aoqi@0 1989 obj = (oop) STACK_OBJECT(-1);
aoqi@0 1990 VERIFY_OOP(obj);
aoqi@0 1991 }
aoqi@0 1992 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
aoqi@0 1993 obj,
aoqi@0 1994 cache),
aoqi@0 1995 handle_exception);
aoqi@0 1996 }
aoqi@0 1997 }
aoqi@0 1998 #endif /* VM_JVMTI */
aoqi@0 1999
aoqi@0 2000 oop obj;
aoqi@0 2001 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
aoqi@0 2002 Klass* k = cache->f1_as_klass();
aoqi@0 2003 obj = k->java_mirror();
aoqi@0 2004 MORE_STACK(1); // Assume single slot push
aoqi@0 2005 } else {
aoqi@0 2006 obj = (oop) STACK_OBJECT(-1);
aoqi@0 2007 CHECK_NULL(obj);
aoqi@0 2008 }
aoqi@0 2009
aoqi@0 2010 //
aoqi@0 2011 // Now store the result on the stack
aoqi@0 2012 //
aoqi@0 2013 TosState tos_type = cache->flag_state();
aoqi@0 2014 int field_offset = cache->f2_as_index();
aoqi@0 2015 if (cache->is_volatile()) {
aoqi@0 2016 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
aoqi@0 2017 OrderAccess::fence();
aoqi@0 2018 }
aoqi@0 2019 if (tos_type == atos) {
aoqi@0 2020 VERIFY_OOP(obj->obj_field_acquire(field_offset));
aoqi@0 2021 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
aoqi@0 2022 } else if (tos_type == itos) {
aoqi@0 2023 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
aoqi@0 2024 } else if (tos_type == ltos) {
aoqi@0 2025 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
aoqi@0 2026 MORE_STACK(1);
kevinw@8368 2027 } else if (tos_type == btos || tos_type == ztos) {
aoqi@0 2028 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
aoqi@0 2029 } else if (tos_type == ctos) {
aoqi@0 2030 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
aoqi@0 2031 } else if (tos_type == stos) {
aoqi@0 2032 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
aoqi@0 2033 } else if (tos_type == ftos) {
aoqi@0 2034 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
aoqi@0 2035 } else {
aoqi@0 2036 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
aoqi@0 2037 MORE_STACK(1);
aoqi@0 2038 }
aoqi@0 2039 } else {
aoqi@0 2040 if (tos_type == atos) {
aoqi@0 2041 VERIFY_OOP(obj->obj_field(field_offset));
aoqi@0 2042 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
aoqi@0 2043 } else if (tos_type == itos) {
aoqi@0 2044 SET_STACK_INT(obj->int_field(field_offset), -1);
aoqi@0 2045 } else if (tos_type == ltos) {
aoqi@0 2046 SET_STACK_LONG(obj->long_field(field_offset), 0);
aoqi@0 2047 MORE_STACK(1);
kevinw@8368 2048 } else if (tos_type == btos || tos_type == ztos) {
aoqi@0 2049 SET_STACK_INT(obj->byte_field(field_offset), -1);
aoqi@0 2050 } else if (tos_type == ctos) {
aoqi@0 2051 SET_STACK_INT(obj->char_field(field_offset), -1);
aoqi@0 2052 } else if (tos_type == stos) {
aoqi@0 2053 SET_STACK_INT(obj->short_field(field_offset), -1);
aoqi@0 2054 } else if (tos_type == ftos) {
aoqi@0 2055 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
aoqi@0 2056 } else {
aoqi@0 2057 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
aoqi@0 2058 MORE_STACK(1);
aoqi@0 2059 }
aoqi@0 2060 }
aoqi@0 2061
aoqi@0 2062 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2063 }
aoqi@0 2064
aoqi@0 2065 CASE(_putfield):
aoqi@0 2066 CASE(_putstatic):
aoqi@0 2067 {
aoqi@0 2068 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2069 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2070 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 2071 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
aoqi@0 2072 handle_exception);
aoqi@0 2073 cache = cp->entry_at(index);
aoqi@0 2074 }
aoqi@0 2075
aoqi@0 2076 #ifdef VM_JVMTI
aoqi@0 2077 if (_jvmti_interp_events) {
aoqi@0 2078 int *count_addr;
aoqi@0 2079 oop obj;
aoqi@0 2080 // Check to see if a field modification watch has been set
aoqi@0 2081 // before we take the time to call into the VM.
aoqi@0 2082 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
aoqi@0 2083 if ( *count_addr > 0 ) {
aoqi@0 2084 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
aoqi@0 2085 obj = (oop)NULL;
aoqi@0 2086 }
aoqi@0 2087 else {
aoqi@0 2088 if (cache->is_long() || cache->is_double()) {
aoqi@0 2089 obj = (oop) STACK_OBJECT(-3);
aoqi@0 2090 } else {
aoqi@0 2091 obj = (oop) STACK_OBJECT(-2);
aoqi@0 2092 }
aoqi@0 2093 VERIFY_OOP(obj);
aoqi@0 2094 }
aoqi@0 2095
aoqi@0 2096 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
aoqi@0 2097 obj,
aoqi@0 2098 cache,
aoqi@0 2099 (jvalue *)STACK_SLOT(-1)),
aoqi@0 2100 handle_exception);
aoqi@0 2101 }
aoqi@0 2102 }
aoqi@0 2103 #endif /* VM_JVMTI */
aoqi@0 2104
aoqi@0 2105 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
aoqi@0 2106 // out so c++ compiler has a chance for constant prop to fold everything possible away.
aoqi@0 2107
aoqi@0 2108 oop obj;
aoqi@0 2109 int count;
aoqi@0 2110 TosState tos_type = cache->flag_state();
aoqi@0 2111
aoqi@0 2112 count = -1;
aoqi@0 2113 if (tos_type == ltos || tos_type == dtos) {
aoqi@0 2114 --count;
aoqi@0 2115 }
aoqi@0 2116 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
aoqi@0 2117 Klass* k = cache->f1_as_klass();
aoqi@0 2118 obj = k->java_mirror();
aoqi@0 2119 } else {
aoqi@0 2120 --count;
aoqi@0 2121 obj = (oop) STACK_OBJECT(count);
aoqi@0 2122 CHECK_NULL(obj);
aoqi@0 2123 }
aoqi@0 2124
aoqi@0 2125 //
aoqi@0 2126 // Now store the result
aoqi@0 2127 //
aoqi@0 2128 int field_offset = cache->f2_as_index();
aoqi@0 2129 if (cache->is_volatile()) {
aoqi@0 2130 if (tos_type == itos) {
aoqi@0 2131 obj->release_int_field_put(field_offset, STACK_INT(-1));
aoqi@0 2132 } else if (tos_type == atos) {
aoqi@0 2133 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2134 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
aoqi@0 2135 } else if (tos_type == btos) {
aoqi@0 2136 obj->release_byte_field_put(field_offset, STACK_INT(-1));
kevinw@8368 2137 } else if (tos_type == ztos) {
kevinw@8368 2138 int bool_field = STACK_INT(-1); // only store LSB
kevinw@8368 2139 obj->release_byte_field_put(field_offset, (bool_field & 1));
aoqi@0 2140 } else if (tos_type == ltos) {
aoqi@0 2141 obj->release_long_field_put(field_offset, STACK_LONG(-1));
aoqi@0 2142 } else if (tos_type == ctos) {
aoqi@0 2143 obj->release_char_field_put(field_offset, STACK_INT(-1));
aoqi@0 2144 } else if (tos_type == stos) {
aoqi@0 2145 obj->release_short_field_put(field_offset, STACK_INT(-1));
aoqi@0 2146 } else if (tos_type == ftos) {
aoqi@0 2147 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
aoqi@0 2148 } else {
aoqi@0 2149 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
aoqi@0 2150 }
aoqi@0 2151 OrderAccess::storeload();
aoqi@0 2152 } else {
aoqi@0 2153 if (tos_type == itos) {
aoqi@0 2154 obj->int_field_put(field_offset, STACK_INT(-1));
aoqi@0 2155 } else if (tos_type == atos) {
aoqi@0 2156 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2157 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
aoqi@0 2158 } else if (tos_type == btos) {
aoqi@0 2159 obj->byte_field_put(field_offset, STACK_INT(-1));
kevinw@8368 2160 } else if (tos_type == ztos) {
kevinw@8368 2161 int bool_field = STACK_INT(-1); // only store LSB
kevinw@8368 2162 obj->byte_field_put(field_offset, (bool_field & 1));
aoqi@0 2163 } else if (tos_type == ltos) {
aoqi@0 2164 obj->long_field_put(field_offset, STACK_LONG(-1));
aoqi@0 2165 } else if (tos_type == ctos) {
aoqi@0 2166 obj->char_field_put(field_offset, STACK_INT(-1));
aoqi@0 2167 } else if (tos_type == stos) {
aoqi@0 2168 obj->short_field_put(field_offset, STACK_INT(-1));
aoqi@0 2169 } else if (tos_type == ftos) {
aoqi@0 2170 obj->float_field_put(field_offset, STACK_FLOAT(-1));
aoqi@0 2171 } else {
aoqi@0 2172 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
aoqi@0 2173 }
aoqi@0 2174 }
aoqi@0 2175
aoqi@0 2176 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
aoqi@0 2177 }
aoqi@0 2178
aoqi@0 2179 CASE(_new): {
aoqi@0 2180 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2181 ConstantPool* constants = istate->method()->constants();
aoqi@0 2182 if (!constants->tag_at(index).is_unresolved_klass()) {
aoqi@0 2183 // Make sure klass is initialized and doesn't have a finalizer
aoqi@0 2184 Klass* entry = constants->slot_at(index).get_klass();
aoqi@0 2185 assert(entry->is_klass(), "Should be resolved klass");
aoqi@0 2186 Klass* k_entry = (Klass*) entry;
aoqi@0 2187 assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
aoqi@0 2188 InstanceKlass* ik = (InstanceKlass*) k_entry;
aoqi@0 2189 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
aoqi@0 2190 size_t obj_size = ik->size_helper();
aoqi@0 2191 oop result = NULL;
aoqi@0 2192 // If the TLAB isn't pre-zeroed then we'll have to do it
aoqi@0 2193 bool need_zero = !ZeroTLAB;
aoqi@0 2194 if (UseTLAB) {
aoqi@0 2195 result = (oop) THREAD->tlab().allocate(obj_size);
aoqi@0 2196 }
aoqi@0 2197 // Disable non-TLAB-based fast-path, because profiling requires that all
aoqi@0 2198 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
aoqi@0 2199 // returns NULL.
aoqi@0 2200 #ifndef CC_INTERP_PROFILE
aoqi@0 2201 if (result == NULL) {
aoqi@0 2202 need_zero = true;
aoqi@0 2203 // Try to allocate in the shared eden
aoqi@0 2204 retry:
aoqi@0 2205 HeapWord* compare_to = *Universe::heap()->top_addr();
aoqi@0 2206 HeapWord* new_top = compare_to + obj_size;
aoqi@0 2207 if (new_top <= *Universe::heap()->end_addr()) {
aoqi@0 2208 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
aoqi@0 2209 goto retry;
aoqi@0 2210 }
aoqi@0 2211 result = (oop) compare_to;
aoqi@0 2212 }
aoqi@0 2213 }
aoqi@0 2214 #endif
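// The retry loop above is a simple bump-pointer allocation in the shared
// eden: CAS the new top pointer; if another thread raced us, reread the top
// and try again.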
aoqi@0 2215 if (result != NULL) {
aoqi@0 2216 // Initialize the object body (if it has a nonzero size and zeroing is needed) and then the header
aoqi@0 2217 if (need_zero ) {
aoqi@0 2218 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
aoqi@0 2219 obj_size -= sizeof(oopDesc) / oopSize;
aoqi@0 2220 if (obj_size > 0 ) {
aoqi@0 2221 memset(to_zero, 0, obj_size * HeapWordSize);
aoqi@0 2222 }
aoqi@0 2223 }
aoqi@0 2224 if (UseBiasedLocking) {
aoqi@0 2225 result->set_mark(ik->prototype_header());
aoqi@0 2226 } else {
aoqi@0 2227 result->set_mark(markOopDesc::prototype());
aoqi@0 2228 }
aoqi@0 2229 result->set_klass_gap(0);
aoqi@0 2230 result->set_klass(k_entry);
aoqi@0 2231 // Must prevent reordering of stores for object initialization
aoqi@0 2232 // with stores that publish the new object.
aoqi@0 2233 OrderAccess::storestore();
aoqi@0 2234 SET_STACK_OBJECT(result, 0);
aoqi@0 2235 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
aoqi@0 2236 }
aoqi@0 2237 }
aoqi@0 2238 }
aoqi@0 2239 // Slow case allocation
aoqi@0 2240 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
aoqi@0 2241 handle_exception);
aoqi@0 2242 // Must prevent reordering of stores for object initialization
aoqi@0 2243 // with stores that publish the new object.
aoqi@0 2244 OrderAccess::storestore();
aoqi@0 2245 SET_STACK_OBJECT(THREAD->vm_result(), 0);
aoqi@0 2246 THREAD->set_vm_result(NULL);
aoqi@0 2247 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
aoqi@0 2248 }
aoqi@0 2249 CASE(_anewarray): {
aoqi@0 2250 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2251 jint size = STACK_INT(-1);
aoqi@0 2252 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
aoqi@0 2253 handle_exception);
aoqi@0 2254 // Must prevent reordering of stores for object initialization
aoqi@0 2255 // with stores that publish the new object.
aoqi@0 2256 OrderAccess::storestore();
aoqi@0 2257 SET_STACK_OBJECT(THREAD->vm_result(), -1);
aoqi@0 2258 THREAD->set_vm_result(NULL);
aoqi@0 2259 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2260 }
aoqi@0 2261 CASE(_multianewarray): {
aoqi@0 2262 jint dims = *(pc+3);
aoqi@0 2263 jint size = STACK_INT(-1);
aoqi@0 2264 // stack grows down, dimensions are up!
aoqi@0 2265 jint *dimarray =
aoqi@0 2266 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
aoqi@0 2267 Interpreter::stackElementWords-1];
aoqi@0 2268 // adjust pointer to start of stack element
aoqi@0 2269 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
aoqi@0 2270 handle_exception);
aoqi@0 2271 // Must prevent reordering of stores for object initialization
aoqi@0 2272 // with stores that publish the new object.
aoqi@0 2273 OrderAccess::storestore();
aoqi@0 2274 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
aoqi@0 2275 THREAD->set_vm_result(NULL);
aoqi@0 2276 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
aoqi@0 2277 }
aoqi@0 2278 CASE(_checkcast):
aoqi@0 2279 if (STACK_OBJECT(-1) != NULL) {
aoqi@0 2280 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2281 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2282 // Constant pool may have actual klass or unresolved klass. If it is
aoqi@0 2283 // unresolved we must resolve it.
aoqi@0 2284 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
aoqi@0 2285 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
aoqi@0 2286 }
aoqi@0 2287 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
aoqi@0 2288 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
aoqi@0 2289 //
aoqi@0 2290 // Check for compatibility. This check must not GC!!
aoqi@0 2291 // Seems way more expensive now that we must dispatch.
aoqi@0 2292 //
aoqi@0 2293 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
aoqi@0 2294 // Decrement counter at checkcast.
aoqi@0 2295 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
aoqi@0 2296 ResourceMark rm(THREAD);
aoqi@0 2297 const char* objName = objKlass->external_name();
aoqi@0 2298 const char* klassName = klassOf->external_name();
aoqi@0 2299 char* message = SharedRuntime::generate_class_cast_message(
aoqi@0 2300 objName, klassName);
aoqi@0 2301 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
aoqi@0 2302 }
aoqi@0 2303 // Profile checkcast with null_seen and receiver.
aoqi@0 2304 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
aoqi@0 2305 } else {
aoqi@0 2306 // Profile checkcast with null_seen and receiver.
aoqi@0 2307 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
aoqi@0 2308 }
aoqi@0 2309 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2310
aoqi@0 2311 CASE(_instanceof):
aoqi@0 2312 if (STACK_OBJECT(-1) == NULL) {
aoqi@0 2313 SET_STACK_INT(0, -1);
aoqi@0 2314 // Profile instanceof with null_seen and receiver.
aoqi@0 2315 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
aoqi@0 2316 } else {
aoqi@0 2317 VERIFY_OOP(STACK_OBJECT(-1));
aoqi@0 2318 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2319 // Constant pool may have actual klass or unresolved klass. If it is
aoqi@0 2320 // unresolved we must resolve it.
aoqi@0 2321 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
aoqi@0 2322 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
aoqi@0 2323 }
aoqi@0 2324 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
aoqi@0 2325 Klass* objKlass = STACK_OBJECT(-1)->klass();
aoqi@0 2326 //
aoqi@0 2327 // Check for compatibility. This check must not GC!!
aoqi@0 2328 // Seems way more expensive now that we must dispatch.
aoqi@0 2329 //
aoqi@0 2330 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
aoqi@0 2331 SET_STACK_INT(1, -1);
aoqi@0 2332 } else {
aoqi@0 2333 SET_STACK_INT(0, -1);
aoqi@0 2334 // Decrement counter at checkcast.
aoqi@0 2335 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
aoqi@0 2336 }
aoqi@0 2337 // Profile instanceof with null_seen and receiver.
aoqi@0 2338 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
aoqi@0 2339 }
aoqi@0 2340 UPDATE_PC_AND_CONTINUE(3);
aoqi@0 2341
aoqi@0 2342 CASE(_ldc_w):
aoqi@0 2343 CASE(_ldc):
aoqi@0 2344 {
aoqi@0 2345 u2 index;
aoqi@0 2346 bool wide = false;
aoqi@0 2347 int incr = 2; // frequent case
aoqi@0 2348 if (opcode == Bytecodes::_ldc) {
aoqi@0 2349 index = pc[1];
aoqi@0 2350 } else {
aoqi@0 2351 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2352 incr = 3;
aoqi@0 2353 wide = true;
aoqi@0 2354 }
aoqi@0 2355
aoqi@0 2356 ConstantPool* constants = METHOD->constants();
aoqi@0 2357 switch (constants->tag_at(index).value()) {
aoqi@0 2358 case JVM_CONSTANT_Integer:
aoqi@0 2359 SET_STACK_INT(constants->int_at(index), 0);
aoqi@0 2360 break;
aoqi@0 2361
aoqi@0 2362 case JVM_CONSTANT_Float:
aoqi@0 2363 SET_STACK_FLOAT(constants->float_at(index), 0);
aoqi@0 2364 break;
aoqi@0 2365
aoqi@0 2366 case JVM_CONSTANT_String:
aoqi@0 2367 {
aoqi@0 2368 oop result = constants->resolved_references()->obj_at(index);
aoqi@0 2369 if (result == NULL) {
aoqi@0 2370 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
aoqi@0 2371 SET_STACK_OBJECT(THREAD->vm_result(), 0);
aoqi@0 2372 THREAD->set_vm_result(NULL);
aoqi@0 2373 } else {
aoqi@0 2374 VERIFY_OOP(result);
aoqi@0 2375 SET_STACK_OBJECT(result, 0);
aoqi@0 2376 }
aoqi@0 2377 break;
aoqi@0 2378 }
aoqi@0 2379
aoqi@0 2380 case JVM_CONSTANT_Class:
aoqi@0 2381 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
aoqi@0 2382 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
aoqi@0 2383 break;
aoqi@0 2384
aoqi@0 2385 case JVM_CONSTANT_UnresolvedClass:
aoqi@0 2386 case JVM_CONSTANT_UnresolvedClassInError:
aoqi@0 2387 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
aoqi@0 2388 SET_STACK_OBJECT(THREAD->vm_result(), 0);
aoqi@0 2389 THREAD->set_vm_result(NULL);
aoqi@0 2390 break;
aoqi@0 2391
aoqi@0 2392 default: ShouldNotReachHere();
aoqi@0 2393 }
aoqi@0 2394 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
aoqi@0 2395 }
aoqi@0 2396
aoqi@0 2397 CASE(_ldc2_w):
aoqi@0 2398 {
aoqi@0 2399 u2 index = Bytes::get_Java_u2(pc+1);
aoqi@0 2400
aoqi@0 2401 ConstantPool* constants = METHOD->constants();
aoqi@0 2402 switch (constants->tag_at(index).value()) {
aoqi@0 2403
aoqi@0 2404 case JVM_CONSTANT_Long:
aoqi@0 2405 SET_STACK_LONG(constants->long_at(index), 1);
aoqi@0 2406 break;
aoqi@0 2407
aoqi@0 2408 case JVM_CONSTANT_Double:
aoqi@0 2409 SET_STACK_DOUBLE(constants->double_at(index), 1);
aoqi@0 2410 break;
aoqi@0 2411 default: ShouldNotReachHere();
aoqi@0 2412 }
aoqi@0 2413 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
aoqi@0 2414 }
aoqi@0 2415
aoqi@0 2416 CASE(_fast_aldc_w):
aoqi@0 2417 CASE(_fast_aldc): {
aoqi@0 2418 u2 index;
aoqi@0 2419 int incr;
aoqi@0 2420 if (opcode == Bytecodes::_fast_aldc) {
aoqi@0 2421 index = pc[1];
aoqi@0 2422 incr = 2;
aoqi@0 2423 } else {
aoqi@0 2424 index = Bytes::get_native_u2(pc+1);
aoqi@0 2425 incr = 3;
aoqi@0 2426 }
aoqi@0 2427
aoqi@0 2428 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
aoqi@0 2429 // This kind of CP cache entry does not need to match the flags byte, because
aoqi@0 2430 // there is a 1-1 relation between bytecode type and CP entry type.
aoqi@0 2431 ConstantPool* constants = METHOD->constants();
aoqi@0 2432 oop result = constants->resolved_references()->obj_at(index);
aoqi@0 2433 if (result == NULL) {
aoqi@0 2434 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
aoqi@0 2435 handle_exception);
aoqi@0 2436 result = THREAD->vm_result();
aoqi@0 2437 }
aoqi@0 2438
aoqi@0 2439 VERIFY_OOP(result);
aoqi@0 2440 SET_STACK_OBJECT(result, 0);
aoqi@0 2441 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
aoqi@0 2442 }
aoqi@0 2443
aoqi@0 2444 CASE(_invokedynamic): {
aoqi@0 2445
aoqi@0 2446 if (!EnableInvokeDynamic) {
aoqi@0 2447 // We should not encounter this bytecode if !EnableInvokeDynamic.
aoqi@0 2448 // The verifier will stop it. However, if we get past the verifier,
aoqi@0 2449 // this will stop the thread in a reasonable way, without crashing the JVM.
aoqi@0 2450 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
aoqi@0 2451 handle_exception);
aoqi@0 2452 ShouldNotReachHere();
aoqi@0 2453 }
aoqi@0 2454
aoqi@0 2455 u4 index = Bytes::get_native_u4(pc+1);
aoqi@0 2456 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
aoqi@0 2457
aoqi@0 2458 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
aoqi@0 2459 // This kind of CP cache entry does not need to match the flags byte, because
aoqi@0 2460 // there is a 1-1 relation between bytecode type and CP entry type.
aoqi@0 2461 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
aoqi@0 2462 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
aoqi@0 2463 handle_exception);
aoqi@0 2464 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
aoqi@0 2465 }
aoqi@0 2466
aoqi@0 2467 Method* method = cache->f1_as_method();
aoqi@0 2468 if (VerifyOops) method->verify();
aoqi@0 2469
aoqi@0 2470 if (cache->has_appendix()) {
aoqi@0 2471 ConstantPool* constants = METHOD->constants();
aoqi@0 2472 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
aoqi@0 2473 MORE_STACK(1);
aoqi@0 2474 }
aoqi@0 2475
aoqi@0 2476 istate->set_msg(call_method);
aoqi@0 2477 istate->set_callee(method);
aoqi@0 2478 istate->set_callee_entry_point(method->from_interpreted_entry());
aoqi@0 2479 istate->set_bcp_advance(5);
aoqi@0 2480
aoqi@0 2481 // Invokedynamic has got a call counter, just like an invokestatic -> increment!
aoqi@0 2482 BI_PROFILE_UPDATE_CALL();
aoqi@0 2483
aoqi@0 2484 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2485 }
aoqi@0 2486
aoqi@0 2487 CASE(_invokehandle): {
aoqi@0 2488
aoqi@0 2489 if (!EnableInvokeDynamic) {
aoqi@0 2490 ShouldNotReachHere();
aoqi@0 2491 }
aoqi@0 2492
aoqi@0 2493 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2494 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2495
aoqi@0 2496 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
aoqi@0 2497 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
aoqi@0 2498 handle_exception);
aoqi@0 2499 cache = cp->entry_at(index);
aoqi@0 2500 }
aoqi@0 2501
aoqi@0 2502 Method* method = cache->f1_as_method();
aoqi@0 2503 if (VerifyOops) method->verify();
aoqi@0 2504
aoqi@0 2505 if (cache->has_appendix()) {
aoqi@0 2506 ConstantPool* constants = METHOD->constants();
aoqi@0 2507 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
aoqi@0 2508 MORE_STACK(1);
aoqi@0 2509 }
aoqi@0 2510
aoqi@0 2511 istate->set_msg(call_method);
aoqi@0 2512 istate->set_callee(method);
aoqi@0 2513 istate->set_callee_entry_point(method->from_interpreted_entry());
aoqi@0 2514 istate->set_bcp_advance(3);
aoqi@0 2515
aoqi@0 2516 // Invokehandle has got a call counter, just like a final call -> increment!
aoqi@0 2517 BI_PROFILE_UPDATE_FINALCALL();
aoqi@0 2518
aoqi@0 2519 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2520 }
aoqi@0 2521
aoqi@0 2522 CASE(_invokeinterface): {
aoqi@0 2523 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2524
aoqi@0 2525 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
aoqi@0 2526 // out so c++ compiler has a chance for constant prop to fold everything possible away.
aoqi@0 2527
aoqi@0 2528 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2529 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 2530 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
aoqi@0 2531 handle_exception);
aoqi@0 2532 cache = cp->entry_at(index);
aoqi@0 2533 }
aoqi@0 2534
aoqi@0 2535 istate->set_msg(call_method);
aoqi@0 2536
aoqi@0 2537 // Special case of invokeinterface called for virtual method of
aoqi@0 2538 // java.lang.Object. See cpCacheOop.cpp for details.
aoqi@0 2539 // This code isn't produced by javac, but could be produced by
aoqi@0 2540 // another compliant java compiler.
aoqi@0 2541 if (cache->is_forced_virtual()) {
aoqi@0 2542 Method* callee;
aoqi@0 2543 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
aoqi@0 2544 if (cache->is_vfinal()) {
aoqi@0 2545 callee = cache->f2_as_vfinal_method();
aoqi@0 2546 // Profile 'special case of invokeinterface' final call.
aoqi@0 2547 BI_PROFILE_UPDATE_FINALCALL();
aoqi@0 2548 } else {
aoqi@0 2549 // Get receiver.
aoqi@0 2550 int parms = cache->parameter_size();
aoqi@0 2551 // Same comments as invokevirtual apply here.
aoqi@0 2552 oop rcvr = STACK_OBJECT(-parms);
aoqi@0 2553 VERIFY_OOP(rcvr);
aoqi@0 2554 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
aoqi@0 2555 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
aoqi@0 2556 // Profile 'special case of invokeinterface' virtual call.
aoqi@0 2557 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
aoqi@0 2558 }
aoqi@0 2559 istate->set_callee(callee);
aoqi@0 2560 istate->set_callee_entry_point(callee->from_interpreted_entry());
aoqi@0 2561 #ifdef VM_JVMTI
aoqi@0 2562 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
aoqi@0 2563 istate->set_callee_entry_point(callee->interpreter_entry());
aoqi@0 2564 }
aoqi@0 2565 #endif /* VM_JVMTI */
aoqi@0 2566 istate->set_bcp_advance(5);
aoqi@0 2567 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2568 }
aoqi@0 2569
aoqi@0 2570 // this could definitely be cleaned up QQQ
aoqi@0 2571 Method* callee;
aph@9110 2572 Method *interface_method = cache->f2_as_interface_method();
aph@9110 2573 InstanceKlass* iclass = interface_method->method_holder();
aph@9110 2574
aoqi@0 2575 // get receiver
aoqi@0 2576 int parms = cache->parameter_size();
aoqi@0 2577 oop rcvr = STACK_OBJECT(-parms);
aoqi@0 2578 CHECK_NULL(rcvr);
aoqi@0 2579 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
aph@9110 2580
aph@9110 2581 // Receiver subtype check against resolved interface klass (REFC).
aph@9110 2582 {
aph@9110 2583 Klass* refc = cache->f1_as_klass();
aph@9110 2584 itableOffsetEntry* scan;
aph@9110 2585 for (scan = (itableOffsetEntry*) int2->start_of_itable();
aph@9110 2586 scan->interface_klass() != NULL;
aph@9110 2587 scan++) {
aph@9110 2588 if (scan->interface_klass() == refc) {
aph@9110 2589 break;
aph@9110 2590 }
aph@9110 2591 }
aph@9110 2592 // Check that the entry is non-null. A null entry means
aph@9110 2593 // that the receiver class doesn't implement the
aph@9110 2594 // interface, and wasn't the same as when the caller was
aph@9110 2595 // compiled.
aph@9110 2596 if (scan->interface_klass() == NULL) {
aph@9110 2597 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
aph@9110 2598 }
aph@9110 2599 }
aph@9110 2600
aoqi@0 2601 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
aoqi@0 2602 int i;
aoqi@0 2603 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
aoqi@0 2604 if (ki->interface_klass() == iclass) break;
aoqi@0 2605 }
aoqi@0 2606 // If the interface isn't found, this class doesn't implement this
aoqi@0 2607 // interface. The link resolver checks this but only for the first
aoqi@0 2608 // time this interface is called.
aoqi@0 2609 if (i == int2->itable_length()) {
aoqi@0 2610 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
aoqi@0 2611 }
aph@9110 2612 int mindex = interface_method->itable_index();
aph@9110 2613
aoqi@0 2614 itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
aoqi@0 2615 callee = im[mindex].method();
aoqi@0 2616 if (callee == NULL) {
aoqi@0 2617 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
aoqi@0 2618 }
aoqi@0 2619
aoqi@0 2620 // Profile virtual call.
aoqi@0 2621 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
aoqi@0 2622
aoqi@0 2623 istate->set_callee(callee);
aoqi@0 2624 istate->set_callee_entry_point(callee->from_interpreted_entry());
aoqi@0 2625 #ifdef VM_JVMTI
aoqi@0 2626 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
aoqi@0 2627 istate->set_callee_entry_point(callee->interpreter_entry());
aoqi@0 2628 }
aoqi@0 2629 #endif /* VM_JVMTI */
aoqi@0 2630 istate->set_bcp_advance(5);
aoqi@0 2631 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2632 }
aoqi@0 2633
aoqi@0 2634 CASE(_invokevirtual):
aoqi@0 2635 CASE(_invokespecial):
aoqi@0 2636 CASE(_invokestatic): {
aoqi@0 2637 u2 index = Bytes::get_native_u2(pc+1);
aoqi@0 2638
aoqi@0 2639 ConstantPoolCacheEntry* cache = cp->entry_at(index);
aoqi@0 2640 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
aoqi@0 2641 // out so c++ compiler has a chance for constant prop to fold everything possible away.
aoqi@0 2642
aoqi@0 2643 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
aoqi@0 2644 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
aoqi@0 2645 handle_exception);
aoqi@0 2646 cache = cp->entry_at(index);
aoqi@0 2647 }
aoqi@0 2648
aoqi@0 2649 istate->set_msg(call_method);
aoqi@0 2650 {
aoqi@0 2651 Method* callee;
aoqi@0 2652 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
aoqi@0 2653 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
aoqi@0 2654 if (cache->is_vfinal()) {
aoqi@0 2655 callee = cache->f2_as_vfinal_method();
aoqi@0 2656 // Profile final call.
aoqi@0 2657 BI_PROFILE_UPDATE_FINALCALL();
aoqi@0 2658 } else {
aoqi@0 2659 // get receiver
aoqi@0 2660 int parms = cache->parameter_size();
aoqi@0 2661 // this works but needs a resourcemark and seems to create a vtable on every call:
aoqi@0 2662 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
aoqi@0 2663 //
aoqi@0 2664 // this fails with an assert
aoqi@0 2665 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
aoqi@0 2666 // but this works
aoqi@0 2667 oop rcvr = STACK_OBJECT(-parms);
aoqi@0 2668 VERIFY_OOP(rcvr);
aoqi@0 2669 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
aoqi@0 2670 /*
aoqi@0 2671 Executing this code in java.lang.String:
aoqi@0 2672 public String(char value[]) {
aoqi@0 2673 this.count = value.length;
aoqi@0 2674 this.value = (char[])value.clone();
aoqi@0 2675 }
aoqi@0 2676
aoqi@0 2677 a find on rcvr->klass() reports:
aoqi@0 2678 {type array char}{type array class}
aoqi@0 2679 - klass: {other class}
aoqi@0 2680
aoqi@0 2681 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
aoqi@0 2682 because rcvr->klass()->oop_is_instance() == 0
aoqi@0 2683 However it seems to have a vtable in the right location. Huh?
aoqi@0 2684
aoqi@0 2685 */
aoqi@0 2686 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
aoqi@0 2687 // Profile virtual call.
aoqi@0 2688 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
aoqi@0 2689 }
aoqi@0 2690 } else {
aoqi@0 2691 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
aoqi@0 2692 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
aoqi@0 2693 }
aoqi@0 2694 callee = cache->f1_as_method();
aoqi@0 2695
aoqi@0 2696 // Profile call.
aoqi@0 2697 BI_PROFILE_UPDATE_CALL();
aoqi@0 2698 }
aoqi@0 2699
aoqi@0 2700 istate->set_callee(callee);
aoqi@0 2701 istate->set_callee_entry_point(callee->from_interpreted_entry());
aoqi@0 2702 #ifdef VM_JVMTI
aoqi@0 2703 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
aoqi@0 2704 istate->set_callee_entry_point(callee->interpreter_entry());
aoqi@0 2705 }
aoqi@0 2706 #endif /* VM_JVMTI */
aoqi@0 2707 istate->set_bcp_advance(3);
aoqi@0 2708 UPDATE_PC_AND_RETURN(0); // I'll be back...
aoqi@0 2709 }
aoqi@0 2710 }
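      /*
        Note on the call protocol shared by the invoke* cases: the interpreter loop
        never calls the callee directly. It records the target Method*, its entry
        point, and how far to advance the bcp afterwards (3 bytes here, 5 for
        invokeinterface above), sets the message to call_method, and returns to the
        frame manager, which builds the callee's frame and re-enters this loop --
        hence the "I'll be back" comments.
      */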
aoqi@0 2711
aoqi@0 2712       /* Allocate memory for a new array of a primitive type. */
aoqi@0 2713
aoqi@0 2714 CASE(_newarray): {
aoqi@0 2715 BasicType atype = (BasicType) *(pc+1);
aoqi@0 2716 jint size = STACK_INT(-1);
aoqi@0 2717 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
aoqi@0 2718 handle_exception);
aoqi@0 2719 // Must prevent reordering of stores for object initialization
aoqi@0 2720 // with stores that publish the new object.
aoqi@0 2721 OrderAccess::storestore();
aoqi@0 2722 SET_STACK_OBJECT(THREAD->vm_result(), -1);
aoqi@0 2723 THREAD->set_vm_result(NULL);
aoqi@0 2724
aoqi@0 2725 UPDATE_PC_AND_CONTINUE(2);
aoqi@0 2726 }
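      /*
        Why the storestore above matters, as a rough sketch (illustrative
        pseudo-code only): the runtime call initializes the new array (header,
        length, zeroed elements) before handing it back, so we need

          arr->length = n;           // initialization stores
          zero(arr->elements, n);
          storestore();              // keep the above ahead of ...
          publish(arr);              // ... any store that makes arr reachable

        Without the barrier, a thread that later sees the reference via a
        subsequent putfield/astore could observe a partially initialized array
        on a weakly ordered machine.
      */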
aoqi@0 2727
aoqi@0 2728 /* Throw an exception. */
aoqi@0 2729
aoqi@0 2730 CASE(_athrow): {
aoqi@0 2731 oop except_oop = STACK_OBJECT(-1);
aoqi@0 2732 CHECK_NULL(except_oop);
aoqi@0 2733 // set pending_exception so we use common code
aoqi@0 2734 THREAD->set_pending_exception(except_oop, NULL, 0);
aoqi@0 2735 goto handle_exception;
aoqi@0 2736 }
aoqi@0 2737
aoqi@0 2738 /* goto and jsr. They are exactly the same except jsr pushes
aoqi@0 2739 * the address of the next instruction first.
aoqi@0 2740 */
aoqi@0 2741
aoqi@0 2742 CASE(_jsr): {
aoqi@0 2743 /* push bytecode index on stack */
aoqi@0 2744 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
aoqi@0 2745 MORE_STACK(1);
aoqi@0 2746 /* FALL THROUGH */
aoqi@0 2747 }
aoqi@0 2748
aoqi@0 2749 CASE(_goto):
aoqi@0 2750 {
aoqi@0 2751 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
aoqi@0 2752 // Profile jump.
aoqi@0 2753 BI_PROFILE_UPDATE_JUMP();
aoqi@0 2754 address branch_pc = pc;
aoqi@0 2755 UPDATE_PC(offset);
aoqi@0 2756 DO_BACKEDGE_CHECKS(offset, branch_pc);
aoqi@0 2757 CONTINUE;
aoqi@0 2758 }
aoqi@0 2759
aoqi@0 2760 CASE(_jsr_w): {
aoqi@0 2761 /* push return address on the stack */
aoqi@0 2762 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
aoqi@0 2763 MORE_STACK(1);
aoqi@0 2764 /* FALL THROUGH */
aoqi@0 2765 }
aoqi@0 2766
aoqi@0 2767 CASE(_goto_w):
aoqi@0 2768 {
aoqi@0 2769 int32_t offset = Bytes::get_Java_u4(pc + 1);
aoqi@0 2770 // Profile jump.
aoqi@0 2771 BI_PROFILE_UPDATE_JUMP();
aoqi@0 2772 address branch_pc = pc;
aoqi@0 2773 UPDATE_PC(offset);
aoqi@0 2774 DO_BACKEDGE_CHECKS(offset, branch_pc);
aoqi@0 2775 CONTINUE;
aoqi@0 2776 }
aoqi@0 2777
aoqi@0 2778 /* return from a jsr or jsr_w */
aoqi@0 2779
aoqi@0 2780 CASE(_ret): {
aoqi@0 2781 // Profile ret.
aoqi@0 2782 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
aoqi@0 2783 // Now, update the pc.
aoqi@0 2784 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
aoqi@0 2785 UPDATE_PC_AND_CONTINUE(0);
aoqi@0 2786 }
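      /*
        A sketch of the bytecode shape jsr/ret implement (emitted by older
        compilers for try/finally; the offsets below are illustrative only):

          0:  jsr  +10       // run the finally "subroutine"; pushes bci 3
          3:  goto ...       // resume after the finally block
          ...
          10: astore_1       // save the returnAddress pushed by jsr
          ...                // finally body
          18: ret  1         // jump back to the bci saved in local 1

        which is why _jsr/_jsr_w above push (pc - code_base + instruction length)
        and _ret reloads the pc from a local slot here.
      */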
aoqi@0 2787
aoqi@0 2788 /* debugger breakpoint */
aoqi@0 2789
aoqi@0 2790 CASE(_breakpoint): {
aoqi@0 2791 Bytecodes::Code original_bytecode;
aoqi@0 2792 DECACHE_STATE();
aoqi@0 2793 SET_LAST_JAVA_FRAME();
aoqi@0 2794 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
aoqi@0 2795 METHOD, pc);
aoqi@0 2796 RESET_LAST_JAVA_FRAME();
aoqi@0 2797 CACHE_STATE();
aoqi@0 2798 if (THREAD->has_pending_exception()) goto handle_exception;
aoqi@0 2799 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
aoqi@0 2800 handle_exception);
aoqi@0 2801
aoqi@0 2802 opcode = (jubyte)original_bytecode;
aoqi@0 2803 goto opcode_switch;
aoqi@0 2804 }
aoqi@0 2805
aoqi@0 2806 DEFAULT:
aoqi@0 2807 fatal(err_msg("Unimplemented opcode %d = %s", opcode,
aoqi@0 2808 Bytecodes::name((Bytecodes::Code)opcode)));
aoqi@0 2809 goto finish;
aoqi@0 2810
aoqi@0 2811 } /* switch(opc) */
aoqi@0 2812
aoqi@0 2813
aoqi@0 2814 #ifdef USELABELS
aoqi@0 2815 check_for_exception:
aoqi@0 2816 #endif
aoqi@0 2817 {
aoqi@0 2818 if (!THREAD->has_pending_exception()) {
aoqi@0 2819 CONTINUE;
aoqi@0 2820 }
aoqi@0 2821 /* We will be gcsafe soon, so flush our state. */
aoqi@0 2822 DECACHE_PC();
aoqi@0 2823 goto handle_exception;
aoqi@0 2824 }
aoqi@0 2825 do_continue: ;
aoqi@0 2826
aoqi@0 2827 } /* while (1) interpreter loop */
aoqi@0 2828
aoqi@0 2829
aoqi@0 2830 // An exception exists in the thread state; see whether this activation can handle it.
aoqi@0 2831 handle_exception: {
aoqi@0 2832
aoqi@0 2833 HandleMarkCleaner __hmc(THREAD);
aoqi@0 2834 Handle except_oop(THREAD, THREAD->pending_exception());
aoqi@0 2835 // Prevent any subsequent HandleMarkCleaner in the VM
aoqi@0 2836 // from freeing the except_oop handle.
aoqi@0 2837 HandleMark __hm(THREAD);
aoqi@0 2838
aoqi@0 2839 THREAD->clear_pending_exception();
aoqi@0 2840 assert(except_oop(), "No exception to process");
aoqi@0 2841 intptr_t continuation_bci;
aoqi@0 2842 // expression stack is emptied
aoqi@0 2843 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
aoqi@0 2844 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
aoqi@0 2845 handle_exception);
aoqi@0 2846
aoqi@0 2847 except_oop = THREAD->vm_result();
aoqi@0 2848 THREAD->set_vm_result(NULL);
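      // A non-negative continuation_bci is the handler bci within this method; a
      // negative value means there is no handler in this activation, so we fall
      // through below, re-install the exception, and unwind.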
aoqi@0 2849 if (continuation_bci >= 0) {
aoqi@0 2850 // Place exception on top of stack
aoqi@0 2851 SET_STACK_OBJECT(except_oop(), 0);
aoqi@0 2852 MORE_STACK(1);
aoqi@0 2853 pc = METHOD->code_base() + continuation_bci;
aoqi@0 2854 if (TraceExceptions) {
aoqi@0 2855 ttyLocker ttyl;
aoqi@0 2856 ResourceMark rm;
coleenp@7358 2857 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
aoqi@0 2858 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
aoqi@0 2859 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
coleenp@7358 2860 (int)(istate->bcp() - METHOD->code_base()),
coleenp@7358 2861 (int)continuation_bci, p2i(THREAD));
aoqi@0 2862 }
aoqi@0 2863 // for AbortVMOnException flag
aoqi@0 2864 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
aoqi@0 2865
aoqi@0 2866 // Update profiling data.
aoqi@0 2867 BI_PROFILE_ALIGN_TO_CURRENT_BCI();
aoqi@0 2868 goto run;
aoqi@0 2869 }
aoqi@0 2870 if (TraceExceptions) {
aoqi@0 2871 ttyLocker ttyl;
aoqi@0 2872 ResourceMark rm;
coleenp@7358 2873 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
aoqi@0 2874 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
aoqi@0 2875 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
coleenp@7358 2876 (int)(istate->bcp() - METHOD->code_base()),
coleenp@7358 2877 p2i(THREAD));
aoqi@0 2878 }
aoqi@0 2879 // for AbortVMOnException flag
aoqi@0 2880 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
aoqi@0 2881 // No handler in this activation, unwind and try again
aoqi@0 2882 THREAD->set_pending_exception(except_oop(), NULL, 0);
aoqi@0 2883 goto handle_return;
aoqi@0 2884 } // handle_exception:
aoqi@0 2885
aoqi@0 2886 // Return from an interpreter invocation with the result of the interpretation
aoqi@0 2887 // on the top of the Java Stack (or a pending exception)
aoqi@0 2888
aoqi@0 2889 handle_Pop_Frame: {
aoqi@0 2890
aoqi@0 2891       // We don't really do anything special here except that we must be aware
aoqi@0 2892 // that we can get here without ever locking the method (if sync).
aoqi@0 2893 // Also we skip the notification of the exit.
aoqi@0 2894
aoqi@0 2895 istate->set_msg(popping_frame);
aoqi@0 2896       // Clear the pending pop so that, while the pop is in process,
aoqi@0 2897       // we don't start another one if a call_vm is done.
aoqi@0 2898 THREAD->clr_pop_frame_pending();
aoqi@0 2899       // Let the interpreter (only) see that we're in the process of popping a frame.
aoqi@0 2900 THREAD->set_pop_frame_in_process();
aoqi@0 2901
aoqi@0 2902 goto handle_return;
aoqi@0 2903
aoqi@0 2904 } // handle_Pop_Frame
aoqi@0 2905
aoqi@0 2906 // ForceEarlyReturn ends a method, and returns to the caller with a return value
aoqi@0 2907 // given by the invoker of the early return.
aoqi@0 2908 handle_Early_Return: {
aoqi@0 2909
aoqi@0 2910 istate->set_msg(early_return);
aoqi@0 2911
aoqi@0 2912 // Clear expression stack.
aoqi@0 2913 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
aoqi@0 2914
aoqi@0 2915 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
aoqi@0 2916
aoqi@0 2917 // Push the value to be returned.
aoqi@0 2918 switch (istate->method()->result_type()) {
aoqi@0 2919 case T_BOOLEAN:
aoqi@0 2920 case T_SHORT:
aoqi@0 2921 case T_BYTE:
aoqi@0 2922 case T_CHAR:
aoqi@0 2923 case T_INT:
aoqi@0 2924 SET_STACK_INT(ts->earlyret_value().i, 0);
aoqi@0 2925 MORE_STACK(1);
aoqi@0 2926 break;
aoqi@0 2927 case T_LONG:
aoqi@0 2928 SET_STACK_LONG(ts->earlyret_value().j, 1);
aoqi@0 2929 MORE_STACK(2);
aoqi@0 2930 break;
aoqi@0 2931 case T_FLOAT:
aoqi@0 2932 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
aoqi@0 2933 MORE_STACK(1);
aoqi@0 2934 break;
aoqi@0 2935 case T_DOUBLE:
aoqi@0 2936 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
aoqi@0 2937 MORE_STACK(2);
aoqi@0 2938 break;
aoqi@0 2939 case T_ARRAY:
aoqi@0 2940 case T_OBJECT:
aoqi@0 2941 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
aoqi@0 2942 MORE_STACK(1);
aoqi@0 2943 break;
aoqi@0 2944 }
aoqi@0 2945
aoqi@0 2946 ts->clr_earlyret_value();
aoqi@0 2947 ts->set_earlyret_oop(NULL);
aoqi@0 2948 ts->clr_earlyret_pending();
aoqi@0 2949
aoqi@0 2950 // Fall through to handle_return.
aoqi@0 2951
aoqi@0 2952 } // handle_Early_Return
aoqi@0 2953
aoqi@0 2954 handle_return: {
aoqi@0 2955 // A storestore barrier is required to order initialization of
aoqi@0 2956 // final fields with publishing the reference to the object that
aoqi@0 2957 // holds the field. Without the barrier the value of final fields
aoqi@0 2958 // can be observed to change.
aoqi@0 2959 OrderAccess::storestore();
aoqi@0 2960
aoqi@0 2961 DECACHE_STATE();
aoqi@0 2962
aoqi@0 2963 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
aoqi@0 2964 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
aoqi@0 2965 Handle original_exception(THREAD, THREAD->pending_exception());
aoqi@0 2966 Handle illegal_state_oop(THREAD, NULL);
aoqi@0 2967
aoqi@0 2968 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
aoqi@0 2969 // in any following VM entries from freeing our live handles, but illegal_state_oop
aoqi@0 2970 // isn't really allocated yet and so doesn't become live until later and
aoqi@0 2971     // in unpredictable places. Instead we must protect the places where we enter the
aoqi@0 2972     // VM. It would be much simpler (and safer) if we could allocate a real handle with
aoqi@0 2973     // a NULL oop in it and then overwrite the oop later as needed. Unfortunately,
aoqi@0 2974     // this isn't possible.
aoqi@0 2975
aoqi@0 2976 THREAD->clear_pending_exception();
aoqi@0 2977
aoqi@0 2978 //
aoqi@0 2979     // As far as we are concerned we have returned. If we have a pending exception,
aoqi@0 2980     // it will be returned as this invocation's result. However, if we get any
aoqi@0 2981     // exception(s) while checking monitor state, one of those IllegalMonitorStateExceptions
aoqi@0 2982     // will be our final result (i.e. a monitor exception trumps a pending exception).
aoqi@0 2983 //
aoqi@0 2984
aoqi@0 2985 // If we never locked the method (or really passed the point where we would have),
aoqi@0 2986 // there is no need to unlock it (or look for other monitors), since that
aoqi@0 2987 // could not have happened.
aoqi@0 2988
aoqi@0 2989 if (THREAD->do_not_unlock()) {
aoqi@0 2990
aoqi@0 2991 // Never locked, reset the flag now because obviously any caller must
aoqi@0 2992 // have passed their point of locking for us to have gotten here.
aoqi@0 2993
aoqi@0 2994 THREAD->clr_do_not_unlock();
aoqi@0 2995 } else {
aoqi@0 2996 // At this point we consider that we have returned. We now check that the
aoqi@0 2997 // locks were properly block structured. If we find that they were not
aoqi@0 2998 // used properly we will return with an illegal monitor exception.
aoqi@0 2999       // The exception is checked by the caller, not the callee, since this
aoqi@0 3000       // checking is considered to be part of the invocation and therefore
aoqi@0 3001       // in the caller's scope (JVM spec 8.13).
aoqi@0 3002       //
aoqi@0 3003       // Another weird thing to watch for is if the method was locked
aoqi@0 3004       // recursively and then not exited properly. This means we must
aoqi@0 3005       // examine all the entries in reverse time (and stack) order and
aoqi@0 3006       // unlock as we find them. If we find the method monitor before
aoqi@0 3007       // we are at the initial entry then we should throw an exception.
aoqi@0 3008       // It is not clear that the template-based interpreter does this
aoqi@0 3009       // correctly.
aoqi@0 3010
aoqi@0 3011 BasicObjectLock* base = istate->monitor_base();
aoqi@0 3012 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
aoqi@0 3013 bool method_unlock_needed = METHOD->is_synchronized();
aoqi@0 3014       // We know the initial monitor was used for the method, so don't check that
aoqi@0 3015       // slot in the loop.
aoqi@0 3016 if (method_unlock_needed) base--;
aoqi@0 3017
aoqi@0 3018       // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
aoqi@0 3019 while (end < base) {
aoqi@0 3020 oop lockee = end->obj();
aoqi@0 3021 if (lockee != NULL) {
aoqi@0 3022 BasicLock* lock = end->lock();
aoqi@0 3023 markOop header = lock->displaced_header();
aoqi@0 3024 end->set_obj(NULL);
aoqi@0 3025
aoqi@0 3026 if (!lockee->mark()->has_bias_pattern()) {
aoqi@0 3027 // If it isn't recursive we either must swap old header or call the runtime
aoqi@0 3028 if (header != NULL) {
aoqi@0 3029 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
aoqi@0 3030 // restore object for the slow case
aoqi@0 3031 end->set_obj(lockee);
aoqi@0 3032 {
aoqi@0 3033 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3034 HandleMark __hm(THREAD);
aoqi@0 3035 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
aoqi@0 3036 }
aoqi@0 3037 }
aoqi@0 3038 }
aoqi@0 3039 }
aoqi@0 3040 // One error is plenty
aoqi@0 3041 if (illegal_state_oop() == NULL && !suppress_error) {
aoqi@0 3042 {
aoqi@0 3043 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3044 HandleMark __hm(THREAD);
aoqi@0 3045 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
aoqi@0 3046 }
aoqi@0 3047 assert(THREAD->has_pending_exception(), "Lost our exception!");
aoqi@0 3048 illegal_state_oop = THREAD->pending_exception();
aoqi@0 3049 THREAD->clear_pending_exception();
aoqi@0 3050 }
aoqi@0 3051 }
aoqi@0 3052 end++;
aoqi@0 3053 }
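      // For reference, the fast path attempted in the loop above (and again for
      // the method monitor below) is the usual stack-lock exit, roughly:
      //
      //   if (displaced_header != NULL)                               // NULL => recursive enter
      //     CAS(obj->mark_addr(), expected = lock, new = displaced_header);
      //
      // Only if that CAS fails (the mark was inflated or otherwise changed) do we
      // fall back to InterpreterRuntime::monitorexit. Biased-locked objects are
      // skipped here and left to the biased-locking machinery.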
aoqi@0 3054 // Unlock the method if needed
aoqi@0 3055 if (method_unlock_needed) {
aoqi@0 3056 if (base->obj() == NULL) {
aoqi@0 3057           // The method is already unlocked; this is not good.
aoqi@0 3058 if (illegal_state_oop() == NULL && !suppress_error) {
aoqi@0 3059 {
aoqi@0 3060 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3061 HandleMark __hm(THREAD);
aoqi@0 3062 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
aoqi@0 3063 }
aoqi@0 3064 assert(THREAD->has_pending_exception(), "Lost our exception!");
aoqi@0 3065 illegal_state_oop = THREAD->pending_exception();
aoqi@0 3066 THREAD->clear_pending_exception();
aoqi@0 3067 }
aoqi@0 3068 } else {
aoqi@0 3069 //
aoqi@0 3070           // The initial monitor is always used for the method.
aoqi@0 3071           // However, if that slot no longer holds the oop for the method, it was unlocked
aoqi@0 3072           // and reused by something that wasn't unlocked!
aoqi@0 3073           //
aoqi@0 3074           // Deopt can come in with rcvr dead because C2 knows
aoqi@0 3075           // its value is preserved in the monitor. So we can't use locals[0] at all
aoqi@0 3076           // and must use the first monitor slot.
aoqi@0 3077 //
aoqi@0 3078 oop rcvr = base->obj();
aoqi@0 3079 if (rcvr == NULL) {
aoqi@0 3080 if (!suppress_error) {
aoqi@0 3081 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
aoqi@0 3082 illegal_state_oop = THREAD->pending_exception();
aoqi@0 3083 THREAD->clear_pending_exception();
aoqi@0 3084 }
aoqi@0 3085 } else if (UseHeavyMonitors) {
aoqi@0 3086 {
aoqi@0 3087 // Prevent any HandleMarkCleaner from freeing our live handles.
aoqi@0 3088 HandleMark __hm(THREAD);
aoqi@0 3089 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
aoqi@0 3090 }
aoqi@0 3091 if (THREAD->has_pending_exception()) {
aoqi@0 3092 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
aoqi@0 3093 THREAD->clear_pending_exception();
aoqi@0 3094 }
aoqi@0 3095 } else {
aoqi@0 3096 BasicLock* lock = base->lock();
aoqi@0 3097 markOop header = lock->displaced_header();
aoqi@0 3098 base->set_obj(NULL);
aoqi@0 3099
aoqi@0 3100 if (!rcvr->mark()->has_bias_pattern()) {
aoqi@0 3101 base->set_obj(NULL);
aoqi@0 3102 // If it isn't recursive we either must swap old header or call the runtime
aoqi@0 3103 if (header != NULL) {
aoqi@0 3104 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
aoqi@0 3105 // restore object for the slow case
aoqi@0 3106 base->set_obj(rcvr);
aoqi@0 3107 {
aoqi@0 3108 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3109 HandleMark __hm(THREAD);
aoqi@0 3110 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
aoqi@0 3111 }
aoqi@0 3112 if (THREAD->has_pending_exception()) {
aoqi@0 3113 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
aoqi@0 3114 THREAD->clear_pending_exception();
aoqi@0 3115 }
aoqi@0 3116 }
aoqi@0 3117 }
aoqi@0 3118 }
aoqi@0 3119 }
aoqi@0 3120 }
aoqi@0 3121 }
aoqi@0 3122 }
aoqi@0 3123 // Clear the do_not_unlock flag now.
aoqi@0 3124 THREAD->clr_do_not_unlock();
aoqi@0 3125
aoqi@0 3126 //
aoqi@0 3127 // Notify jvmti/jvmdi
aoqi@0 3128 //
aoqi@0 3129 // NOTE: we do not notify a method_exit if we have a pending exception,
aoqi@0 3130 // including an exception we generate for unlocking checks. In the former
aoqi@0 3131 // case, JVMDI has already been notified by our call for the exception handler
aoqi@0 3132 // and in both cases as far as JVMDI is concerned we have already returned.
aoqi@0 3133 // If we notify it again JVMDI will be all confused about how many frames
aoqi@0 3134 // are still on the stack (4340444).
aoqi@0 3135 //
aoqi@0 3136     // NOTE further! It turns out that the JVMTI spec in fact expects to see
aoqi@0 3137     // method_exit events whenever we leave an activation unless it was done
aoqi@0 3138     // for popframe. This is nothing like JVMDI. However, we are passing the
aoqi@0 3139     // tests at the moment (apparently because they are JVMDI-based), so rather
aoqi@0 3140     // than change this code and possibly fail tests we will leave it alone
aoqi@0 3141     // (with this note) in anticipation of changing the VM and the tests
aoqi@0 3142     // simultaneously.
aoqi@0 3143
aoqi@0 3144
aoqi@0 3145 //
aoqi@0 3146 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
aoqi@0 3147
aoqi@0 3148
aoqi@0 3149
aoqi@0 3150 #ifdef VM_JVMTI
aoqi@0 3151 if (_jvmti_interp_events) {
aoqi@0 3152 // Whenever JVMTI puts a thread in interp_only_mode, method
aoqi@0 3153 // entry/exit events are sent for that thread to track stack depth.
aoqi@0 3154 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
aoqi@0 3155 {
aoqi@0 3156 // Prevent any HandleMarkCleaner from freeing our live handles
aoqi@0 3157 HandleMark __hm(THREAD);
aoqi@0 3158 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
aoqi@0 3159 }
aoqi@0 3160 }
aoqi@0 3161 }
aoqi@0 3162 #endif /* VM_JVMTI */
aoqi@0 3163
aoqi@0 3164 //
aoqi@0 3165 // See if we are returning any exception
aoqi@0 3166 // A pending exception that was pending prior to a possible popping frame
aoqi@0 3167 // overrides the popping frame.
aoqi@0 3168 //
aoqi@0 3169 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
aoqi@0 3170 if (illegal_state_oop() != NULL || original_exception() != NULL) {
aoqi@0 3171 // Inform the frame manager we have no result.
aoqi@0 3172 istate->set_msg(throwing_exception);
aoqi@0 3173 if (illegal_state_oop() != NULL)
aoqi@0 3174 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
aoqi@0 3175 else
aoqi@0 3176 THREAD->set_pending_exception(original_exception(), NULL, 0);
aoqi@0 3177 UPDATE_PC_AND_RETURN(0);
aoqi@0 3178 }
aoqi@0 3179
aoqi@0 3180 if (istate->msg() == popping_frame) {
aoqi@0 3181 // Make it simpler on the assembly code and set the message for the frame pop.
aoqi@0 3182 // returns
aoqi@0 3183 if (istate->prev() == NULL) {
aoqi@0 3184 // We must be returning to a deoptimized frame (because popframe only happens between
aoqi@0 3185 // two interpreted frames). We need to save the current arguments in C heap so that
aoqi@0 3186 // the deoptimized frame when it restarts can copy the arguments to its expression
aoqi@0 3187 // stack and re-execute the call. We also have to notify deoptimization that this
aoqi@0 3188 // has occurred and to pick the preserved args copy them to the deoptimized frame's
aoqi@0 3189 // java expression stack. Yuck.
aoqi@0 3190 //
aoqi@0 3191 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
aoqi@0 3192 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
aoqi@0 3193 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
aoqi@0 3194 }
aoqi@0 3195 } else {
aoqi@0 3196 istate->set_msg(return_from_method);
aoqi@0 3197 }
aoqi@0 3198
aoqi@0 3199 // Normal return
aoqi@0 3200 // Advance the pc and return to frame manager
aoqi@0 3201 UPDATE_PC_AND_RETURN(1);
aoqi@0 3202 } /* handle_return: */
aoqi@0 3203
aoqi@0 3204 // This is really a fatal error return
aoqi@0 3205
aoqi@0 3206 finish:
aoqi@0 3207 DECACHE_TOS();
aoqi@0 3208 DECACHE_PC();
aoqi@0 3209
aoqi@0 3210 return;
aoqi@0 3211 }
aoqi@0 3212
aoqi@0 3213 /*
aoqi@0 3214 * All the code following this point is only produced once and is not present
aoqi@0 3215 * in the JVMTI version of the interpreter
aoqi@0 3216 */
aoqi@0 3217
aoqi@0 3218 #ifndef VM_JVMTI
aoqi@0 3219
aoqi@0 3220 // This constructor should only be used to construct the object to signal
aoqi@0 3221 // interpreter initialization. All other instances should be created by
aoqi@0 3222 // the frame manager.
aoqi@0 3223 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
aoqi@0 3224 if (msg != initialize) ShouldNotReachHere();
aoqi@0 3225 _msg = msg;
aoqi@0 3226 _self_link = this;
aoqi@0 3227 _prev_link = NULL;
aoqi@0 3228 }
aoqi@0 3229
aoqi@0 3230 // Inline static functions for Java Stack and Local manipulation
aoqi@0 3231
aoqi@0 3232 // The implementations are platform-dependent. We have to worry about alignment
aoqi@0 3233 // issues on some machines, which can differ on the same platform depending on
aoqi@0 3234 // whether it is also an LP64 machine.
aoqi@0 3235 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
aoqi@0 3236 return (address) tos[Interpreter::expr_index_at(-offset)];
aoqi@0 3237 }
aoqi@0 3238
aoqi@0 3239 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
aoqi@0 3240 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
aoqi@0 3241 }
aoqi@0 3242
aoqi@0 3243 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
aoqi@0 3244 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
aoqi@0 3245 }
aoqi@0 3246
aoqi@0 3247 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
aoqi@0 3248 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
aoqi@0 3249 }
aoqi@0 3250
aoqi@0 3251 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
aoqi@0 3252 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
aoqi@0 3253 }
aoqi@0 3254
aoqi@0 3255 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
aoqi@0 3256 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
aoqi@0 3257 }
aoqi@0 3258
aoqi@0 3259 // only used for value types
aoqi@0 3260 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
aoqi@0 3261 int offset) {
aoqi@0 3262 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3263 }
aoqi@0 3264
aoqi@0 3265 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
aoqi@0 3266 int offset) {
aoqi@0 3267 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3268 }
aoqi@0 3269
aoqi@0 3270 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
aoqi@0 3271 int offset) {
aoqi@0 3272 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3273 }
aoqi@0 3274
aoqi@0 3275 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
aoqi@0 3276 int offset) {
aoqi@0 3277 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
aoqi@0 3278 }
aoqi@0 3279
aoqi@0 3280 // Needs to be platform-dependent for the 32-bit platforms.
aoqi@0 3281 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
aoqi@0 3282 int offset) {
aoqi@0 3283 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
aoqi@0 3284 }
aoqi@0 3285
aoqi@0 3286 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
aoqi@0 3287 address addr, int offset) {
aoqi@0 3288 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
aoqi@0 3289 ((VMJavaVal64*)addr)->d);
aoqi@0 3290 }
aoqi@0 3291
aoqi@0 3292 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
aoqi@0 3293 int offset) {
aoqi@0 3294 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
aoqi@0 3295 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
aoqi@0 3296 }
aoqi@0 3297
aoqi@0 3298 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
aoqi@0 3299 address addr, int offset) {
aoqi@0 3300 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
aoqi@0 3301 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
aoqi@0 3302 ((VMJavaVal64*)addr)->l;
aoqi@0 3303 }
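// Longs and doubles occupy two expression-stack slots; the value is written to
// one slot and the adjacent placeholder slot is poisoned with 0xdeedbeeb so
// stray reads of the unused half are easier to spot. The calling convention is
// visible in handle_Early_Return above:
//
//   SET_STACK_LONG(ts->earlyret_value().j, 1);   // fills both slots
//   MORE_STACK(2);                               // then tos advances by two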
aoqi@0 3304
aoqi@0 3305 // Locals
aoqi@0 3306
aoqi@0 3307 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
aoqi@0 3308 return (address)locals[Interpreter::local_index_at(-offset)];
aoqi@0 3309 }
aoqi@0 3310 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
aoqi@0 3311 return (jint)locals[Interpreter::local_index_at(-offset)];
aoqi@0 3312 }
aoqi@0 3313 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
aoqi@0 3314 return (jfloat)locals[Interpreter::local_index_at(-offset)];
aoqi@0 3315 }
aoqi@0 3316 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
aoqi@0 3317 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
aoqi@0 3318 }
aoqi@0 3319 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
aoqi@0 3320 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
aoqi@0 3321 }
aoqi@0 3322 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
aoqi@0 3323 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
aoqi@0 3324 }
aoqi@0 3325
aoqi@0 3326 // Returns the address of the locals value.
aoqi@0 3327 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
aoqi@0 3328 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
aoqi@0 3329 }
aoqi@0 3330 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
aoqi@0 3331 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
aoqi@0 3332 }
aoqi@0 3333
aoqi@0 3334 // Used for local value or returnAddress
aoqi@0 3335 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
aoqi@0 3336 address value, int offset) {
aoqi@0 3337 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3338 }
aoqi@0 3339 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
aoqi@0 3340 jint value, int offset) {
aoqi@0 3341 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3342 }
aoqi@0 3343 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
aoqi@0 3344 jfloat value, int offset) {
aoqi@0 3345 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3346 }
aoqi@0 3347 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
aoqi@0 3348 oop value, int offset) {
aoqi@0 3349 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
aoqi@0 3350 }
aoqi@0 3351 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
aoqi@0 3352 jdouble value, int offset) {
aoqi@0 3353 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
aoqi@0 3354 }
aoqi@0 3355 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
aoqi@0 3356 jlong value, int offset) {
aoqi@0 3357 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
aoqi@0 3358 }
aoqi@0 3359 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
aoqi@0 3360 address addr, int offset) {
aoqi@0 3361 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
aoqi@0 3362 }
aoqi@0 3363 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
aoqi@0 3364 address addr, int offset) {
aoqi@0 3365 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
aoqi@0 3366 }
aoqi@0 3367
aoqi@0 3368 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
aoqi@0 3369 intptr_t* locals, int locals_offset) {
aoqi@0 3370 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
aoqi@0 3371 locals[Interpreter::local_index_at(-locals_offset)] = value;
aoqi@0 3372 }
aoqi@0 3373
aoqi@0 3374
aoqi@0 3375 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
aoqi@0 3376 int to_offset) {
aoqi@0 3377 tos[Interpreter::expr_index_at(-to_offset)] =
aoqi@0 3378 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
aoqi@0 3379 }
aoqi@0 3380
aoqi@0 3381 void BytecodeInterpreter::dup(intptr_t *tos) {
aoqi@0 3382 copy_stack_slot(tos, -1, 0);
aoqi@0 3383 }
aoqi@0 3384 void BytecodeInterpreter::dup2(intptr_t *tos) {
aoqi@0 3385 copy_stack_slot(tos, -2, 0);
aoqi@0 3386 copy_stack_slot(tos, -1, 1);
aoqi@0 3387 }
aoqi@0 3388
aoqi@0 3389 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
aoqi@0 3390 /* insert top word two down */
aoqi@0 3391 copy_stack_slot(tos, -1, 0);
aoqi@0 3392 copy_stack_slot(tos, -2, -1);
aoqi@0 3393 copy_stack_slot(tos, 0, -2);
aoqi@0 3394 }
aoqi@0 3395
aoqi@0 3396 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
aoqi@0 3397 /* insert top word three down */
aoqi@0 3398 copy_stack_slot(tos, -1, 0);
aoqi@0 3399 copy_stack_slot(tos, -2, -1);
aoqi@0 3400 copy_stack_slot(tos, -3, -2);
aoqi@0 3401 copy_stack_slot(tos, 0, -3);
aoqi@0 3402 }
aoqi@0 3403 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
aoqi@0 3404 /* insert top 2 slots three down */
aoqi@0 3405 copy_stack_slot(tos, -1, 1);
aoqi@0 3406 copy_stack_slot(tos, -2, 0);
aoqi@0 3407 copy_stack_slot(tos, -3, -1);
aoqi@0 3408 copy_stack_slot(tos, 1, -2);
aoqi@0 3409 copy_stack_slot(tos, 0, -3);
aoqi@0 3410 }
aoqi@0 3411 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
aoqi@0 3412 /* insert top 2 slots four down */
aoqi@0 3413 copy_stack_slot(tos, -1, 1);
aoqi@0 3414 copy_stack_slot(tos, -2, 0);
aoqi@0 3415 copy_stack_slot(tos, -3, -1);
aoqi@0 3416 copy_stack_slot(tos, -4, -2);
aoqi@0 3417 copy_stack_slot(tos, 1, -3);
aoqi@0 3418 copy_stack_slot(tos, 0, -4);
aoqi@0 3419 }
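// A worked example of the shuffles above, writing the stack with the top at the
// right (offset -1 is the current top, 0 the next free slot):
//
//   dup_x1 :  ... B A      ->  ... A B A
//   dup2_x2:  ... D C B A  ->  ... B A D C B A
//
// copy_stack_slot() moves raw slots, so the same routines serve any mix of
// category-1 and category-2 values that the verifier permits.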
aoqi@0 3420
aoqi@0 3421
aoqi@0 3422 void BytecodeInterpreter::swap(intptr_t *tos) {
aoqi@0 3423 // swap top two elements
aoqi@0 3424 intptr_t val = tos[Interpreter::expr_index_at(1)];
aoqi@0 3425 // Copy -2 entry to -1
aoqi@0 3426 copy_stack_slot(tos, -2, -1);
aoqi@0 3427 // Store saved -1 entry into -2
aoqi@0 3428 tos[Interpreter::expr_index_at(2)] = val;
aoqi@0 3429 }
aoqi@0 3430 // --------------------------------------------------------------------------------
aoqi@0 3431 // Non-product code
aoqi@0 3432 #ifndef PRODUCT
aoqi@0 3433
aoqi@0 3434 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
aoqi@0 3435 switch (msg) {
aoqi@0 3436 case BytecodeInterpreter::no_request: return("no_request");
aoqi@0 3437 case BytecodeInterpreter::initialize: return("initialize");
aoqi@0 3438 // status message to C++ interpreter
aoqi@0 3439 case BytecodeInterpreter::method_entry: return("method_entry");
aoqi@0 3440 case BytecodeInterpreter::method_resume: return("method_resume");
aoqi@0 3441 case BytecodeInterpreter::got_monitors: return("got_monitors");
aoqi@0 3442 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
aoqi@0 3443 // requests to frame manager from C++ interpreter
aoqi@0 3444 case BytecodeInterpreter::call_method: return("call_method");
aoqi@0 3445 case BytecodeInterpreter::return_from_method: return("return_from_method");
aoqi@0 3446 case BytecodeInterpreter::more_monitors: return("more_monitors");
aoqi@0 3447 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
aoqi@0 3448 case BytecodeInterpreter::popping_frame: return("popping_frame");
aoqi@0 3449 case BytecodeInterpreter::do_osr: return("do_osr");
aoqi@0 3450 // deopt
aoqi@0 3451 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
aoqi@0 3452 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
aoqi@0 3453 default: return("BAD MSG");
aoqi@0 3454 }
aoqi@0 3455 }
aoqi@0 3456 void
aoqi@0 3457 BytecodeInterpreter::print() {
aoqi@0 3458 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
aoqi@0 3459 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
aoqi@0 3460 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
aoqi@0 3461 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
aoqi@0 3462 {
aoqi@0 3463 ResourceMark rm;
aoqi@0 3464 char *method_name = _method->name_and_sig_as_C_string();
aoqi@0 3465 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
aoqi@0 3466 }
aoqi@0 3467 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
aoqi@0 3468 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
aoqi@0 3469 tty->print_cr("msg: %s", C_msg(this->_msg));
aoqi@0 3470 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
aoqi@0 3471 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
aoqi@0 3472 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
aoqi@0 3473 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
aoqi@0 3474 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
aoqi@0 3475 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
coleenp@7675 3476 tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) p2i(this->_oop_temp));
aoqi@0 3477 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
aoqi@0 3478 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
aoqi@0 3479 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
aoqi@0 3480 #ifdef SPARC
aoqi@0 3481 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
aoqi@0 3482 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
aoqi@0 3483 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
aoqi@0 3484 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
aoqi@0 3485 #endif
aoqi@0 3486 #if !defined(ZERO)
aoqi@0 3487 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
aoqi@0 3488 #endif // !ZERO
aoqi@0 3489 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
aoqi@0 3490 }
aoqi@0 3491
aoqi@0 3492 extern "C" {
aoqi@0 3493 void PI(uintptr_t arg) {
aoqi@0 3494 ((BytecodeInterpreter*)arg)->print();
aoqi@0 3495 }
aoqi@0 3496 }
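// PI() gives native debuggers a hook for dumping interpreter state in
// non-product builds. For example, from gdb one might do something like
//
//   (gdb) call PI((uintptr_t) istate)
//
// where "istate" stands for whatever BytecodeInterpreter* is live in the frame
// you are stopped in (the name here is illustrative, not fixed).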
aoqi@0 3497 #endif // PRODUCT
aoqi@0 3498
aoqi@0 3499 #endif // JVMTI
aoqi@0 3500 #endif // CC_INTERP
