src/share/vm/interpreter/bytecodeInterpreter.cpp

Wed, 26 Jun 2013 16:06:38 +0200

author
goetz
date
Wed, 26 Jun 2013 16:06:38 +0200
changeset 6445
48d3d0eb193b
parent 5225
603ca7e51354
child 6446
583211d4b16b
permissions
-rw-r--r--

8017317: PPC64 (part 7): cppInterpreter: implement support for biased locking
Reviewed-by: kvn, dholmes

duke@435 1 /*
coleenp@4037 2 * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 // no precompiled headers
stefank@2314 26 #include "classfile/vmSymbols.hpp"
stefank@2314 27 #include "gc_interface/collectedHeap.hpp"
stefank@2314 28 #include "interpreter/bytecodeHistogram.hpp"
stefank@2314 29 #include "interpreter/bytecodeInterpreter.hpp"
stefank@2314 30 #include "interpreter/bytecodeInterpreter.inline.hpp"
stefank@2314 31 #include "interpreter/interpreter.hpp"
stefank@2314 32 #include "interpreter/interpreterRuntime.hpp"
stefank@2314 33 #include "memory/cardTableModRefBS.hpp"
stefank@2314 34 #include "memory/resourceArea.hpp"
jiangli@5065 35 #include "oops/methodCounters.hpp"
stefank@2314 36 #include "oops/objArrayKlass.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "prims/jvmtiExport.hpp"
goetz@6445 39 #include "runtime/biasedLocking.hpp"
stefank@2314 40 #include "runtime/frame.inline.hpp"
stefank@2314 41 #include "runtime/handles.inline.hpp"
stefank@2314 42 #include "runtime/interfaceSupport.hpp"
stefank@2314 43 #include "runtime/sharedRuntime.hpp"
stefank@2314 44 #include "runtime/threadCritical.hpp"
stefank@2314 45 #include "utilities/exceptions.hpp"
stefank@2314 46 #ifdef TARGET_OS_ARCH_linux_x86
stefank@2314 47 # include "orderAccess_linux_x86.inline.hpp"
stefank@2314 48 #endif
stefank@2314 49 #ifdef TARGET_OS_ARCH_linux_sparc
stefank@2314 50 # include "orderAccess_linux_sparc.inline.hpp"
stefank@2314 51 #endif
stefank@2314 52 #ifdef TARGET_OS_ARCH_linux_zero
stefank@2314 53 # include "orderAccess_linux_zero.inline.hpp"
stefank@2314 54 #endif
stefank@2314 55 #ifdef TARGET_OS_ARCH_solaris_x86
stefank@2314 56 # include "orderAccess_solaris_x86.inline.hpp"
stefank@2314 57 #endif
stefank@2314 58 #ifdef TARGET_OS_ARCH_solaris_sparc
stefank@2314 59 # include "orderAccess_solaris_sparc.inline.hpp"
stefank@2314 60 #endif
stefank@2314 61 #ifdef TARGET_OS_ARCH_windows_x86
stefank@2314 62 # include "orderAccess_windows_x86.inline.hpp"
stefank@2314 63 #endif
bobv@2508 64 #ifdef TARGET_OS_ARCH_linux_arm
bobv@2508 65 # include "orderAccess_linux_arm.inline.hpp"
bobv@2508 66 #endif
bobv@2508 67 #ifdef TARGET_OS_ARCH_linux_ppc
bobv@2508 68 # include "orderAccess_linux_ppc.inline.hpp"
bobv@2508 69 #endif
never@3156 70 #ifdef TARGET_OS_ARCH_bsd_x86
never@3156 71 # include "orderAccess_bsd_x86.inline.hpp"
never@3156 72 #endif
never@3156 73 #ifdef TARGET_OS_ARCH_bsd_zero
never@3156 74 # include "orderAccess_bsd_zero.inline.hpp"
never@3156 75 #endif
stefank@2314 76
stefank@2314 77
stefank@2314 78 // no precompiled headers
duke@435 79 #ifdef CC_INTERP
duke@435 80 
duke@435 81 /*
duke@435 82 * USELABELS - If using GCC, then use labels for the opcode dispatching
duke@435 83 * rather than a switch statement. This improves performance because it
duke@435 84 * gives us the opportunity to have the instructions that calculate the
duke@435 85 * next opcode to jump to be intermixed with the rest of the instructions
duke@435 86 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
duke@435 87 */
duke@435 88 #undef USELABELS
duke@435 89 #ifdef __GNUC__
duke@435 90 /*
duke@435 91 ASSERT signifies debugging. It is much easier to step through bytecodes if we
duke@435 92 don't use the computed goto approach.
duke@435 93 */
duke@435 94 #ifndef ASSERT
duke@435 95 #define USELABELS
duke@435 96 #endif
duke@435 97 #endif
duke@435 98 
duke@435 99 #undef CASE
duke@435 100 #ifdef USELABELS
/* Computed-goto dispatch: each bytecode handler is a GCC label, e.g. CASE(_iload) -> opc_iload. */
duke@435 101 #define CASE(opcode) opc ## opcode
duke@435 102 #define DEFAULT opc_default
duke@435 103 #else
/* Portable dispatch: plain switch cases over the Bytecodes::Code enum. */
duke@435 104 #define CASE(opcode) case Bytecodes:: opcode
duke@435 105 #define DEFAULT default
duke@435 106 #endif
duke@435 107 
duke@435 108 /*
duke@435 109 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
duke@435 110 * opcode before going back to the top of the while loop, rather than having
duke@435 111 * the top of the while loop handle it. This provides a better opportunity
duke@435 112 * for instruction scheduling. Some compilers just do this prefetch
duke@435 113 * automatically. Some actually end up with worse performance if you
duke@435 114 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
duke@435 115 */
duke@435 116 #undef PREFETCH_OPCCODE
duke@435 117 #define PREFETCH_OPCCODE
duke@435 118
duke@435 119 /*
duke@435 120 Interpreter safepoint: it is expected that the interpreter will have no live
duke@435 121 handles of its own creation live at an interpreter safepoint. Therefore we
duke@435 122 run a HandleMarkCleaner and trash all handles allocated in the call chain
duke@435 123 since the JavaCalls::call_helper invocation that initiated the chain.
duke@435 124 There really shouldn't be any handles remaining to trash but this is cheap
duke@435 125 in relation to a safepoint.
duke@435 126 */
duke@435 127 #define SAFEPOINT \
duke@435 128 if ( SafepointSynchronize::is_synchronizing()) { \
duke@435 129 { \
duke@435 130 /* zap freed handles rather than GC'ing them */ \
duke@435 131 HandleMarkCleaner __hmc(THREAD); \
duke@435 132 } \
duke@435 133 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
duke@435 134 }
duke@435 135 
duke@435 136 /*
duke@435 137 * VM_JAVA_ERROR - Macro for throwing a java exception from
duke@435 138 * the interpreter loop. Should really be a CALL_VM but there
duke@435 139 * is no entry point to do the transition to vm so we just
duke@435 140 * do it by hand here.
duke@435 141 */
/* Flushes cached interpreter state, performs the Java->VM transition, throws, and re-caches; does NOT jump to the handler. */
duke@435 142 #define VM_JAVA_ERROR_NO_JUMP(name, msg) \
duke@435 143 DECACHE_STATE(); \
duke@435 144 SET_LAST_JAVA_FRAME(); \
duke@435 145 { \
duke@435 146 ThreadInVMfromJava trans(THREAD); \
duke@435 147 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
duke@435 148 } \
duke@435 149 RESET_LAST_JAVA_FRAME(); \
duke@435 150 CACHE_STATE();
duke@435 151 
duke@435 152 // Normal throw of a java error
duke@435 153 #define VM_JAVA_ERROR(name, msg) \
duke@435 154 VM_JAVA_ERROR_NO_JUMP(name, msg) \
duke@435 155 goto handle_exception;
duke@435 156 
/* Bytecode counting/tracing support is compiled out of PRODUCT builds. */
duke@435 157 #ifdef PRODUCT
duke@435 158 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
duke@435 159 #else
duke@435 160 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
duke@435 161 { \
duke@435 162 BytecodeCounter::_counter_value++; \
duke@435 163 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
duke@435 164 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
duke@435 165 if (TraceBytecodes) { \
duke@435 166 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
duke@435 167 topOfStack[Interpreter::expr_index_at(1)], \
duke@435 168 topOfStack[Interpreter::expr_index_at(2)]), \
duke@435 169 handle_exception); \
duke@435 170 } \
duke@435 171 }
duke@435 172 #endif
duke@435 173
duke@435 174 #undef DEBUGGER_SINGLE_STEP_NOTIFY
duke@435 175 #ifdef VM_JVMTI
duke@435 176 /* NOTE: (kbr) This macro must be called AFTER the PC has been
duke@435 177 incremented. JvmtiExport::at_single_stepping_point() may cause a
duke@435 178 breakpoint opcode to get inserted at the current PC to allow the
duke@435 179 debugger to coalesce single-step events.
duke@435 180 
duke@435 181 As a result if we call at_single_stepping_point() we refetch opcode
duke@435 182 to get the current opcode. This will override any other prefetching
duke@435 183 that might have occurred.
duke@435 184 */
duke@435 185 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
duke@435 186 { \
duke@435 187 if (_jvmti_interp_events) { \
duke@435 188 if (JvmtiExport::should_post_single_step()) { \
duke@435 189 DECACHE_STATE(); \
duke@435 190 SET_LAST_JAVA_FRAME(); \
duke@435 191 ThreadInVMfromJava trans(THREAD); \
duke@435 192 JvmtiExport::at_single_stepping_point(THREAD, \
duke@435 193 istate->method(), \
duke@435 194 pc); \
duke@435 195 RESET_LAST_JAVA_FRAME(); \
duke@435 196 CACHE_STATE(); \
duke@435 197 if (THREAD->pop_frame_pending() && \
duke@435 198 !THREAD->pop_frame_in_process()) { \
duke@435 199 goto handle_Pop_Frame; \
duke@435 200 } \
duke@435 201 opcode = *pc; \
duke@435 202 } \
duke@435 203 } \
duke@435 204 }
duke@435 205 #else
duke@435 206 #define DEBUGGER_SINGLE_STEP_NOTIFY()
duke@435 207 #endif
duke@435 208 
duke@435 209 /*
duke@435 210 * CONTINUE - Macro for executing the next opcode.
duke@435 211 */
duke@435 212 #undef CONTINUE
duke@435 213 #ifdef USELABELS
duke@435 214 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
duke@435 215 // initialization (which is the initialization of the table pointer...)
coleenp@955 216 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
duke@435 217 #define CONTINUE { \
duke@435 218 opcode = *pc; \
duke@435 219 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 220 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 221 DISPATCH(opcode); \
duke@435 222 }
duke@435 223 #else
duke@435 224 #ifdef PREFETCH_OPCCODE
/* switch-based dispatch: fetch the next opcode here, then loop back to the switch. */
duke@435 225 #define CONTINUE { \
duke@435 226 opcode = *pc; \
duke@435 227 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 228 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 229 continue; \
duke@435 230 }
duke@435 231 #else
/* switch-based dispatch without prefetch: the loop head fetches the opcode. */
duke@435 232 #define CONTINUE { \
duke@435 233 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 234 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 235 continue; \
duke@435 236 }
duke@435 237 #endif
duke@435 238 #endif
duke@435 239
duke@435 240
duke@435 241 #define UPDATE_PC(opsize) {pc += opsize; }
duke@435 242 /*
duke@435 243 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
duke@435 244 */
duke@435 245 #undef UPDATE_PC_AND_TOS
duke@435 246 #define UPDATE_PC_AND_TOS(opsize, stack) \
duke@435 247 {pc += opsize; MORE_STACK(stack); }
duke@435 248 
duke@435 249 /*
duke@435 250 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
duke@435 251 * and executing the next opcode. It's somewhat similar to the combination
duke@435 252 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
duke@435 253 */
duke@435 254 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
duke@435 255 #ifdef USELABELS
duke@435 256 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
duke@435 257 pc += opsize; opcode = *pc; MORE_STACK(stack); \
duke@435 258 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 259 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 260 DISPATCH(opcode); \
duke@435 261 }
duke@435 262 
duke@435 263 #define UPDATE_PC_AND_CONTINUE(opsize) { \
duke@435 264 pc += opsize; opcode = *pc; \
duke@435 265 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 266 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 267 DISPATCH(opcode); \
duke@435 268 }
duke@435 269 #else
duke@435 270 #ifdef PREFETCH_OPCCODE
duke@435 271 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
duke@435 272 pc += opsize; opcode = *pc; MORE_STACK(stack); \
duke@435 273 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 274 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 275 goto do_continue; \
duke@435 276 }
duke@435 277 
duke@435 278 #define UPDATE_PC_AND_CONTINUE(opsize) { \
duke@435 279 pc += opsize; opcode = *pc; \
duke@435 280 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 281 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 282 goto do_continue; \
duke@435 283 }
duke@435 284 #else
duke@435 285 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
duke@435 286 pc += opsize; MORE_STACK(stack); \
duke@435 287 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 288 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 289 goto do_continue; \
duke@435 290 }
duke@435 291 
duke@435 292 #define UPDATE_PC_AND_CONTINUE(opsize) { \
duke@435 293 pc += opsize; \
duke@435 294 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
duke@435 295 DEBUGGER_SINGLE_STEP_NOTIFY(); \
duke@435 296 goto do_continue; \
duke@435 297 }
duke@435 298 #endif /* PREFETCH_OPCCODE */
duke@435 299 #endif /* USELABELS */
duke@435 300 
duke@435 301 // About to call a new method, save the adjusted pc and return to frame manager
duke@435 302 #define UPDATE_PC_AND_RETURN(opsize) \
duke@435 303 DECACHE_TOS(); \
duke@435 304 istate->set_bcp(pc+opsize); \
duke@435 305 return;
duke@435 306 
duke@435 307 
duke@435 308 #define METHOD istate->method()
/* Fetches the method's MethodCounters, lazily building them via a VM call if not yet allocated. */
jiangli@5065 309 #define GET_METHOD_COUNTERS(res) \
jiangli@5065 310 res = METHOD->method_counters(); \
jiangli@5065 311 if (res == NULL) { \
jiangli@5065 312 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
jiangli@5065 313 }
jiangli@5065 314 
/* Asks the VM for an OSR nmethod at the given branch bcp; result may be NULL. */
duke@435 315 #define OSR_REQUEST(res, branch_pc) \
duke@435 316 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
duke@435 317 /*
duke@435 318 * For those opcodes that need to have a GC point on a backwards branch
duke@435 319 */
duke@435 320 
duke@435 321 // Backedge counting is kind of strange. The asm interpreter will increment
duke@435 322 // the backedge counter as a separate counter but it does its comparisons
duke@435 323 // to the sum (scaled) of invocation counter and backedge count to make
duke@435 324 // a decision. Seems kind of odd to sum them together like that
duke@435 325 
duke@435 326 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
duke@435 327 
duke@435 328 
/* On a backward branch (skip <= 0): bump counters, possibly trigger OSR, and take a safepoint check. */
duke@435 329 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
duke@435 330 if ((skip) <= 0) { \
jiangli@5065 331 MethodCounters* mcs; \
jiangli@5065 332 GET_METHOD_COUNTERS(mcs); \
twisti@1513 333 if (UseLoopCounter) { \
duke@435 334 bool do_OSR = UseOnStackReplacement; \
jiangli@5065 335 mcs->backedge_counter()->increment(); \
jiangli@5065 336 if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit(); \
duke@435 337 if (do_OSR) { \
duke@435 338 nmethod* osr_nmethod; \
duke@435 339 OSR_REQUEST(osr_nmethod, branch_pc); \
duke@435 340 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
twisti@1513 341 intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD); \
duke@435 342 istate->set_msg(do_osr); \
duke@435 343 istate->set_osr_buf((address)buf); \
duke@435 344 istate->set_osr_entry(osr_nmethod->osr_entry()); \
duke@435 345 return; \
duke@435 346 } \
duke@435 347 } \
duke@435 348 } /* UseCompiler ... */ \
jiangli@5065 349 mcs->invocation_counter()->increment(); \
duke@435 350 SAFEPOINT; \
duke@435 351 }
duke@435 352
duke@435 353 /*
duke@435 354 * For those opcodes that need to have a GC point on a backwards branch
duke@435 355 */
duke@435 356 
duke@435 357 /*
duke@435 358 * Macros for caching and flushing the interpreter state. Some local
duke@435 359 * variables need to be flushed out to the frame before we do certain
duke@435 360 * things (like pushing frames or becoming gc safe) and some need to
duke@435 361 * be recached later (like after popping a frame). We could use one
duke@435 362 * macro to cache or decache everything, but this would be less than
duke@435 363 * optimal because we don't always need to cache or decache everything
duke@435 364 * because some things we know are already cached or decached.
duke@435 365 */
duke@435 366 #undef DECACHE_TOS
duke@435 367 #undef CACHE_TOS
duke@435 368 #undef CACHE_PREV_TOS
duke@435 369 #define DECACHE_TOS() istate->set_stack(topOfStack);
duke@435 370 
duke@435 371 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
duke@435 372 
duke@435 373 #undef DECACHE_PC
duke@435 374 #undef CACHE_PC
duke@435 375 #define DECACHE_PC() istate->set_bcp(pc);
duke@435 376 #define CACHE_PC() pc = istate->bcp();
duke@435 377 #define CACHE_CP() cp = istate->constants();
duke@435 378 #define CACHE_LOCALS() locals = istate->locals();
duke@435 379 #undef CACHE_FRAME
duke@435 380 #define CACHE_FRAME()
duke@435 381 
duke@435 382 /*
duke@435 383 * CHECK_NULL - Macro for throwing a NullPointerException if the object
duke@435 384 * passed is a null ref.
duke@435 385 * On some architectures/platforms it should be possible to do this implicitly
duke@435 386 */
duke@435 387 #undef CHECK_NULL
duke@435 388 #define CHECK_NULL(obj_) \
coleenp@955 389 if ((obj_) == NULL) { \
duke@435 390 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \
bobv@2036 391 } \
bobv@2036 392 VERIFY_OOP(obj_)
duke@435 393 
/* Long zero/one are written via max_jlong arithmetic so the constants have jlong type. */
duke@435 394 #define VMdoubleConstZero() 0.0
duke@435 395 #define VMdoubleConstOne() 1.0
duke@435 396 #define VMlongConstZero() (max_jlong-max_jlong)
duke@435 397 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
duke@435 398 
duke@435 399 /*
duke@435 400 * Alignment
duke@435 401 */
duke@435 402 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
duke@435 403 
duke@435 404 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
duke@435 405 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
duke@435 406 
duke@435 407 // Reload interpreter state after calling the VM or a possible GC
duke@435 408 #define CACHE_STATE() \
duke@435 409 CACHE_TOS(); \
duke@435 410 CACHE_PC(); \
duke@435 411 CACHE_CP(); \
duke@435 412 CACHE_LOCALS();
duke@435 413 
duke@435 414 // Call the VM don't check for pending exceptions
duke@435 415 #define CALL_VM_NOCHECK(func) \
duke@435 416 DECACHE_STATE(); \
duke@435 417 SET_LAST_JAVA_FRAME(); \
duke@435 418 func; \
duke@435 419 RESET_LAST_JAVA_FRAME(); \
duke@435 420 CACHE_STATE(); \
duke@435 421 if (THREAD->pop_frame_pending() && \
duke@435 422 !THREAD->pop_frame_in_process()) { \
duke@435 423 goto handle_Pop_Frame; \
duke@435 424 }
duke@435 425 
duke@435 426 // Call the VM and check for pending exceptions
duke@435 427 #define CALL_VM(func, label) { \
duke@435 428 CALL_VM_NOCHECK(func); \
duke@435 429 if (THREAD->has_pending_exception()) goto label; \
duke@435 430 }
duke@435 431
duke@435 432 /*
duke@435 433 * BytecodeInterpreter::run(interpreterState istate)
duke@435 434 * BytecodeInterpreter::runWithChecks(interpreterState istate)
duke@435 435 *
duke@435 436 * The real deal. This is where byte codes actually get interpreted.
duke@435 437 * Basically it's a big while loop that iterates until we return from
duke@435 438 * the method passed in.
duke@435 439 *
duke@435 440 * The runWithChecks is used if JVMTI is enabled.
duke@435 441 *
duke@435 442 */
duke@435 443 #if defined(VM_JVMTI)
duke@435 444 void
duke@435 445 BytecodeInterpreter::runWithChecks(interpreterState istate) {
duke@435 446 #else
duke@435 447 void
duke@435 448 BytecodeInterpreter::run(interpreterState istate) {
duke@435 449 #endif
duke@435 450
duke@435 451 // In order to simplify some tests based on switches set at runtime
duke@435 452 // we invoke the interpreter a single time after switches are enabled
duke@435 453 // and set simpler to to test variables rather than method calls or complex
duke@435 454 // boolean expressions.
duke@435 455
duke@435 456 static int initialized = 0;
duke@435 457 static int checkit = 0;
duke@435 458 static intptr_t* c_addr = NULL;
duke@435 459 static intptr_t c_value;
duke@435 460
duke@435 461 if (checkit && *c_addr != c_value) {
duke@435 462 os::breakpoint();
duke@435 463 }
duke@435 464 #ifdef VM_JVMTI
duke@435 465 static bool _jvmti_interp_events = 0;
duke@435 466 #endif
duke@435 467
duke@435 468 static int _compiling; // (UseCompiler || CountCompiledCalls)
duke@435 469
duke@435 470 #ifdef ASSERT
duke@435 471 if (istate->_msg != initialize) {
roland@5225 472 // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
roland@5225 473 // because in that case, EnableInvokeDynamic is true by default but will be later switched off
roland@5225 474 // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
roland@5225 475 // for the old JSR292 implementation.
roland@5225 476 // This leads to a situation where 'istate->_stack_limit' always accounts for
roland@5225 477 // methodOopDesc::extra_stack_entries() because it is computed in
roland@5225 478 // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
roland@5225 479 // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
roland@5225 480 // account for extra_stack_entries() anymore because at the time when it is called
roland@5225 481 // EnableInvokeDynamic was already set to false.
roland@5225 482 // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
roland@5225 483 // switched off because of the wrong classes.
roland@5225 484 if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
roland@5225 485 assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
roland@5225 486 } else {
roland@5225 487 const int extra_stack_entries = Method::extra_stack_entries_for_indy;
roland@5225 488 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
roland@5225 489 + 1), "bad stack limit");
roland@5225 490 }
twisti@2084 491 #ifndef SHARK
twisti@2084 492 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
twisti@2084 493 #endif // !SHARK
duke@435 494 }
duke@435 495 // Verify linkages.
duke@435 496 interpreterState l = istate;
duke@435 497 do {
duke@435 498 assert(l == l->_self_link, "bad link");
duke@435 499 l = l->_prev_link;
duke@435 500 } while (l != NULL);
duke@435 501 // Screwups with stack management usually cause us to overwrite istate
duke@435 502 // save a copy so we can verify it.
duke@435 503 interpreterState orig = istate;
duke@435 504 #endif
duke@435 505
duke@435 506 static volatile jbyte* _byte_map_base; // adjusted card table base for oop store barrier
duke@435 507
duke@435 508 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
duke@435 509 register address pc = istate->bcp();
duke@435 510 register jubyte opcode;
duke@435 511 register intptr_t* locals = istate->locals();
coleenp@4037 512 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
duke@435 513 #ifdef LOTS_OF_REGS
duke@435 514 register JavaThread* THREAD = istate->thread();
duke@435 515 register volatile jbyte* BYTE_MAP_BASE = _byte_map_base;
duke@435 516 #else
duke@435 517 #undef THREAD
duke@435 518 #define THREAD istate->thread()
duke@435 519 #undef BYTE_MAP_BASE
duke@435 520 #define BYTE_MAP_BASE _byte_map_base
duke@435 521 #endif
duke@435 522
duke@435 523 #ifdef USELABELS
duke@435 524 const static void* const opclabels_data[256] = {
duke@435 525 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
duke@435 526 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
duke@435 527 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
duke@435 528 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
duke@435 529
duke@435 530 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
duke@435 531 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
duke@435 532 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1,
duke@435 533 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
duke@435 534
duke@435 535 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
duke@435 536 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
duke@435 537 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
duke@435 538 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
duke@435 539
duke@435 540 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
duke@435 541 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
duke@435 542 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
duke@435 543 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
duke@435 544
duke@435 545 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
duke@435 546 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
duke@435 547 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
duke@435 548 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
duke@435 549
duke@435 550 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
duke@435 551 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
duke@435 552 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
duke@435 553 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
duke@435 554
duke@435 555 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
duke@435 556 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
duke@435 557 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
duke@435 558 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
duke@435 559
duke@435 560 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
duke@435 561 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
duke@435 562 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
duke@435 563 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
duke@435 564
duke@435 565 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
duke@435 566 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
duke@435 567 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
duke@435 568 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
duke@435 569
duke@435 570 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
duke@435 571 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
duke@435 572 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
duke@435 573 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
duke@435 574
duke@435 575 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
duke@435 576 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
duke@435 577 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch,
duke@435 578 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
duke@435 579
duke@435 580 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
duke@435 581 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
twisti@2762 582 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
duke@435 583 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
duke@435 584
duke@435 585 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
duke@435 586 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
sgoldman@558 587 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default,
sgoldman@558 588 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
sgoldman@558 589
sgoldman@558 590 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 591 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 592 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 593 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 594
duke@435 595 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
twisti@2762 596 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
twisti@4237 597 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
duke@435 598 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 599
duke@435 600 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 601 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 602 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
duke@435 603 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
duke@435 604 };
duke@435 605 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
duke@435 606 #endif /* USELABELS */
duke@435 607
duke@435 608 #ifdef ASSERT
duke@435 609 // this will trigger a VERIFY_OOP on entry
duke@435 610 if (istate->msg() != initialize && ! METHOD->is_static()) {
duke@435 611 oop rcvr = LOCALS_OBJECT(0);
bobv@2036 612 VERIFY_OOP(rcvr);
duke@435 613 }
duke@435 614 #endif
duke@435 615 // #define HACK
duke@435 616 #ifdef HACK
duke@435 617 bool interesting = false;
duke@435 618 #endif // HACK
duke@435 619
duke@435 620 /* QQQ this should be a stack method so we don't know actual direction */
bobv@2036 621 guarantee(istate->msg() == initialize ||
duke@435 622 topOfStack >= istate->stack_limit() &&
duke@435 623 topOfStack < istate->stack_base(),
duke@435 624 "Stack top out of range");
duke@435 625
duke@435 626 switch (istate->msg()) {
duke@435 627 case initialize: {
duke@435 628 if (initialized++) ShouldNotReachHere(); // Only one initialize call
duke@435 629 _compiling = (UseCompiler || CountCompiledCalls);
duke@435 630 #ifdef VM_JVMTI
duke@435 631 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
duke@435 632 #endif
duke@435 633 BarrierSet* bs = Universe::heap()->barrier_set();
duke@435 634 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
duke@435 635 _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
duke@435 636 return;
duke@435 637 }
duke@435 638 break;
case method_entry: {
  // The frame manager has built a fresh interpreter frame for a method
  // call: bump invocation counters (possibly triggering a compile
  // request), acquire the monitor for a synchronized method (biased
  // locking fast paths first), post the JVMTI method-entry event, then
  // jump into the dispatch loop.
  THREAD->set_do_not_unlock();
  // count invocations
  assert(initialized, "Interpreter not initialized");
  if (_compiling) {
    MethodCounters* mcs;
    GET_METHOD_COUNTERS(mcs);
    if (ProfileInterpreter) {
      METHOD->increment_interpreter_invocation_count(THREAD);
    }
    mcs->invocation_counter()->increment();
    if (mcs->invocation_counter()->reached_InvocationLimit()) {
      // Hand the method to the compilation policy; execution continues
      // interpreted here regardless of the outcome.
      CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);

      // We no longer retry on a counter overflow

      // istate->set_msg(retry_method);
      // THREAD->clr_do_not_unlock();
      // return;
    }
    SAFEPOINT;
  }

  // Sanity check: the expression-stack area must match the method's
  // declared max_stack (+1 slot).
  if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
    // initialize
    os::breakpoint();
  }

#ifdef HACK
  {
    ResourceMark rm;
    char *method_name = istate->method()->name_and_sig_as_C_string();
    if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
      tty->print_cr("entering: depth %d bci: %d",
                    (istate->_stack_base - istate->_stack),
                    istate->_bcp - istate->_method->code_base());
      interesting = true;
    }
  }
#endif // HACK

  // lock method if synchronized
  if (METHOD->is_synchronized()) {
    // oop rcvr = locals[0].j.r;
    oop rcvr;
    if (METHOD->is_static()) {
      // Static methods synchronize on the holder class's java mirror.
      rcvr = METHOD->constants()->pool_holder()->java_mirror();
    } else {
      rcvr = LOCALS_OBJECT(0);
      VERIFY_OOP(rcvr);
    }
    // The initial monitor is ours for the taking
    // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
    BasicObjectLock* mon = &istate->monitor_base()[-1];
    mon->set_obj(rcvr);
    bool success = false;
    uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
    markOop mark = rcvr->mark();
    // NOTE(review): hash is never reassigned in this block, so the
    // copy_set_hash branches below can never fire here; presumably kept
    // to stay parallel with the assembler locking sequence -- confirm.
    intptr_t hash = (intptr_t) markOopDesc::no_hash;
    // Implies UseBiasedLocking.
    if (mark->has_bias_pattern()) {
      uintptr_t thread_ident;
      uintptr_t anticipated_bias_locking_value;
      thread_ident = (uintptr_t)istate->thread();
      // XOR the mark word against (prototype header | this thread) with
      // the age bits masked out: a zero result means the object is
      // already biased toward this thread in the current epoch.
      anticipated_bias_locking_value =
        (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
        ~((uintptr_t) markOopDesc::age_mask_in_place);

      if (anticipated_bias_locking_value == 0) {
        // Already biased towards this thread, nothing to do.
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::biased_lock_entry_count_addr())++;
        }
        success = true;
      } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
        // Try to revoke bias.
        markOop header = rcvr->klass()->prototype_header();
        if (hash != markOopDesc::no_hash) {
          header = header->copy_set_hash(hash);
        }
        if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
          if (PrintBiasedLockingStatistics)
            (*BiasedLocking::revoked_lock_entry_count_addr())++;
        }
        // success stays false: fall through to lightweight locking below.
      } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
        // Try to rebias.
        markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
        if (hash != markOopDesc::no_hash) {
          new_header = new_header->copy_set_hash(hash);
        }
        if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::rebiased_lock_entry_count_addr())++;
          }
        } else {
          // Lost the race: let the runtime acquire the monitor.
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
        }
        success = true;
      } else {
        // Try to bias towards thread in case object is anonymously biased.
        markOop header = (markOop) ((uintptr_t) mark &
                                    ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                     (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
        if (hash != markOopDesc::no_hash) {
          header = header->copy_set_hash(hash);
        }
        markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
        // Debugging hint.
        DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
        if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
          }
        } else {
          // Another thread won the bias race; go through the runtime.
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
        }
        success = true;
      }
    }

    // Traditional lightweight locking.
    if (!success) {
      markOop displaced = rcvr->mark()->set_unlocked();
      mon->lock()->set_displaced_header(displaced);
      bool call_vm = UseHeavyMonitors;
      if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
        // Is it simple recursive case?
        if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
          mon->lock()->set_displaced_header(NULL);
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
        }
      }
    }
  }
  THREAD->clr_do_not_unlock();

  // Notify jvmti
#ifdef VM_JVMTI
  if (_jvmti_interp_events) {
    // Whenever JVMTI puts a thread in interp_only_mode, method
    // entry/exit events are sent for that thread to track stack depth.
    if (THREAD->is_interp_only_mode()) {
      CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
              handle_exception);
    }
  }
#endif /* VM_JVMTI */

  goto run;
}
duke@435 791
case popping_frame: {
  // returned from a java call to pop the frame, restart the call
  // clear the message so we don't confuse ourselves later
  // NOTE: everything after ShouldNotReachHere() is dead code, retained
  // from when popping_frame was still delivered through this path.
  ShouldNotReachHere(); // we don't return this.
  assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
  istate->set_msg(no_request);
  THREAD->clr_pop_frame_in_process();
  goto run;
}
duke@435 801
case method_resume: {
  // Resuming this frame after a callee returned: honor any pending
  // PopFrame request or exception, advance pc past the invoke bytecode,
  // then re-enter the dispatch loop.
  if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
    // resume
    os::breakpoint();
  }
#ifdef HACK
  {
    ResourceMark rm;
    char *method_name = istate->method()->name_and_sig_as_C_string();
    if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
      tty->print_cr("resume: depth %d bci: %d",
                    (istate->_stack_base - istate->_stack) ,
                    istate->_bcp - istate->_method->code_base());
      interesting = true;
    }
  }
#endif // HACK
  // returned from a java call, continue executing.
  if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
    goto handle_Pop_Frame;
  }

  if (THREAD->has_pending_exception()) goto handle_exception;
  // Update the pc by the saved amount of the invoke bytecode size
  UPDATE_PC(istate->bcp_advance());
  goto run;
}
duke@435 829
case deopt_resume2: {
  // Returned from an opcode that will reexecute. Deopt was
  // a result of a PopFrame request.
  //
  // pc already points at the bytecode to re-execute, so no adjustment
  // is made before re-entering the dispatch loop.
  goto run;
}

case deopt_resume: {
  // Returned from an opcode that has completed. The stack has
  // the result all we need to do is skip across the bytecode
  // and continue (assuming there is no exception pending)
  //
  // compute continuation length
  //
  // Note: it is possible to deopt at a return_register_finalizer opcode
  // because this requires entering the vm to do the registering. While the
  // opcode is complete we can't advance because there are no more opcodes
  // much like trying to deopt at a poll return. In that has we simply
  // get out of here
  //
  if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
    // this will do the right thing even if an exception is pending.
    goto handle_return;
  }
  // Skip over the just-completed bytecode (length varies per opcode).
  UPDATE_PC(Bytecodes::length_at(METHOD, pc));
  if (THREAD->has_pending_exception()) goto handle_exception;
  goto run;
}
case got_monitors: {
  // Resumed monitorenter: the frame manager has grown the monitor area
  // and the object to lock is still on top of the expression stack.
  // Same biased-locking / lightweight-locking sequence as method_entry.
  // continue locking now that we have a monitor to use
  // we expect to find newly allocated monitor at the "top" of the monitor stack.
  oop lockee = STACK_OBJECT(-1);
  VERIFY_OOP(lockee);
  // derefing's lockee ought to provoke implicit null check
  // find a free monitor
  BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
  assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
  entry->set_obj(lockee);
  bool success = false;
  uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;

  markOop mark = lockee->mark();
  // NOTE(review): hash is never reassigned below, so the copy_set_hash
  // branches can never fire in this path -- see the matching note in
  // method_entry.
  intptr_t hash = (intptr_t) markOopDesc::no_hash;
  // implies UseBiasedLocking
  if (mark->has_bias_pattern()) {
    uintptr_t thread_ident;
    uintptr_t anticipated_bias_locking_value;
    thread_ident = (uintptr_t)istate->thread();
    // Zero result (age bits ignored) == already biased to this thread.
    anticipated_bias_locking_value =
      (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
      ~((uintptr_t) markOopDesc::age_mask_in_place);

    if (anticipated_bias_locking_value == 0) {
      // already biased towards this thread, nothing to do
      if (PrintBiasedLockingStatistics) {
        (* BiasedLocking::biased_lock_entry_count_addr())++;
      }
      success = true;
    } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
      // try revoke bias
      markOop header = lockee->klass()->prototype_header();
      if (hash != markOopDesc::no_hash) {
        header = header->copy_set_hash(hash);
      }
      if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
        if (PrintBiasedLockingStatistics) {
          (*BiasedLocking::revoked_lock_entry_count_addr())++;
        }
      }
      // success stays false: fall through to lightweight locking below.
    } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
      // try rebias
      markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
      if (hash != markOopDesc::no_hash) {
        new_header = new_header->copy_set_hash(hash);
      }
      if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::rebiased_lock_entry_count_addr())++;
        }
      } else {
        // Lost the race: let the runtime acquire the monitor.
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
      }
      success = true;
    } else {
      // try to bias towards thread in case object is anonymously biased
      markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                      (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
      if (hash != markOopDesc::no_hash) {
        header = header->copy_set_hash(hash);
      }
      markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
      // debugging hint
      DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
      if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
        }
      } else {
        // Another thread won the bias race; go through the runtime.
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
      }
      success = true;
    }
  }

  // traditional lightweight locking
  if (!success) {
    markOop displaced = lockee->mark()->set_unlocked();
    entry->lock()->set_displaced_header(displaced);
    bool call_vm = UseHeavyMonitors;
    if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
      // Is it simple recursive case?
      if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
        entry->lock()->set_displaced_header(NULL);
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
      }
    }
  }
  // Pop the lockee and resume at the bytecode after monitorenter.
  UPDATE_PC_AND_TOS(1, -1);
  goto run;
}
default: {
  fatal("Unexpected message from frame manager");
}
}

run:
  // Main dispatch-loop entry point: every resume path above funnels here.
  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif

#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // need at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this labels avoids double breakpoints when quickening and
       * when returing from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      // Threaded dispatch: indirect jump through the opclabels table.
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          /* iconst_<n>/fconst_<n>: push a one-slot immediate constant. */
#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value)                          \
      CASE(opcode):                                                     \
          SET_STACK_ ## const_type(value, 0);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1, INT, -1);
          OPC_CONST_n(_iconst_0, INT, 0);
          OPC_CONST_n(_iconst_1, INT, 1);
          OPC_CONST_n(_iconst_2, INT, 2);
          OPC_CONST_n(_iconst_3, INT, 3);
          OPC_CONST_n(_iconst_4, INT, 4);
          OPC_CONST_n(_iconst_5, INT, 5);
          OPC_CONST_n(_fconst_0, FLOAT, 0.0);
          OPC_CONST_n(_fconst_1, FLOAT, 1.0);
          OPC_CONST_n(_fconst_2, FLOAT, 2.0);

          /* lconst_<n>/dconst_<n>: push a two-slot (category-2) constant. */
#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind)                         \
      CASE(_##opcname):                                                 \
      {                                                                 \
          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      }
         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
         OPC_CONST2_n(dconst_1, One, double, DOUBLE);
         OPC_CONST2_n(lconst_0, Zero, long, LONG);
         OPC_CONST2_n(lconst_1, One, long, LONG);

         /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
duke@435 1038
          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

          /* <t>load_<n>: one-byte forms of the loads above with the local
           * index baked into the opcode. */
#undef  OPC_LOAD_n
#define OPC_LOAD_n(num)                                                 \
      CASE(_aload_##num):                                               \
          VERIFY_OOP(LOCALS_OBJECT(num));                               \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_iload_##num):                                               \
      CASE(_fload_##num):                                               \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_lload_##num):                                               \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      CASE(_dload_##num):                                               \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          /* astore goes through a helper rather than SET_LOCALS_OBJECT. */
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
duke@435 1101
      CASE(_wide): {
          // wide prefix: the real opcode follows in pc[1] and the local
          // index widens to 16 bits (iinc also gets a 16-bit increment).
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];
          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  // NOTE(review): uses LOCALS_LONG_AT where the non-wide
                  // _dload uses LOCALS_DOUBLE_AT; presumably both resolve
                  // to the same two-slot address -- confirm in
                  // bytecodeInterpreter.hpp.
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // The local holds a returnAddress expressed as an offset
                  // from the method's code base; rebuild the absolute pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode");
          }
      }
duke@435 1155
duke@435 1156
          /* astore_<n>/istore_<n>/fstore_<n>: one-byte store forms with the
           * local index encoded in the opcode. */
#undef  OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                              \
          astore(topOfStack, -1, locals, num);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_istore_##num):                                              \
      CASE(_fstore_##num):                                              \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

          /* lstore_<n>/dstore_<n>: two-slot (category-2) store forms. */
#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                              \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      CASE(_lstore_##num):                                              \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);

          /* stack pop, dup, and insert opcodes */


      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);


      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):             /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):             /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):            /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):            /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {             /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }
duke@435 1225
          /* Perform various binary integer operations */

          /* i<op>/l<op>: pop two operands, push the result.  The "test"
           * argument enables the divide-by-zero check for div/rem. */
#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                                \
          if (test && (STACK_INT(-1) == 0)) {                           \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero");                               \
          }                                                             \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          if (test) {                                                   \
            jlong l1 = STACK_LONG(-1);                                  \
            if (VMlongEqz(l1)) {                                        \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero");                          \
            }                                                           \
          }                                                             \
          /* First long at (-1,-2) next long at (-3,-4) */              \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
                                        STACK_LONG(-1)),                \
                                        -3);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or, Or, 0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);


      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                               \
      CASE(_d##opcname): {                                              \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),           \
                                            STACK_DOUBLE(-1)),          \
                                            -3);                        \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }                                                                 \
      CASE(_f##opcname):                                                \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),              \
                                          STACK_FLOAT(-1)),             \
                                          -2);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


     OPC_FLOAT_BINARY(add, Add);
     OPC_FLOAT_BINARY(sub, Sub);
     OPC_FLOAT_BINARY(mul, Mul);
     OPC_FLOAT_BINARY(div, Div);
     OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
                                     STACK_INT(-1)),                    \
                                     -2);                               \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      CASE(_l##opcname):                                                \
      {                                                                 \
         /* shift count is an int even for long shifts: one slot popped */ \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
                                       STACK_INT(-1)),                  \
                                       -2);                             \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);
duke@435 1313
     /* Increment local variable by constant */
      CASE(_iinc):
      {
          // pc[2] is a signed 8-bit increment.
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

     /* negate the value on the top of the stack */

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      /* Conversion operations.  Widening to two slots pops the operand and
       * re-pushes at the wider size; narrowing does the reverse. */

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):   /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):  /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):  /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):  /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      /* i2b/i2c/i2s: narrow the int on top of the stack in place via the
       * VMint2* helpers. */
      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
duke@435 1452
duke@435 1453 /* comparison operators */
duke@435 1454
duke@435 1455
duke@435 1456 #define COMPARISON_OP(name, comparison) \
duke@435 1457 CASE(_if_icmp##name): { \
duke@435 1458 int skip = (STACK_INT(-2) comparison STACK_INT(-1)) \
duke@435 1459 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1460 address branch_pc = pc; \
duke@435 1461 UPDATE_PC_AND_TOS(skip, -2); \
duke@435 1462 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1463 CONTINUE; \
duke@435 1464 } \
duke@435 1465 CASE(_if##name): { \
duke@435 1466 int skip = (STACK_INT(-1) comparison 0) \
duke@435 1467 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1468 address branch_pc = pc; \
duke@435 1469 UPDATE_PC_AND_TOS(skip, -1); \
duke@435 1470 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1471 CONTINUE; \
duke@435 1472 }
duke@435 1473
duke@435 1474 #define COMPARISON_OP2(name, comparison) \
duke@435 1475 COMPARISON_OP(name, comparison) \
duke@435 1476 CASE(_if_acmp##name): { \
duke@435 1477 int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)) \
duke@435 1478 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1479 address branch_pc = pc; \
duke@435 1480 UPDATE_PC_AND_TOS(skip, -2); \
duke@435 1481 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1482 CONTINUE; \
duke@435 1483 }
duke@435 1484
duke@435 1485 #define NULL_COMPARISON_NOT_OP(name) \
duke@435 1486 CASE(_if##name): { \
coleenp@955 1487 int skip = (!(STACK_OBJECT(-1) == NULL)) \
duke@435 1488 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1489 address branch_pc = pc; \
duke@435 1490 UPDATE_PC_AND_TOS(skip, -1); \
duke@435 1491 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1492 CONTINUE; \
duke@435 1493 }
duke@435 1494
duke@435 1495 #define NULL_COMPARISON_OP(name) \
duke@435 1496 CASE(_if##name): { \
coleenp@955 1497 int skip = ((STACK_OBJECT(-1) == NULL)) \
duke@435 1498 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
duke@435 1499 address branch_pc = pc; \
duke@435 1500 UPDATE_PC_AND_TOS(skip, -1); \
duke@435 1501 DO_BACKEDGE_CHECKS(skip, branch_pc); \
duke@435 1502 CONTINUE; \
duke@435 1503 }
duke@435 1504 COMPARISON_OP(lt, <);
duke@435 1505 COMPARISON_OP(gt, >);
duke@435 1506 COMPARISON_OP(le, <=);
duke@435 1507 COMPARISON_OP(ge, >=);
duke@435 1508 COMPARISON_OP2(eq, ==); /* include ref comparison */
duke@435 1509 COMPARISON_OP2(ne, !=); /* include ref comparison */
duke@435 1510 NULL_COMPARISON_OP(null);
duke@435 1511 NULL_COMPARISON_NOT_OP(nonnull);
duke@435 1512
duke@435 1513 /* Goto pc at specified offset in switch table. */
duke@435 1514
duke@435 1515 CASE(_tableswitch): {
duke@435 1516 jint* lpc = (jint*)VMalignWordUp(pc+1);
duke@435 1517 int32_t key = STACK_INT(-1);
duke@435 1518 int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
duke@435 1519 int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
duke@435 1520 int32_t skip;
duke@435 1521 key -= low;
duke@435 1522 skip = ((uint32_t) key > (uint32_t)(high - low))
duke@435 1523 ? Bytes::get_Java_u4((address)&lpc[0])
duke@435 1524 : Bytes::get_Java_u4((address)&lpc[key + 3]);
duke@435 1525 // Does this really need a full backedge check (osr?)
duke@435 1526 address branch_pc = pc;
duke@435 1527 UPDATE_PC_AND_TOS(skip, -1);
duke@435 1528 DO_BACKEDGE_CHECKS(skip, branch_pc);
duke@435 1529 CONTINUE;
duke@435 1530 }
duke@435 1531
duke@435 1532 /* Goto pc whose table entry matches specified key */
duke@435 1533
duke@435 1534 CASE(_lookupswitch): {
duke@435 1535 jint* lpc = (jint*)VMalignWordUp(pc+1);
duke@435 1536 int32_t key = STACK_INT(-1);
duke@435 1537 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
duke@435 1538 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
duke@435 1539 while (--npairs >= 0) {
duke@435 1540 lpc += 2;
duke@435 1541 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
duke@435 1542 skip = Bytes::get_Java_u4((address)&lpc[1]);
duke@435 1543 break;
duke@435 1544 }
duke@435 1545 }
duke@435 1546 address branch_pc = pc;
duke@435 1547 UPDATE_PC_AND_TOS(skip, -1);
duke@435 1548 DO_BACKEDGE_CHECKS(skip, branch_pc);
duke@435 1549 CONTINUE;
duke@435 1550 }
duke@435 1551
duke@435 1552 CASE(_fcmpl):
duke@435 1553 CASE(_fcmpg):
duke@435 1554 {
duke@435 1555 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
duke@435 1556 STACK_FLOAT(-1),
duke@435 1557 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
duke@435 1558 -2);
duke@435 1559 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1560 }
duke@435 1561
duke@435 1562 CASE(_dcmpl):
duke@435 1563 CASE(_dcmpg):
duke@435 1564 {
duke@435 1565 int r = VMdoubleCompare(STACK_DOUBLE(-3),
duke@435 1566 STACK_DOUBLE(-1),
duke@435 1567 (opcode == Bytecodes::_dcmpl ? -1 : 1));
duke@435 1568 MORE_STACK(-4); // Pop
duke@435 1569 SET_STACK_INT(r, 0);
duke@435 1570 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1571 }
duke@435 1572
duke@435 1573 CASE(_lcmp):
duke@435 1574 {
duke@435 1575 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
duke@435 1576 MORE_STACK(-4);
duke@435 1577 SET_STACK_INT(r, 0);
duke@435 1578 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
duke@435 1579 }
duke@435 1580
duke@435 1581
duke@435 1582 /* Return from a method */
duke@435 1583
duke@435 1584 CASE(_areturn):
duke@435 1585 CASE(_ireturn):
duke@435 1586 CASE(_freturn):
duke@435 1587 {
duke@435 1588 // Allow a safepoint before returning to frame manager.
duke@435 1589 SAFEPOINT;
duke@435 1590
duke@435 1591 goto handle_return;
duke@435 1592 }
duke@435 1593
duke@435 1594 CASE(_lreturn):
duke@435 1595 CASE(_dreturn):
duke@435 1596 {
duke@435 1597 // Allow a safepoint before returning to frame manager.
duke@435 1598 SAFEPOINT;
duke@435 1599 goto handle_return;
duke@435 1600 }
duke@435 1601
duke@435 1602 CASE(_return_register_finalizer): {
duke@435 1603
duke@435 1604 oop rcvr = LOCALS_OBJECT(0);
bobv@2036 1605 VERIFY_OOP(rcvr);
coleenp@4037 1606 if (rcvr->klass()->has_finalizer()) {
duke@435 1607 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
duke@435 1608 }
duke@435 1609 goto handle_return;
duke@435 1610 }
duke@435 1611 CASE(_return): {
duke@435 1612
duke@435 1613 // Allow a safepoint before returning to frame manager.
duke@435 1614 SAFEPOINT;
duke@435 1615 goto handle_return;
duke@435 1616 }
duke@435 1617
duke@435 1618 /* Array access byte-codes */
duke@435 1619
duke@435 1620 /* Every array access byte-code starts out like this */
duke@435 1621 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
duke@435 1622 #define ARRAY_INTRO(arrayOff) \
duke@435 1623 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
duke@435 1624 jint index = STACK_INT(arrayOff + 1); \
duke@435 1625 char message[jintAsStringSize]; \
duke@435 1626 CHECK_NULL(arrObj); \
duke@435 1627 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
duke@435 1628 sprintf(message, "%d", index); \
duke@435 1629 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
duke@435 1630 message); \
duke@435 1631 }
duke@435 1632
duke@435 1633 /* 32-bit loads. These handle conversion from < 32-bit types */
duke@435 1634 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
duke@435 1635 { \
duke@435 1636 ARRAY_INTRO(-2); \
duke@435 1637 extra; \
duke@435 1638 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
duke@435 1639 -2); \
duke@435 1640 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
duke@435 1641 }
duke@435 1642
duke@435 1643 /* 64-bit loads */
duke@435 1644 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
duke@435 1645 { \
duke@435 1646 ARRAY_INTRO(-2); \
duke@435 1647 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
duke@435 1648 extra; \
duke@435 1649 UPDATE_PC_AND_CONTINUE(1); \
duke@435 1650 }
duke@435 1651
duke@435 1652 CASE(_iaload):
duke@435 1653 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
duke@435 1654 CASE(_faload):
duke@435 1655 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
duke@435 1656 CASE(_aaload):
duke@435 1657 ARRAY_LOADTO32(T_OBJECT, oop, INTPTR_FORMAT, STACK_OBJECT, 0);
duke@435 1658 CASE(_baload):
duke@435 1659 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
duke@435 1660 CASE(_caload):
duke@435 1661 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
duke@435 1662 CASE(_saload):
duke@435 1663 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
duke@435 1664 CASE(_laload):
duke@435 1665 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
duke@435 1666 CASE(_daload):
duke@435 1667 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
duke@435 1668
duke@435 1669 /* 32-bit stores. These handle conversion to < 32-bit types */
duke@435 1670 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
duke@435 1671 { \
duke@435 1672 ARRAY_INTRO(-3); \
duke@435 1673 extra; \
duke@435 1674 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
duke@435 1675 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
duke@435 1676 }
duke@435 1677
duke@435 1678 /* 64-bit stores */
duke@435 1679 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
duke@435 1680 { \
duke@435 1681 ARRAY_INTRO(-4); \
duke@435 1682 extra; \
duke@435 1683 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
duke@435 1684 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
duke@435 1685 }
duke@435 1686
duke@435 1687 CASE(_iastore):
duke@435 1688 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
duke@435 1689 CASE(_fastore):
duke@435 1690 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
duke@435 1691 /*
duke@435 1692 * This one looks different because of the assignability check
duke@435 1693 */
duke@435 1694 CASE(_aastore): {
duke@435 1695 oop rhsObject = STACK_OBJECT(-1);
bobv@2036 1696 VERIFY_OOP(rhsObject);
duke@435 1697 ARRAY_INTRO( -3);
duke@435 1698 // arrObj, index are set
duke@435 1699 if (rhsObject != NULL) {
duke@435 1700 /* Check assignability of rhsObject into arrObj */
coleenp@4037 1701 Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass)
coleenp@4142 1702 Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
duke@435 1703 //
duke@435 1704 // Check for compatibilty. This check must not GC!!
duke@435 1705 // Seems way more expensive now that we must dispatch
duke@435 1706 //
coleenp@4037 1707 if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is...
duke@435 1708 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
duke@435 1709 }
duke@435 1710 }
duke@435 1711 oop* elem_loc = (oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop));
duke@435 1712 // *(oop*)(((address) arrObj->base(T_OBJECT)) + index * sizeof(oop)) = rhsObject;
duke@435 1713 *elem_loc = rhsObject;
duke@435 1714 // Mark the card
duke@435 1715 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
duke@435 1716 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
duke@435 1717 }
duke@435 1718 CASE(_bastore):
duke@435 1719 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
duke@435 1720 CASE(_castore):
duke@435 1721 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
duke@435 1722 CASE(_sastore):
duke@435 1723 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
duke@435 1724 CASE(_lastore):
duke@435 1725 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
duke@435 1726 CASE(_dastore):
duke@435 1727 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
duke@435 1728
duke@435 1729 CASE(_arraylength):
duke@435 1730 {
duke@435 1731 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
duke@435 1732 CHECK_NULL(ary);
duke@435 1733 SET_STACK_INT(ary->length(), -1);
duke@435 1734 UPDATE_PC_AND_CONTINUE(1);
duke@435 1735 }
duke@435 1736
duke@435 1737 /* monitorenter and monitorexit for locking/unlocking an object */
duke@435 1738
duke@435 1739 CASE(_monitorenter): {
duke@435 1740 oop lockee = STACK_OBJECT(-1);
duke@435 1741 // derefing's lockee ought to provoke implicit null check
duke@435 1742 CHECK_NULL(lockee);
duke@435 1743 // find a free monitor or one already allocated for this object
duke@435 1744 // if we find a matching object then we need a new monitor
duke@435 1745 // since this is recursive enter
duke@435 1746 BasicObjectLock* limit = istate->monitor_base();
duke@435 1747 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
duke@435 1748 BasicObjectLock* entry = NULL;
duke@435 1749 while (most_recent != limit ) {
duke@435 1750 if (most_recent->obj() == NULL) entry = most_recent;
duke@435 1751 else if (most_recent->obj() == lockee) break;
duke@435 1752 most_recent++;
duke@435 1753 }
duke@435 1754 if (entry != NULL) {
duke@435 1755 entry->set_obj(lockee);
goetz@6445 1756 int success = false;
goetz@6445 1757 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
goetz@6445 1758
goetz@6445 1759 markOop mark = lockee->mark();
goetz@6445 1760 intptr_t hash = (intptr_t) markOopDesc::no_hash;
goetz@6445 1761 // implies UseBiasedLocking
goetz@6445 1762 if (mark->has_bias_pattern()) {
goetz@6445 1763 uintptr_t thread_ident;
goetz@6445 1764 uintptr_t anticipated_bias_locking_value;
goetz@6445 1765 thread_ident = (uintptr_t)istate->thread();
goetz@6445 1766 anticipated_bias_locking_value =
goetz@6445 1767 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
goetz@6445 1768 ~((uintptr_t) markOopDesc::age_mask_in_place);
goetz@6445 1769
goetz@6445 1770 if (anticipated_bias_locking_value == 0) {
goetz@6445 1771 // already biased towards this thread, nothing to do
goetz@6445 1772 if (PrintBiasedLockingStatistics) {
goetz@6445 1773 (* BiasedLocking::biased_lock_entry_count_addr())++;
goetz@6445 1774 }
goetz@6445 1775 success = true;
goetz@6445 1776 }
goetz@6445 1777 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
goetz@6445 1778 // try revoke bias
goetz@6445 1779 markOop header = lockee->klass()->prototype_header();
goetz@6445 1780 if (hash != markOopDesc::no_hash) {
goetz@6445 1781 header = header->copy_set_hash(hash);
goetz@6445 1782 }
goetz@6445 1783 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
goetz@6445 1784 if (PrintBiasedLockingStatistics)
goetz@6445 1785 (*BiasedLocking::revoked_lock_entry_count_addr())++;
goetz@6445 1786 }
goetz@6445 1787 }
goetz@6445 1788 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
goetz@6445 1789 // try rebias
goetz@6445 1790 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
goetz@6445 1791 if (hash != markOopDesc::no_hash) {
goetz@6445 1792 new_header = new_header->copy_set_hash(hash);
goetz@6445 1793 }
goetz@6445 1794 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
goetz@6445 1795 if (PrintBiasedLockingStatistics)
goetz@6445 1796 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
goetz@6445 1797 }
goetz@6445 1798 else {
goetz@6445 1799 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1800 }
goetz@6445 1801 success = true;
goetz@6445 1802 }
goetz@6445 1803 else {
goetz@6445 1804 // try to bias towards thread in case object is anonymously biased
goetz@6445 1805 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
goetz@6445 1806 (uintptr_t)markOopDesc::age_mask_in_place |
goetz@6445 1807 epoch_mask_in_place));
goetz@6445 1808 if (hash != markOopDesc::no_hash) {
goetz@6445 1809 header = header->copy_set_hash(hash);
goetz@6445 1810 }
goetz@6445 1811 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
goetz@6445 1812 // debugging hint
goetz@6445 1813 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
goetz@6445 1814 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
goetz@6445 1815 if (PrintBiasedLockingStatistics)
goetz@6445 1816 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
goetz@6445 1817 }
goetz@6445 1818 else {
goetz@6445 1819 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1820 }
goetz@6445 1821 success = true;
goetz@6445 1822 }
goetz@6445 1823 }
goetz@6445 1824
goetz@6445 1825 // traditional lightweight locking
goetz@6445 1826 if (!success) {
goetz@6445 1827 markOop displaced = lockee->mark()->set_unlocked();
goetz@6445 1828 entry->lock()->set_displaced_header(displaced);
goetz@6445 1829 bool call_vm = UseHeavyMonitors;
goetz@6445 1830 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
goetz@6445 1831 // Is it simple recursive case?
goetz@6445 1832 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
goetz@6445 1833 entry->lock()->set_displaced_header(NULL);
goetz@6445 1834 } else {
goetz@6445 1835 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
goetz@6445 1836 }
duke@435 1837 }
duke@435 1838 }
duke@435 1839 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1840 } else {
duke@435 1841 istate->set_msg(more_monitors);
duke@435 1842 UPDATE_PC_AND_RETURN(0); // Re-execute
duke@435 1843 }
duke@435 1844 }
duke@435 1845
duke@435 1846 CASE(_monitorexit): {
duke@435 1847 oop lockee = STACK_OBJECT(-1);
duke@435 1848 CHECK_NULL(lockee);
duke@435 1849 // derefing's lockee ought to provoke implicit null check
duke@435 1850 // find our monitor slot
duke@435 1851 BasicObjectLock* limit = istate->monitor_base();
duke@435 1852 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
duke@435 1853 while (most_recent != limit ) {
duke@435 1854 if ((most_recent)->obj() == lockee) {
duke@435 1855 BasicLock* lock = most_recent->lock();
duke@435 1856 markOop header = lock->displaced_header();
duke@435 1857 most_recent->set_obj(NULL);
goetz@6445 1858 if (!lockee->mark()->has_bias_pattern()) {
goetz@6445 1859 bool call_vm = UseHeavyMonitors;
goetz@6445 1860 // If it isn't recursive we either must swap old header or call the runtime
goetz@6445 1861 if (header != NULL || call_vm) {
goetz@6445 1862 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
goetz@6445 1863 // restore object for the slow case
goetz@6445 1864 most_recent->set_obj(lockee);
goetz@6445 1865 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
goetz@6445 1866 }
duke@435 1867 }
duke@435 1868 }
duke@435 1869 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
duke@435 1870 }
duke@435 1871 most_recent++;
duke@435 1872 }
duke@435 1873 // Need to throw illegal monitor state exception
duke@435 1874 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
twisti@2762 1875 ShouldNotReachHere();
duke@435 1876 }
duke@435 1877
duke@435 1878 /* All of the non-quick opcodes. */
duke@435 1879
duke@435 1880 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
duke@435 1881 * constant pool index in the instruction.
duke@435 1882 */
duke@435 1883 CASE(_getfield):
duke@435 1884 CASE(_getstatic):
duke@435 1885 {
duke@435 1886 u2 index;
duke@435 1887 ConstantPoolCacheEntry* cache;
duke@435 1888 index = Bytes::get_native_u2(pc+1);
duke@435 1889
duke@435 1890 // QQQ Need to make this as inlined as possible. Probably need to
duke@435 1891 // split all the bytecode cases out so c++ compiler has a chance
duke@435 1892 // for constant prop to fold everything possible away.
duke@435 1893
duke@435 1894 cache = cp->entry_at(index);
duke@435 1895 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
duke@435 1896 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
duke@435 1897 handle_exception);
duke@435 1898 cache = cp->entry_at(index);
duke@435 1899 }
duke@435 1900
duke@435 1901 #ifdef VM_JVMTI
duke@435 1902 if (_jvmti_interp_events) {
duke@435 1903 int *count_addr;
duke@435 1904 oop obj;
duke@435 1905 // Check to see if a field modification watch has been set
duke@435 1906 // before we take the time to call into the VM.
duke@435 1907 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
duke@435 1908 if ( *count_addr > 0 ) {
duke@435 1909 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
duke@435 1910 obj = (oop)NULL;
duke@435 1911 } else {
duke@435 1912 obj = (oop) STACK_OBJECT(-1);
bobv@2036 1913 VERIFY_OOP(obj);
duke@435 1914 }
duke@435 1915 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
duke@435 1916 obj,
duke@435 1917 cache),
duke@435 1918 handle_exception);
duke@435 1919 }
duke@435 1920 }
duke@435 1921 #endif /* VM_JVMTI */
duke@435 1922
duke@435 1923 oop obj;
duke@435 1924 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
twisti@4237 1925 Klass* k = cache->f1_as_klass();
coleenp@4037 1926 obj = k->java_mirror();
duke@435 1927 MORE_STACK(1); // Assume single slot push
duke@435 1928 } else {
duke@435 1929 obj = (oop) STACK_OBJECT(-1);
duke@435 1930 CHECK_NULL(obj);
duke@435 1931 }
duke@435 1932
duke@435 1933 //
duke@435 1934 // Now store the result on the stack
duke@435 1935 //
duke@435 1936 TosState tos_type = cache->flag_state();
twisti@3969 1937 int field_offset = cache->f2_as_index();
duke@435 1938 if (cache->is_volatile()) {
duke@435 1939 if (tos_type == atos) {
bobv@2036 1940 VERIFY_OOP(obj->obj_field_acquire(field_offset));
duke@435 1941 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
duke@435 1942 } else if (tos_type == itos) {
duke@435 1943 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
duke@435 1944 } else if (tos_type == ltos) {
duke@435 1945 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
duke@435 1946 MORE_STACK(1);
duke@435 1947 } else if (tos_type == btos) {
duke@435 1948 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
duke@435 1949 } else if (tos_type == ctos) {
duke@435 1950 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
duke@435 1951 } else if (tos_type == stos) {
duke@435 1952 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
duke@435 1953 } else if (tos_type == ftos) {
duke@435 1954 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
duke@435 1955 } else {
duke@435 1956 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
duke@435 1957 MORE_STACK(1);
duke@435 1958 }
duke@435 1959 } else {
duke@435 1960 if (tos_type == atos) {
bobv@2036 1961 VERIFY_OOP(obj->obj_field(field_offset));
duke@435 1962 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
duke@435 1963 } else if (tos_type == itos) {
duke@435 1964 SET_STACK_INT(obj->int_field(field_offset), -1);
duke@435 1965 } else if (tos_type == ltos) {
duke@435 1966 SET_STACK_LONG(obj->long_field(field_offset), 0);
duke@435 1967 MORE_STACK(1);
duke@435 1968 } else if (tos_type == btos) {
duke@435 1969 SET_STACK_INT(obj->byte_field(field_offset), -1);
duke@435 1970 } else if (tos_type == ctos) {
duke@435 1971 SET_STACK_INT(obj->char_field(field_offset), -1);
duke@435 1972 } else if (tos_type == stos) {
duke@435 1973 SET_STACK_INT(obj->short_field(field_offset), -1);
duke@435 1974 } else if (tos_type == ftos) {
duke@435 1975 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
duke@435 1976 } else {
duke@435 1977 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
duke@435 1978 MORE_STACK(1);
duke@435 1979 }
duke@435 1980 }
duke@435 1981
duke@435 1982 UPDATE_PC_AND_CONTINUE(3);
duke@435 1983 }
duke@435 1984
duke@435 1985 CASE(_putfield):
duke@435 1986 CASE(_putstatic):
duke@435 1987 {
duke@435 1988 u2 index = Bytes::get_native_u2(pc+1);
duke@435 1989 ConstantPoolCacheEntry* cache = cp->entry_at(index);
duke@435 1990 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
duke@435 1991 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
duke@435 1992 handle_exception);
duke@435 1993 cache = cp->entry_at(index);
duke@435 1994 }
duke@435 1995
duke@435 1996 #ifdef VM_JVMTI
duke@435 1997 if (_jvmti_interp_events) {
duke@435 1998 int *count_addr;
duke@435 1999 oop obj;
duke@435 2000 // Check to see if a field modification watch has been set
duke@435 2001 // before we take the time to call into the VM.
duke@435 2002 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
duke@435 2003 if ( *count_addr > 0 ) {
duke@435 2004 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
duke@435 2005 obj = (oop)NULL;
duke@435 2006 }
duke@435 2007 else {
duke@435 2008 if (cache->is_long() || cache->is_double()) {
duke@435 2009 obj = (oop) STACK_OBJECT(-3);
duke@435 2010 } else {
duke@435 2011 obj = (oop) STACK_OBJECT(-2);
duke@435 2012 }
bobv@2036 2013 VERIFY_OOP(obj);
duke@435 2014 }
duke@435 2015
duke@435 2016 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
duke@435 2017 obj,
duke@435 2018 cache,
duke@435 2019 (jvalue *)STACK_SLOT(-1)),
duke@435 2020 handle_exception);
duke@435 2021 }
duke@435 2022 }
duke@435 2023 #endif /* VM_JVMTI */
duke@435 2024
duke@435 2025 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
duke@435 2026 // out so c++ compiler has a chance for constant prop to fold everything possible away.
duke@435 2027
duke@435 2028 oop obj;
duke@435 2029 int count;
duke@435 2030 TosState tos_type = cache->flag_state();
duke@435 2031
duke@435 2032 count = -1;
duke@435 2033 if (tos_type == ltos || tos_type == dtos) {
duke@435 2034 --count;
duke@435 2035 }
duke@435 2036 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
twisti@4237 2037 Klass* k = cache->f1_as_klass();
coleenp@4037 2038 obj = k->java_mirror();
duke@435 2039 } else {
duke@435 2040 --count;
duke@435 2041 obj = (oop) STACK_OBJECT(count);
duke@435 2042 CHECK_NULL(obj);
duke@435 2043 }
duke@435 2044
duke@435 2045 //
duke@435 2046 // Now store the result
duke@435 2047 //
twisti@3969 2048 int field_offset = cache->f2_as_index();
duke@435 2049 if (cache->is_volatile()) {
duke@435 2050 if (tos_type == itos) {
duke@435 2051 obj->release_int_field_put(field_offset, STACK_INT(-1));
duke@435 2052 } else if (tos_type == atos) {
bobv@2036 2053 VERIFY_OOP(STACK_OBJECT(-1));
duke@435 2054 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
duke@435 2055 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
duke@435 2056 } else if (tos_type == btos) {
duke@435 2057 obj->release_byte_field_put(field_offset, STACK_INT(-1));
duke@435 2058 } else if (tos_type == ltos) {
duke@435 2059 obj->release_long_field_put(field_offset, STACK_LONG(-1));
duke@435 2060 } else if (tos_type == ctos) {
duke@435 2061 obj->release_char_field_put(field_offset, STACK_INT(-1));
duke@435 2062 } else if (tos_type == stos) {
duke@435 2063 obj->release_short_field_put(field_offset, STACK_INT(-1));
duke@435 2064 } else if (tos_type == ftos) {
duke@435 2065 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
duke@435 2066 } else {
duke@435 2067 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
duke@435 2068 }
duke@435 2069 OrderAccess::storeload();
duke@435 2070 } else {
duke@435 2071 if (tos_type == itos) {
duke@435 2072 obj->int_field_put(field_offset, STACK_INT(-1));
duke@435 2073 } else if (tos_type == atos) {
bobv@2036 2074 VERIFY_OOP(STACK_OBJECT(-1));
duke@435 2075 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
duke@435 2076 OrderAccess::release_store(&BYTE_MAP_BASE[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
duke@435 2077 } else if (tos_type == btos) {
duke@435 2078 obj->byte_field_put(field_offset, STACK_INT(-1));
duke@435 2079 } else if (tos_type == ltos) {
duke@435 2080 obj->long_field_put(field_offset, STACK_LONG(-1));
duke@435 2081 } else if (tos_type == ctos) {
duke@435 2082 obj->char_field_put(field_offset, STACK_INT(-1));
duke@435 2083 } else if (tos_type == stos) {
duke@435 2084 obj->short_field_put(field_offset, STACK_INT(-1));
duke@435 2085 } else if (tos_type == ftos) {
duke@435 2086 obj->float_field_put(field_offset, STACK_FLOAT(-1));
duke@435 2087 } else {
duke@435 2088 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
duke@435 2089 }
duke@435 2090 }
duke@435 2091
duke@435 2092 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
duke@435 2093 }
duke@435 2094
duke@435 2095 CASE(_new): {
duke@435 2096 u2 index = Bytes::get_Java_u2(pc+1);
coleenp@4037 2097 ConstantPool* constants = istate->method()->constants();
duke@435 2098 if (!constants->tag_at(index).is_unresolved_klass()) {
duke@435 2099 // Make sure klass is initialized and doesn't have a finalizer
coleenp@4037 2100 Klass* entry = constants->slot_at(index).get_klass();
duke@435 2101 assert(entry->is_klass(), "Should be resolved klass");
coleenp@4037 2102 Klass* k_entry = (Klass*) entry;
coleenp@4037 2103 assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
coleenp@4037 2104 InstanceKlass* ik = (InstanceKlass*) k_entry;
duke@435 2105 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
duke@435 2106 size_t obj_size = ik->size_helper();
duke@435 2107 oop result = NULL;
duke@435 2108 // If the TLAB isn't pre-zeroed then we'll have to do it
duke@435 2109 bool need_zero = !ZeroTLAB;
duke@435 2110 if (UseTLAB) {
duke@435 2111 result = (oop) THREAD->tlab().allocate(obj_size);
duke@435 2112 }
duke@435 2113 if (result == NULL) {
duke@435 2114 need_zero = true;
duke@435 2115 // Try allocate in shared eden
duke@435 2116 retry:
duke@435 2117 HeapWord* compare_to = *Universe::heap()->top_addr();
duke@435 2118 HeapWord* new_top = compare_to + obj_size;
duke@435 2119 if (new_top <= *Universe::heap()->end_addr()) {
duke@435 2120 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
duke@435 2121 goto retry;
duke@435 2122 }
duke@435 2123 result = (oop) compare_to;
duke@435 2124 }
duke@435 2125 }
duke@435 2126 if (result != NULL) {
duke@435 2127 // Initialize object (if nonzero size and need) and then the header
duke@435 2128 if (need_zero ) {
duke@435 2129 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
duke@435 2130 obj_size -= sizeof(oopDesc) / oopSize;
duke@435 2131 if (obj_size > 0 ) {
duke@435 2132 memset(to_zero, 0, obj_size * HeapWordSize);
duke@435 2133 }
duke@435 2134 }
duke@435 2135 if (UseBiasedLocking) {
duke@435 2136 result->set_mark(ik->prototype_header());
duke@435 2137 } else {
duke@435 2138 result->set_mark(markOopDesc::prototype());
duke@435 2139 }
coleenp@602 2140 result->set_klass_gap(0);
duke@435 2141 result->set_klass(k_entry);
duke@435 2142 SET_STACK_OBJECT(result, 0);
duke@435 2143 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
duke@435 2144 }
duke@435 2145 }
duke@435 2146 }
duke@435 2147 // Slow case allocation
duke@435 2148 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
duke@435 2149 handle_exception);
duke@435 2150 SET_STACK_OBJECT(THREAD->vm_result(), 0);
duke@435 2151 THREAD->set_vm_result(NULL);
duke@435 2152 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
duke@435 2153 }
duke@435 2154 CASE(_anewarray): {
duke@435 2155 u2 index = Bytes::get_Java_u2(pc+1);
duke@435 2156 jint size = STACK_INT(-1);
duke@435 2157 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
duke@435 2158 handle_exception);
duke@435 2159 SET_STACK_OBJECT(THREAD->vm_result(), -1);
duke@435 2160 THREAD->set_vm_result(NULL);
duke@435 2161 UPDATE_PC_AND_CONTINUE(3);
duke@435 2162 }
duke@435 2163 CASE(_multianewarray): {
duke@435 2164 jint dims = *(pc+3);
duke@435 2165 jint size = STACK_INT(-1);
duke@435 2166 // stack grows down, dimensions are up!
duke@435 2167 jint *dimarray =
twisti@1864 2168 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
twisti@1864 2169 Interpreter::stackElementWords-1];
duke@435 2170 //adjust pointer to start of stack element
duke@435 2171 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
duke@435 2172 handle_exception);
duke@435 2173 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
duke@435 2174 THREAD->set_vm_result(NULL);
duke@435 2175 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
duke@435 2176 }
    CASE(_checkcast):
        // Throws ClassCastException if TOS object is non-null and not a
        // subtype of the CP klass; null always passes (JVMS checkcast).
        if (STACK_OBJECT(-1) != NULL) {
          VERIFY_OOP(STACK_OBJECT(-1));
          u2 index = Bytes::get_Java_u2(pc+1);
          if (ProfileInterpreter) {
            // needs Profile_checkcast QQQ
            ShouldNotReachHere();
          }
          // Constant pool may have actual klass or unresolved klass. If it is
          // unresolved we must resolve it
          if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
            CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
          }
          Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
          Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx
          //
          // Check for compatibilty. This check must not GC!!
          // Seems way more expensive now that we must dispatch
          //
          if (objKlassOop != klassOf &&
              !objKlassOop->is_subtype_of(klassOf)) {
            // Build the "X cannot be cast to Y" message and raise CCE.
            ResourceMark rm(THREAD);
            const char* objName = objKlassOop->external_name();
            const char* klassName = klassOf->external_name();
            char* message = SharedRuntime::generate_class_cast_message(
              objName, klassName);
            VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message);
          }
        } else {
          if (UncommonNullCast) {
          //     istate->method()->set_null_cast_seen();
          // [RGV] Not sure what to do here!

          }
        }
        UPDATE_PC_AND_CONTINUE(3);  // operand stack unchanged: object stays on TOS

    CASE(_instanceof):
        // Pushes 1 if TOS object is a non-null instance of the CP klass,
        // else 0 (null yields 0 per JVMS instanceof).
        if (STACK_OBJECT(-1) == NULL) {
          SET_STACK_INT(0, -1);
        } else {
          VERIFY_OOP(STACK_OBJECT(-1));
          u2 index = Bytes::get_Java_u2(pc+1);
          // Constant pool may have actual klass or unresolved klass. If it is
          // unresolved we must resolve it
          if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
            CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
          }
          Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
          Klass* objKlassOop = STACK_OBJECT(-1)->klass();
          //
          // Check for compatibilty. This check must not GC!!
          // Seems way more expensive now that we must dispatch
          //
          if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) {
            SET_STACK_INT(1, -1);
          } else {
            SET_STACK_INT(0, -1);
          }
        }
        UPDATE_PC_AND_CONTINUE(3);
    CASE(_ldc_w):
    CASE(_ldc):
      {
        // Push a single-word constant (int/float/String/Class) from the
        // constant pool. _ldc has a 1-byte index, _ldc_w a 2-byte index.
        u2 index;
        bool wide = false;
        int incr = 2; // frequent case
        if (opcode == Bytecodes::_ldc) {
          index = pc[1];
        } else {
          index = Bytes::get_Java_u2(pc+1);
          incr = 3;
          wide = true;
        }

        ConstantPool* constants = METHOD->constants();
        switch (constants->tag_at(index).value()) {
        case JVM_CONSTANT_Integer:
          SET_STACK_INT(constants->int_at(index), 0);
          break;

        case JVM_CONSTANT_Float:
          SET_STACK_FLOAT(constants->float_at(index), 0);
          break;

        case JVM_CONSTANT_String:
          {
            // Strings live in the resolved_references array; a NULL slot
            // means not yet resolved — call into the runtime once.
            oop result = constants->resolved_references()->obj_at(index);
            if (result == NULL) {
              CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
              SET_STACK_OBJECT(THREAD->vm_result(), 0);
              THREAD->set_vm_result(NULL);
            } else {
              VERIFY_OOP(result);
              SET_STACK_OBJECT(result, 0);
            }
          break;
          }

        case JVM_CONSTANT_Class:
          // Resolved class constant: push its java.lang.Class mirror.
          VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
          SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
          break;

        case JVM_CONSTANT_UnresolvedClass:
        case JVM_CONSTANT_UnresolvedClassInError:
          CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
          SET_STACK_OBJECT(THREAD->vm_result(), 0);
          THREAD->set_vm_result(NULL);
          break;

        default:  ShouldNotReachHere();
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
      }

    CASE(_ldc2_w):
      {
        // Push a two-word constant (long/double); always a 2-byte index.
        u2 index = Bytes::get_Java_u2(pc+1);

        ConstantPool* constants = METHOD->constants();
        switch (constants->tag_at(index).value()) {

        case JVM_CONSTANT_Long:
           SET_STACK_LONG(constants->long_at(index), 1);
          break;

        case JVM_CONSTANT_Double:
           SET_STACK_DOUBLE(constants->double_at(index), 1);
          break;
        default:  ShouldNotReachHere();
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);  // two stack slots pushed
      }

    CASE(_fast_aldc_w):
    CASE(_fast_aldc): {
      // Rewritten (quickened) ldc for object constants; note the index is
      // read with get_native_u2 because the rewriter stores it natively.
      u2 index;
      int incr;
      if (opcode == Bytecodes::_fast_aldc) {
        index = pc[1];
        incr = 2;
      } else {
        index = Bytes::get_native_u2(pc+1);
        incr = 3;
      }

      // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
      // This kind of CP cache entry does not need to match the flags byte, because
      // there is a 1-1 relation between bytecode type and CP entry type.
      ConstantPool* constants = METHOD->constants();
      oop result = constants->resolved_references()->obj_at(index);
      if (result == NULL) {
        CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
                handle_exception);
        result = THREAD->vm_result();
      }

      VERIFY_OOP(result);
      SET_STACK_OBJECT(result, 0);
      UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
    }
    CASE(_invokedynamic): {
      // JSR 292 dynamic call site. Resolution produces a CallSite-backed
      // adapter Method* plus an optional appendix argument.

      if (!EnableInvokeDynamic) {
        // We should not encounter this bytecode if !EnableInvokeDynamic.
        // The verifier will stop it.  However, if we get past the verifier,
        // this will stop the thread in a reasonable way, without crashing the JVM.
        CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
                handle_exception);
        ShouldNotReachHere();
      }

      // invokedynamic carries a 4-byte native-order cache index.
      u4 index = Bytes::get_native_u4(pc+1);
      ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);

      // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
      // This kind of CP cache entry does not need to match the flags byte, because
      // there is a 1-1 relation between bytecode type and CP entry type.
      if (! cache->is_resolved((Bytecodes::Code) opcode)) {
        CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
                handle_exception);
        // Re-fetch: resolution may have updated the cache entry.
        cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
      }

      Method* method = cache->f1_as_method();
      // NOTE(review): VERIFY_OOP applied to a Method*, which is Metadata in
      // this codebase, not an oop — presumably a leftover; confirm it is
      // harmless in this build configuration.
      VERIFY_OOP(method);

      if (cache->has_appendix()) {
        // Push the resolved appendix (e.g. a MethodType/CallSite argument)
        // as a trailing hidden argument.
        ConstantPool* constants = METHOD->constants();
        SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
        MORE_STACK(1);
      }

      // Hand the call off to the frame manager: record callee and entry
      // point, then leave the interpreter loop.
      istate->set_msg(call_method);
      istate->set_callee(method);
      istate->set_callee_entry_point(method->from_interpreted_entry());
      istate->set_bcp_advance(5);  // opcode + 4-byte index

      UPDATE_PC_AND_RETURN(0); // I'll be back...
    }

    CASE(_invokehandle): {
      // JSR 292 MethodHandle.invoke/invokeExact; same shape as
      // invokedynamic but with a 2-byte cache index and bcp advance of 3.

      if (!EnableInvokeDynamic) {
        ShouldNotReachHere();
      }

      u2 index = Bytes::get_native_u2(pc+1);
      ConstantPoolCacheEntry* cache = cp->entry_at(index);

      if (! cache->is_resolved((Bytecodes::Code) opcode)) {
        CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
                handle_exception);
        cache = cp->entry_at(index);
      }

      Method* method = cache->f1_as_method();

      // NOTE(review): same Method*-as-oop verification caveat as
      // invokedynamic above.
      VERIFY_OOP(method);

      if (cache->has_appendix()) {
        ConstantPool* constants = METHOD->constants();
        SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
        MORE_STACK(1);
      }

      istate->set_msg(call_method);
      istate->set_callee(method);
      istate->set_callee_entry_point(method->from_interpreted_entry());
      istate->set_bcp_advance(3);

      UPDATE_PC_AND_RETURN(0); // I'll be back...
    }
    CASE(_invokeinterface): {
      // Interface dispatch: locate the receiver klass's itable entry for the
      // declared interface, then index into its method table.
      u2 index = Bytes::get_native_u2(pc+1);

      // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
      // out so c++ compiler has a chance for constant prop to fold everything possible away.

      ConstantPoolCacheEntry* cache = cp->entry_at(index);
      if (!cache->is_resolved((Bytecodes::Code)opcode)) {
        CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
                handle_exception);
        cache = cp->entry_at(index);
      }

      istate->set_msg(call_method);

      // Special case of invokeinterface called for virtual method of
      // java.lang.Object.  See cpCacheOop.cpp for details.
      // This code isn't produced by javac, but could be produced by
      // another compliant java compiler.
      if (cache->is_forced_virtual()) {
        Method* callee;
        // Null-check the receiver (it sits parameter_size slots down).
        CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
        if (cache->is_vfinal()) {
          // Final method: resolved Method* is cached directly in f2.
          callee = cache->f2_as_vfinal_method();
        } else {
          // get receiver
          int parms = cache->parameter_size();
          // Same comments as invokevirtual apply here
          VERIFY_OOP(STACK_OBJECT(-parms));
          InstanceKlass* rcvrKlass = (InstanceKlass*)
                               STACK_OBJECT(-parms)->klass();
          // Virtual dispatch through the receiver's vtable; f2 holds the index.
          callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
        }
        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
#endif /* VM_JVMTI */
        istate->set_bcp_advance(5);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }

      // this could definitely be cleaned up QQQ
      Method* callee;
      Klass* iclass = cache->f1_as_klass();
      // InstanceKlass* interface = (InstanceKlass*) iclass;
      // get receiver
      int parms = cache->parameter_size();
      oop rcvr = STACK_OBJECT(-parms);
      CHECK_NULL(rcvr);
      InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
      itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
      int i;
      // Linear scan of the receiver's itable for the target interface.
      for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
        if (ki->interface_klass() == iclass) break;
      }
      // If the interface isn't found, this class doesn't implement this
      // interface.  The link resolver checks this but only for the first
      // time this interface is called.
      if (i == int2->itable_length()) {
        VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "");
      }
      int mindex = cache->f2_as_index();
      itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
      callee = im[mindex].method();
      // A NULL entry means the class does not implement this method.
      if (callee == NULL) {
        VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "");
      }

      istate->set_callee(callee);
      istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
      if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
        istate->set_callee_entry_point(callee->interpreter_entry());
      }
#endif /* VM_JVMTI */
      istate->set_bcp_advance(5);
      UPDATE_PC_AND_RETURN(0); // I'll be back...
    }
duke@435 2495
    CASE(_invokevirtual):
    CASE(_invokespecial):
    CASE(_invokestatic): {
      // The three 'ordinary' invokes share one handler; only receiver
      // null-checking and callee lookup differ per opcode.
      u2 index = Bytes::get_native_u2(pc+1);

      ConstantPoolCacheEntry* cache = cp->entry_at(index);
      // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
      // out so c++ compiler has a chance for constant prop to fold everything possible away.

      if (!cache->is_resolved((Bytecodes::Code)opcode)) {
        CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
                handle_exception);
        cache = cp->entry_at(index);
      }

      istate->set_msg(call_method);
      {
        Method* callee;
        if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
          CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
          // vfinal: the resolved Method* is cached directly; no vtable walk.
          if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method();
          else {
            // get receiver
            int parms = cache->parameter_size();
            // this works but needs a resourcemark and seems to create a vtable on every call:
            // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
            //
            // this fails with an assert
            // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
            // but this works
            VERIFY_OOP(STACK_OBJECT(-parms));
            InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass();
            /*
              Executing this code in java.lang.String:
                  public String(char value[]) {
                        this.count = value.length;
                        this.value = (char[])value.clone();
                   }

               a find on rcvr->klass() reports:
               {type array char}{type array class}
                - klass: {other class}

                but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
                because rcvr->klass()->oop_is_instance() == 0
                However it seems to have a vtable in the right location. Huh?

            */
            callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
          }
        } else {
          // invokespecial still requires a receiver null check;
          // invokestatic has no receiver at all.
          if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
            CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
          }
          // Statically bound target lives in f1.
          callee = cache->f1_as_method();
        }

        istate->set_callee(callee);
        istate->set_callee_entry_point(callee->from_interpreted_entry());
#ifdef VM_JVMTI
        if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
          istate->set_callee_entry_point(callee->interpreter_entry());
        }
#endif /* VM_JVMTI */
        istate->set_bcp_advance(3);
        UPDATE_PC_AND_RETURN(0); // I'll be back...
      }
    }
duke@435 2564
    /* Allocate memory for a new java object. */

    CASE(_newarray): {
      // Primitive array: operand byte encodes the element BasicType,
      // length is the int on TOS (replaced by the new array).
      BasicType atype = (BasicType) *(pc+1);
      jint size = STACK_INT(-1);
      CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
              handle_exception);
      SET_STACK_OBJECT(THREAD->vm_result(), -1);
      THREAD->set_vm_result(NULL);

      UPDATE_PC_AND_CONTINUE(2);
    }

    /* Throw an exception. */

    CASE(_athrow): {
      oop except_oop = STACK_OBJECT(-1);
      CHECK_NULL(except_oop);  // throwing null raises NullPointerException
      // set pending_exception so we use common code
      THREAD->set_pending_exception(except_oop, NULL, 0);
      goto handle_exception;
    }
duke@435 2587
    /* goto and jsr. They are exactly the same except jsr pushes
     * the address of the next instruction first.
     */

    CASE(_jsr): {
      /* push bytecode index on stack */
      // The return address is stored as a bci (offset from code_base), not a
      // raw pointer; +3 skips past the 3-byte jsr instruction itself.
      SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
      MORE_STACK(1);
      /* FALL THROUGH */
    }

    CASE(_goto):
    {
      int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
      address branch_pc = pc;
      UPDATE_PC(offset);
      // Backward branches are where OSR / safepoint polling hooks in.
      DO_BACKEDGE_CHECKS(offset, branch_pc);
      CONTINUE;
    }

    CASE(_jsr_w): {
      /* push return address on the stack */
      // Wide form: 5-byte instruction, hence +5 for the return bci.
      SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
      MORE_STACK(1);
      /* FALL THROUGH */
    }

    CASE(_goto_w):
    {
      int32_t offset = Bytes::get_Java_u4(pc + 1);
      address branch_pc = pc;
      UPDATE_PC(offset);
      DO_BACKEDGE_CHECKS(offset, branch_pc);
      CONTINUE;
    }

    /* return from a jsr or jsr_w */

    CASE(_ret): {
      // Local variable pc[1] holds the bci pushed by jsr/jsr_w above.
      pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
      UPDATE_PC_AND_CONTINUE(0);
    }
duke@435 2630
    /* debugger breakpoint */

    CASE(_breakpoint): {
      // JVMTI breakpoint: fetch the original bytecode that was displaced by
      // the breakpoint opcode, post the event, then execute the original.
      Bytecodes::Code original_bytecode;
      DECACHE_STATE();
      SET_LAST_JAVA_FRAME();
      original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
                          METHOD, pc);
      RESET_LAST_JAVA_FRAME();
      CACHE_STATE();
      if (THREAD->has_pending_exception()) goto handle_exception;
        CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
                                                handle_exception);

      // Re-dispatch on the original (un-patched) opcode without advancing pc.
      opcode = (jubyte)original_bytecode;
      goto opcode_switch;
    }

    DEFAULT:
      fatal(err_msg("Unimplemented opcode %d = %s", opcode,
                    Bytecodes::name((Bytecodes::Code)opcode)));
      goto finish;

    } /* switch(opc) */


#ifdef USELABELS
    check_for_exception:
#endif
    {
      // Common post-CALL_VM check: fall back into the loop when clean,
      // otherwise flush cached state and unwind to the handler.
      if (!THREAD->has_pending_exception()) {
        CONTINUE;
      }
      /* We will be gcsafe soon, so flush our state. */
      DECACHE_PC();
      goto handle_exception;
    }
  do_continue: ;

  } /* while (1) interpreter loop */
  // An exception exists in the thread state see whether this activation can handle it
  handle_exception: {

    HandleMarkCleaner __hmc(THREAD);
    Handle except_oop(THREAD, THREAD->pending_exception());
    // Prevent any subsequent HandleMarkCleaner in the VM
    // from freeing the except_oop handle.
    HandleMark __hm(THREAD);

    THREAD->clear_pending_exception();
    assert(except_oop(), "No exception to process");
    intptr_t continuation_bci;
    // expression stack is emptied
    topOfStack = istate->stack_base() - Interpreter::stackElementWords;
    // Ask the runtime for a handler bci in this method; may itself raise
    // (e.g. while loading the handler's catch type), hence handle_exception.
    CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
            handle_exception);

    // The runtime may have substituted the exception (vm_result holds it).
    except_oop = THREAD->vm_result();
    THREAD->set_vm_result(NULL);
    if (continuation_bci >= 0) {
      // Handler found in this method: push the exception and resume there.
      // Place exception on top of stack
      SET_STACK_OBJECT(except_oop(), 0);
      MORE_STACK(1);
      pc = METHOD->code_base() + continuation_bci;
      if (TraceExceptions) {
        ttyLocker ttyl;
        ResourceMark rm;
        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
        tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
        tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
                      pc - (intptr_t)METHOD->code_base(),
                      continuation_bci, THREAD);
      }
      // for AbortVMOnException flag
      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
      goto run;
    }
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop());
      tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
                    pc  - (intptr_t) METHOD->code_base(),
                    THREAD);
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
    // No handler in this activation, unwind and try again
    THREAD->set_pending_exception(except_oop(), NULL, 0);
    goto handle_return;
  }  /* handle_exception: */
duke@435 2725
duke@435 2726
duke@435 2727
  // Return from an interpreter invocation with the result of the interpretation
  // on the top of the Java Stack (or a pending exception)

  handle_Pop_Frame:

  // We don't really do anything special here except we must be aware
  // that we can get here without ever locking the method (if sync).
  // Also we skip the notification of the exit.

  istate->set_msg(popping_frame);
  // Clear pending so while the pop is in process
  // we don't start another one if a call_vm is done.
  THREAD->clr_pop_frame_pending();
  // Let interpreter (only) see the we're in the process of popping a frame
  THREAD->set_pop_frame_in_process();
duke@435 2744 handle_return:
duke@435 2745 {
duke@435 2746 DECACHE_STATE();
duke@435 2747
duke@435 2748 bool suppress_error = istate->msg() == popping_frame;
duke@435 2749 bool suppress_exit_event = THREAD->has_pending_exception() || suppress_error;
duke@435 2750 Handle original_exception(THREAD, THREAD->pending_exception());
duke@435 2751 Handle illegal_state_oop(THREAD, NULL);
duke@435 2752
duke@435 2753 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
duke@435 2754 // in any following VM entries from freeing our live handles, but illegal_state_oop
duke@435 2755 // isn't really allocated yet and so doesn't become live until later and
duke@435 2756 // in unpredicatable places. Instead we must protect the places where we enter the
duke@435 2757 // VM. It would be much simpler (and safer) if we could allocate a real handle with
duke@435 2758 // a NULL oop in it and then overwrite the oop later as needed. This isn't
duke@435 2759 // unfortunately isn't possible.
duke@435 2760
duke@435 2761 THREAD->clear_pending_exception();
duke@435 2762
duke@435 2763 //
duke@435 2764 // As far as we are concerned we have returned. If we have a pending exception
duke@435 2765 // that will be returned as this invocation's result. However if we get any
duke@435 2766 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
duke@435 2767 // will be our final result (i.e. monitor exception trumps a pending exception).
duke@435 2768 //
duke@435 2769
duke@435 2770 // If we never locked the method (or really passed the point where we would have),
duke@435 2771 // there is no need to unlock it (or look for other monitors), since that
duke@435 2772 // could not have happened.
duke@435 2773
duke@435 2774 if (THREAD->do_not_unlock()) {
duke@435 2775
duke@435 2776 // Never locked, reset the flag now because obviously any caller must
duke@435 2777 // have passed their point of locking for us to have gotten here.
duke@435 2778
duke@435 2779 THREAD->clr_do_not_unlock();
duke@435 2780 } else {
duke@435 2781 // At this point we consider that we have returned. We now check that the
duke@435 2782 // locks were properly block structured. If we find that they were not
duke@435 2783 // used properly we will return with an illegal monitor exception.
duke@435 2784 // The exception is checked by the caller not the callee since this
duke@435 2785 // checking is considered to be part of the invocation and therefore
duke@435 2786 // in the callers scope (JVM spec 8.13).
duke@435 2787 //
duke@435 2788 // Another weird thing to watch for is if the method was locked
duke@435 2789 // recursively and then not exited properly. This means we must
duke@435 2790 // examine all the entries in reverse time(and stack) order and
duke@435 2791 // unlock as we find them. If we find the method monitor before
duke@435 2792 // we are at the initial entry then we should throw an exception.
duke@435 2793 // It is not clear the template based interpreter does this
duke@435 2794 // correctly
duke@435 2795
duke@435 2796 BasicObjectLock* base = istate->monitor_base();
duke@435 2797 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
duke@435 2798 bool method_unlock_needed = METHOD->is_synchronized();
duke@435 2799 // We know the initial monitor was used for the method don't check that
duke@435 2800 // slot in the loop
duke@435 2801 if (method_unlock_needed) base--;
duke@435 2802
duke@435 2803 // Check all the monitors to see they are unlocked. Install exception if found to be locked.
duke@435 2804 while (end < base) {
duke@435 2805 oop lockee = end->obj();
duke@435 2806 if (lockee != NULL) {
duke@435 2807 BasicLock* lock = end->lock();
duke@435 2808 markOop header = lock->displaced_header();
duke@435 2809 end->set_obj(NULL);
goetz@6445 2810
goetz@6445 2811 if (!lockee->mark()->has_bias_pattern()) {
goetz@6445 2812 // If it isn't recursive we either must swap old header or call the runtime
goetz@6445 2813 if (header != NULL) {
goetz@6445 2814 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
goetz@6445 2815 // restore object for the slow case
goetz@6445 2816 end->set_obj(lockee);
goetz@6445 2817 {
goetz@6445 2818 // Prevent any HandleMarkCleaner from freeing our live handles
goetz@6445 2819 HandleMark __hm(THREAD);
goetz@6445 2820 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
goetz@6445 2821 }
duke@435 2822 }
duke@435 2823 }
duke@435 2824 }
duke@435 2825 // One error is plenty
duke@435 2826 if (illegal_state_oop() == NULL && !suppress_error) {
duke@435 2827 {
duke@435 2828 // Prevent any HandleMarkCleaner from freeing our live handles
duke@435 2829 HandleMark __hm(THREAD);
duke@435 2830 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
duke@435 2831 }
duke@435 2832 assert(THREAD->has_pending_exception(), "Lost our exception!");
duke@435 2833 illegal_state_oop = THREAD->pending_exception();
duke@435 2834 THREAD->clear_pending_exception();
duke@435 2835 }
duke@435 2836 }
duke@435 2837 end++;
duke@435 2838 }
duke@435 2839 // Unlock the method if needed
duke@435 2840 if (method_unlock_needed) {
duke@435 2841 if (base->obj() == NULL) {
duke@435 2842 // The method is already unlocked this is not good.
duke@435 2843 if (illegal_state_oop() == NULL && !suppress_error) {
duke@435 2844 {
duke@435 2845 // Prevent any HandleMarkCleaner from freeing our live handles
duke@435 2846 HandleMark __hm(THREAD);
duke@435 2847 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
duke@435 2848 }
duke@435 2849 assert(THREAD->has_pending_exception(), "Lost our exception!");
duke@435 2850 illegal_state_oop = THREAD->pending_exception();
duke@435 2851 THREAD->clear_pending_exception();
duke@435 2852 }
duke@435 2853 } else {
duke@435 2854 //
duke@435 2855 // The initial monitor is always used for the method
duke@435 2856 // However if that slot is no longer the oop for the method it was unlocked
duke@435 2857 // and reused by something that wasn't unlocked!
duke@435 2858 //
duke@435 2859 // deopt can come in with rcvr dead because c2 knows
duke@435 2860 // its value is preserved in the monitor. So we can't use locals[0] at all
duke@435 2861 // and must use first monitor slot.
duke@435 2862 //
duke@435 2863 oop rcvr = base->obj();
duke@435 2864 if (rcvr == NULL) {
duke@435 2865 if (!suppress_error) {
duke@435 2866 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "");
duke@435 2867 illegal_state_oop = THREAD->pending_exception();
duke@435 2868 THREAD->clear_pending_exception();
duke@435 2869 }
goetz@6445 2870 } else if (UseHeavyMonitors) {
goetz@6445 2871 {
goetz@6445 2872 // Prevent any HandleMarkCleaner from freeing our live handles.
goetz@6445 2873 HandleMark __hm(THREAD);
goetz@6445 2874 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
goetz@6445 2875 }
goetz@6445 2876 if (THREAD->has_pending_exception()) {
goetz@6445 2877 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
goetz@6445 2878 THREAD->clear_pending_exception();
goetz@6445 2879 }
duke@435 2880 } else {
duke@435 2881 BasicLock* lock = base->lock();
duke@435 2882 markOop header = lock->displaced_header();
duke@435 2883 base->set_obj(NULL);
goetz@6445 2884
goetz@6445 2885 if (!rcvr->mark()->has_bias_pattern()) {
goetz@6445 2886 base->set_obj(NULL);
goetz@6445 2887 // If it isn't recursive we either must swap old header or call the runtime
goetz@6445 2888 if (header != NULL) {
goetz@6445 2889 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
goetz@6445 2890 // restore object for the slow case
goetz@6445 2891 base->set_obj(rcvr);
goetz@6445 2892 {
goetz@6445 2893 // Prevent any HandleMarkCleaner from freeing our live handles
goetz@6445 2894 HandleMark __hm(THREAD);
goetz@6445 2895 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
goetz@6445 2896 }
goetz@6445 2897 if (THREAD->has_pending_exception()) {
goetz@6445 2898 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
goetz@6445 2899 THREAD->clear_pending_exception();
goetz@6445 2900 }
duke@435 2901 }
duke@435 2902 }
duke@435 2903 }
duke@435 2904 }
duke@435 2905 }
duke@435 2906 }
duke@435 2907 }
goetz@6445 2908 // Clear the do_not_unlock flag now.
goetz@6445 2909 THREAD->clr_do_not_unlock();
duke@435 2910
duke@435 2911 //
duke@435 2912 // Notify jvmti/jvmdi
duke@435 2913 //
duke@435 2914 // NOTE: we do not notify a method_exit if we have a pending exception,
duke@435 2915 // including an exception we generate for unlocking checks. In the former
duke@435 2916 // case, JVMDI has already been notified by our call for the exception handler
duke@435 2917 // and in both cases as far as JVMDI is concerned we have already returned.
duke@435 2918 // If we notify it again JVMDI will be all confused about how many frames
duke@435 2919 // are still on the stack (4340444).
duke@435 2920 //
duke@435 2921 // NOTE Further! It turns out the the JVMTI spec in fact expects to see
duke@435 2922 // method_exit events whenever we leave an activation unless it was done
duke@435 2923 // for popframe. This is nothing like jvmdi. However we are passing the
duke@435 2924 // tests at the moment (apparently because they are jvmdi based) so rather
duke@435 2925 // than change this code and possibly fail tests we will leave it alone
duke@435 2926 // (with this note) in anticipation of changing the vm and the tests
duke@435 2927 // simultaneously.
duke@435 2928
duke@435 2929
duke@435 2930 //
duke@435 2931 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
duke@435 2932
duke@435 2933
duke@435 2934
duke@435 2935 #ifdef VM_JVMTI
duke@435 2936 if (_jvmti_interp_events) {
duke@435 2937 // Whenever JVMTI puts a thread in interp_only_mode, method
duke@435 2938 // entry/exit events are sent for that thread to track stack depth.
duke@435 2939 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
duke@435 2940 {
duke@435 2941 // Prevent any HandleMarkCleaner from freeing our live handles
duke@435 2942 HandleMark __hm(THREAD);
duke@435 2943 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
duke@435 2944 }
duke@435 2945 }
duke@435 2946 }
duke@435 2947 #endif /* VM_JVMTI */
duke@435 2948
duke@435 2949 //
duke@435 2950 // See if we are returning any exception
duke@435 2951 // A pending exception that was pending prior to a possible popping frame
duke@435 2952 // overrides the popping frame.
duke@435 2953 //
duke@435 2954 assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed");
duke@435 2955 if (illegal_state_oop() != NULL || original_exception() != NULL) {
duke@435 2956 // inform the frame manager we have no result
duke@435 2957 istate->set_msg(throwing_exception);
duke@435 2958 if (illegal_state_oop() != NULL)
duke@435 2959 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
duke@435 2960 else
duke@435 2961 THREAD->set_pending_exception(original_exception(), NULL, 0);
duke@435 2962 istate->set_return_kind((Bytecodes::Code)opcode);
duke@435 2963 UPDATE_PC_AND_RETURN(0);
duke@435 2964 }
duke@435 2965
duke@435 2966 if (istate->msg() == popping_frame) {
duke@435 2967 // Make it simpler on the assembly code and set the message for the frame pop.
duke@435 2968 // returns
duke@435 2969 if (istate->prev() == NULL) {
duke@435 2970 // We must be returning to a deoptimized frame (because popframe only happens between
duke@435 2971 // two interpreted frames). We need to save the current arguments in C heap so that
duke@435 2972 // the deoptimized frame when it restarts can copy the arguments to its expression
duke@435 2973 // stack and re-execute the call. We also have to notify deoptimization that this
twisti@1040 2974 // has occurred and to pick the preserved args copy them to the deoptimized frame's
duke@435 2975 // java expression stack. Yuck.
duke@435 2976 //
duke@435 2977 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
duke@435 2978 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
duke@435 2979 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
duke@435 2980 }
bobv@2036 2981 THREAD->clr_pop_frame_in_process();
duke@435 2982 }
bobv@2036 2983
bobv@2036 2984 // Normal return
bobv@2036 2985 // Advance the pc and return to frame manager
bobv@2036 2986 istate->set_msg(return_from_method);
bobv@2036 2987 istate->set_return_kind((Bytecodes::Code)opcode);
bobv@2036 2988 UPDATE_PC_AND_RETURN(1);
duke@435 2989 } /* handle_return: */
duke@435 2990
duke@435 2991 // This is really a fatal error return
duke@435 2992
duke@435 2993 finish:
duke@435 2994 DECACHE_TOS();
duke@435 2995 DECACHE_PC();
duke@435 2996
duke@435 2997 return;
duke@435 2998 }
duke@435 2999
duke@435 3000 /*
duke@435 3001 * All the code following this point is only produced once and is not present
duke@435 3002 * in the JVMTI version of the interpreter
duke@435 3003 */
duke@435 3004
duke@435 3005 #ifndef VM_JVMTI
duke@435 3006
duke@435 3007 // This constructor should only be used to contruct the object to signal
duke@435 3008 // interpreter initialization. All other instances should be created by
duke@435 3009 // the frame manager.
duke@435 3010 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
duke@435 3011 if (msg != initialize) ShouldNotReachHere();
duke@435 3012 _msg = msg;
duke@435 3013 _self_link = this;
duke@435 3014 _prev_link = NULL;
duke@435 3015 }
duke@435 3016
duke@435 3017 // Inline static functions for Java Stack and Local manipulation
duke@435 3018
duke@435 3019 // The implementations are platform dependent. We have to worry about alignment
duke@435 3020 // issues on some machines which can change on the same platform depending on
duke@435 3021 // whether it is an LP64 machine also.
duke@435 3022 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
duke@435 3023 return (address) tos[Interpreter::expr_index_at(-offset)];
duke@435 3024 }
duke@435 3025
duke@435 3026 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
duke@435 3027 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
duke@435 3028 }
duke@435 3029
duke@435 3030 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
duke@435 3031 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
duke@435 3032 }
duke@435 3033
duke@435 3034 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
duke@435 3035 return (oop)tos [Interpreter::expr_index_at(-offset)];
duke@435 3036 }
duke@435 3037
duke@435 3038 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
duke@435 3039 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
duke@435 3040 }
duke@435 3041
duke@435 3042 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
duke@435 3043 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
duke@435 3044 }
duke@435 3045
duke@435 3046 // only used for value types
duke@435 3047 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
duke@435 3048 int offset) {
duke@435 3049 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3050 }
duke@435 3051
duke@435 3052 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
duke@435 3053 int offset) {
duke@435 3054 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3055 }
duke@435 3056
duke@435 3057 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
duke@435 3058 int offset) {
duke@435 3059 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3060 }
duke@435 3061
duke@435 3062 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
duke@435 3063 int offset) {
duke@435 3064 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
duke@435 3065 }
duke@435 3066
duke@435 3067 // needs to be platform dep for the 32 bit platforms.
duke@435 3068 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
duke@435 3069 int offset) {
duke@435 3070 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
duke@435 3071 }
duke@435 3072
duke@435 3073 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
duke@435 3074 address addr, int offset) {
duke@435 3075 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
duke@435 3076 ((VMJavaVal64*)addr)->d);
duke@435 3077 }
duke@435 3078
duke@435 3079 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
duke@435 3080 int offset) {
duke@435 3081 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
duke@435 3082 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
duke@435 3083 }
duke@435 3084
duke@435 3085 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
duke@435 3086 address addr, int offset) {
duke@435 3087 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
duke@435 3088 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
duke@435 3089 ((VMJavaVal64*)addr)->l;
duke@435 3090 }
duke@435 3091
duke@435 3092 // Locals
duke@435 3093
duke@435 3094 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
duke@435 3095 return (address)locals[Interpreter::local_index_at(-offset)];
duke@435 3096 }
duke@435 3097 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
duke@435 3098 return (jint)locals[Interpreter::local_index_at(-offset)];
duke@435 3099 }
duke@435 3100 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
duke@435 3101 return (jfloat)locals[Interpreter::local_index_at(-offset)];
duke@435 3102 }
duke@435 3103 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
duke@435 3104 return (oop)locals[Interpreter::local_index_at(-offset)];
duke@435 3105 }
duke@435 3106 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
duke@435 3107 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
duke@435 3108 }
duke@435 3109 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
duke@435 3110 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
duke@435 3111 }
duke@435 3112
duke@435 3113 // Returns the address of locals value.
duke@435 3114 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
duke@435 3115 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
duke@435 3116 }
duke@435 3117 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
duke@435 3118 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
duke@435 3119 }
duke@435 3120
duke@435 3121 // Used for local value or returnAddress
duke@435 3122 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
duke@435 3123 address value, int offset) {
duke@435 3124 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3125 }
duke@435 3126 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
duke@435 3127 jint value, int offset) {
duke@435 3128 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3129 }
duke@435 3130 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
duke@435 3131 jfloat value, int offset) {
duke@435 3132 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3133 }
duke@435 3134 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
duke@435 3135 oop value, int offset) {
duke@435 3136 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
duke@435 3137 }
duke@435 3138 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
duke@435 3139 jdouble value, int offset) {
duke@435 3140 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
duke@435 3141 }
duke@435 3142 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
duke@435 3143 jlong value, int offset) {
duke@435 3144 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
duke@435 3145 }
duke@435 3146 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
duke@435 3147 address addr, int offset) {
duke@435 3148 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
duke@435 3149 }
duke@435 3150 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
duke@435 3151 address addr, int offset) {
duke@435 3152 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
duke@435 3153 }
duke@435 3154
duke@435 3155 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
duke@435 3156 intptr_t* locals, int locals_offset) {
duke@435 3157 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
duke@435 3158 locals[Interpreter::local_index_at(-locals_offset)] = value;
duke@435 3159 }
duke@435 3160
duke@435 3161
duke@435 3162 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
duke@435 3163 int to_offset) {
duke@435 3164 tos[Interpreter::expr_index_at(-to_offset)] =
duke@435 3165 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
duke@435 3166 }
duke@435 3167
duke@435 3168 void BytecodeInterpreter::dup(intptr_t *tos) {
duke@435 3169 copy_stack_slot(tos, -1, 0);
duke@435 3170 }
duke@435 3171 void BytecodeInterpreter::dup2(intptr_t *tos) {
duke@435 3172 copy_stack_slot(tos, -2, 0);
duke@435 3173 copy_stack_slot(tos, -1, 1);
duke@435 3174 }
duke@435 3175
duke@435 3176 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
duke@435 3177 /* insert top word two down */
duke@435 3178 copy_stack_slot(tos, -1, 0);
duke@435 3179 copy_stack_slot(tos, -2, -1);
duke@435 3180 copy_stack_slot(tos, 0, -2);
duke@435 3181 }
duke@435 3182
duke@435 3183 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
duke@435 3184 /* insert top word three down */
duke@435 3185 copy_stack_slot(tos, -1, 0);
duke@435 3186 copy_stack_slot(tos, -2, -1);
duke@435 3187 copy_stack_slot(tos, -3, -2);
duke@435 3188 copy_stack_slot(tos, 0, -3);
duke@435 3189 }
duke@435 3190 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
duke@435 3191 /* insert top 2 slots three down */
duke@435 3192 copy_stack_slot(tos, -1, 1);
duke@435 3193 copy_stack_slot(tos, -2, 0);
duke@435 3194 copy_stack_slot(tos, -3, -1);
duke@435 3195 copy_stack_slot(tos, 1, -2);
duke@435 3196 copy_stack_slot(tos, 0, -3);
duke@435 3197 }
duke@435 3198 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
duke@435 3199 /* insert top 2 slots four down */
duke@435 3200 copy_stack_slot(tos, -1, 1);
duke@435 3201 copy_stack_slot(tos, -2, 0);
duke@435 3202 copy_stack_slot(tos, -3, -1);
duke@435 3203 copy_stack_slot(tos, -4, -2);
duke@435 3204 copy_stack_slot(tos, 1, -3);
duke@435 3205 copy_stack_slot(tos, 0, -4);
duke@435 3206 }
duke@435 3207
duke@435 3208
duke@435 3209 void BytecodeInterpreter::swap(intptr_t *tos) {
duke@435 3210 // swap top two elements
duke@435 3211 intptr_t val = tos[Interpreter::expr_index_at(1)];
duke@435 3212 // Copy -2 entry to -1
duke@435 3213 copy_stack_slot(tos, -2, -1);
duke@435 3214 // Store saved -1 entry into -2
duke@435 3215 tos[Interpreter::expr_index_at(2)] = val;
duke@435 3216 }
duke@435 3217 // --------------------------------------------------------------------------------
duke@435 3218 // Non-product code
duke@435 3219 #ifndef PRODUCT
duke@435 3220
duke@435 3221 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
duke@435 3222 switch (msg) {
duke@435 3223 case BytecodeInterpreter::no_request: return("no_request");
duke@435 3224 case BytecodeInterpreter::initialize: return("initialize");
duke@435 3225 // status message to C++ interpreter
duke@435 3226 case BytecodeInterpreter::method_entry: return("method_entry");
duke@435 3227 case BytecodeInterpreter::method_resume: return("method_resume");
duke@435 3228 case BytecodeInterpreter::got_monitors: return("got_monitors");
duke@435 3229 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
duke@435 3230 // requests to frame manager from C++ interpreter
duke@435 3231 case BytecodeInterpreter::call_method: return("call_method");
duke@435 3232 case BytecodeInterpreter::return_from_method: return("return_from_method");
duke@435 3233 case BytecodeInterpreter::more_monitors: return("more_monitors");
duke@435 3234 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
duke@435 3235 case BytecodeInterpreter::popping_frame: return("popping_frame");
duke@435 3236 case BytecodeInterpreter::do_osr: return("do_osr");
duke@435 3237 // deopt
duke@435 3238 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
duke@435 3239 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
duke@435 3240 default: return("BAD MSG");
duke@435 3241 }
duke@435 3242 }
duke@435 3243 void
duke@435 3244 BytecodeInterpreter::print() {
duke@435 3245 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
duke@435 3246 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
duke@435 3247 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
duke@435 3248 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
duke@435 3249 {
duke@435 3250 ResourceMark rm;
duke@435 3251 char *method_name = _method->name_and_sig_as_C_string();
duke@435 3252 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
duke@435 3253 }
duke@435 3254 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
duke@435 3255 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
duke@435 3256 tty->print_cr("msg: %s", C_msg(this->_msg));
duke@435 3257 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
duke@435 3258 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
duke@435 3259 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
duke@435 3260 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
duke@435 3261 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
duke@435 3262 tty->print_cr("result_return_kind 0x%x ", (int) this->_result._return_kind);
duke@435 3263 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
duke@435 3264 tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
duke@435 3265 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
duke@435 3266 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
duke@435 3267 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
duke@435 3268 #ifdef SPARC
duke@435 3269 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
duke@435 3270 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
duke@435 3271 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
duke@435 3272 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
duke@435 3273 #endif
morris@4535 3274 #if !defined(ZERO)
duke@435 3275 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
morris@4535 3276 #endif // !ZERO
duke@435 3277 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
duke@435 3278 }
duke@435 3279
duke@435 3280 extern "C" {
goetz@6445 3281 void PI(uintptr_t arg) {
goetz@6445 3282 ((BytecodeInterpreter*)arg)->print();
goetz@6445 3283 }
duke@435 3284 }
duke@435 3285 #endif // PRODUCT
duke@435 3286
duke@435 3287 #endif // JVMTI
duke@435 3288 #endif // CC_INTERP

mercurial