Tue, 09 Jul 2013 14:28:07 +0200
8020121: PPC64: fix build in cppInterpreter after 8019519
Reviewed-by: kvn
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | // no precompiled headers |
stefank@2314 | 26 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 27 | #include "gc_interface/collectedHeap.hpp" |
stefank@2314 | 28 | #include "interpreter/bytecodeHistogram.hpp" |
stefank@2314 | 29 | #include "interpreter/bytecodeInterpreter.hpp" |
stefank@2314 | 30 | #include "interpreter/bytecodeInterpreter.inline.hpp" |
stefank@2314 | 31 | #include "interpreter/interpreter.hpp" |
stefank@2314 | 32 | #include "interpreter/interpreterRuntime.hpp" |
stefank@2314 | 33 | #include "memory/resourceArea.hpp" |
jiangli@5065 | 34 | #include "oops/methodCounters.hpp" |
stefank@2314 | 35 | #include "oops/objArrayKlass.hpp" |
stefank@2314 | 36 | #include "oops/oop.inline.hpp" |
stefank@2314 | 37 | #include "prims/jvmtiExport.hpp" |
goetz@6450 | 38 | #include "prims/jvmtiThreadState.hpp" |
goetz@6445 | 39 | #include "runtime/biasedLocking.hpp" |
stefank@2314 | 40 | #include "runtime/frame.inline.hpp" |
stefank@2314 | 41 | #include "runtime/handles.inline.hpp" |
stefank@2314 | 42 | #include "runtime/interfaceSupport.hpp" |
stefank@2314 | 43 | #include "runtime/sharedRuntime.hpp" |
stefank@2314 | 44 | #include "runtime/threadCritical.hpp" |
stefank@2314 | 45 | #include "utilities/exceptions.hpp" |
stefank@2314 | 46 | #ifdef TARGET_OS_ARCH_linux_x86 |
stefank@2314 | 47 | # include "orderAccess_linux_x86.inline.hpp" |
stefank@2314 | 48 | #endif |
stefank@2314 | 49 | #ifdef TARGET_OS_ARCH_linux_sparc |
stefank@2314 | 50 | # include "orderAccess_linux_sparc.inline.hpp" |
stefank@2314 | 51 | #endif |
stefank@2314 | 52 | #ifdef TARGET_OS_ARCH_linux_zero |
stefank@2314 | 53 | # include "orderAccess_linux_zero.inline.hpp" |
stefank@2314 | 54 | #endif |
stefank@2314 | 55 | #ifdef TARGET_OS_ARCH_solaris_x86 |
stefank@2314 | 56 | # include "orderAccess_solaris_x86.inline.hpp" |
stefank@2314 | 57 | #endif |
stefank@2314 | 58 | #ifdef TARGET_OS_ARCH_solaris_sparc |
stefank@2314 | 59 | # include "orderAccess_solaris_sparc.inline.hpp" |
stefank@2314 | 60 | #endif |
stefank@2314 | 61 | #ifdef TARGET_OS_ARCH_windows_x86 |
stefank@2314 | 62 | # include "orderAccess_windows_x86.inline.hpp" |
stefank@2314 | 63 | #endif |
bobv@2508 | 64 | #ifdef TARGET_OS_ARCH_linux_arm |
bobv@2508 | 65 | # include "orderAccess_linux_arm.inline.hpp" |
bobv@2508 | 66 | #endif |
bobv@2508 | 67 | #ifdef TARGET_OS_ARCH_linux_ppc |
bobv@2508 | 68 | # include "orderAccess_linux_ppc.inline.hpp" |
bobv@2508 | 69 | #endif |
never@3156 | 70 | #ifdef TARGET_OS_ARCH_bsd_x86 |
never@3156 | 71 | # include "orderAccess_bsd_x86.inline.hpp" |
never@3156 | 72 | #endif |
never@3156 | 73 | #ifdef TARGET_OS_ARCH_bsd_zero |
never@3156 | 74 | # include "orderAccess_bsd_zero.inline.hpp" |
never@3156 | 75 | #endif |
stefank@2314 | 76 | |
stefank@2314 | 77 | |
stefank@2314 | 78 | // no precompiled headers |
duke@435 | 79 | #ifdef CC_INTERP |
duke@435 | 80 | |
duke@435 | 81 | /* |
duke@435 | 82 | * USELABELS - If using GCC, then use labels for the opcode dispatching |
duke@435 | 83 | * rather than a switch statement. This improves performance because it |
duke@435 | 84 | * gives us the opportunity to have the instructions that calculate the |
duke@435 | 85 | * next opcode to jump to be intermixed with the rest of the instructions |
duke@435 | 86 | * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro). |
duke@435 | 87 | */ |
duke@435 | 88 | #undef USELABELS |
duke@435 | 89 | #ifdef __GNUC__ |
duke@435 | 90 | /* |
duke@435 | 91 | ASSERT signifies debugging. It is much easier to step thru bytecodes if we |
duke@435 | 92 | don't use the computed goto approach. |
duke@435 | 93 | */ |
duke@435 | 94 | #ifndef ASSERT |
duke@435 | 95 | #define USELABELS |
duke@435 | 96 | #endif |
duke@435 | 97 | #endif |
duke@435 | 98 | |
duke@435 | 99 | #undef CASE |
duke@435 | 100 | #ifdef USELABELS |
duke@435 | 101 | #define CASE(opcode) opc ## opcode |
duke@435 | 102 | #define DEFAULT opc_default |
duke@435 | 103 | #else |
duke@435 | 104 | #define CASE(opcode) case Bytecodes:: opcode |
duke@435 | 105 | #define DEFAULT default |
duke@435 | 106 | #endif |
duke@435 | 107 | |
duke@435 | 108 | /* |
duke@435 | 109 | * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next |
duke@435 | 110 | * opcode before going back to the top of the while loop, rather than having |
duke@435 | 111 | * the top of the while loop handle it. This provides a better opportunity |
duke@435 | 112 | * for instruction scheduling. Some compilers just do this prefetch |
duke@435 | 113 | * automatically. Some actually end up with worse performance if you |
duke@435 | 114 | * force the prefetch. Solaris gcc seems to do better, but cc does worse. |
duke@435 | 115 | */ |
duke@435 | 116 | #undef PREFETCH_OPCCODE |
duke@435 | 117 | #define PREFETCH_OPCCODE |
duke@435 | 118 | |
duke@435 | 119 | /* |
duke@435 | 120 | Interpreter safepoint: it is expected that the interpreter will have no live |
duke@435 | 121 | handles of its own creation live at an interpreter safepoint. Therefore we |
duke@435 | 122 | run a HandleMarkCleaner and trash all handles allocated in the call chain |
duke@435 | 123 | since the JavaCalls::call_helper invocation that initiated the chain. |
duke@435 | 124 | There really shouldn't be any handles remaining to trash but this is cheap |
duke@435 | 125 | in relation to a safepoint. |
duke@435 | 126 | */ |
duke@435 | 127 | #define SAFEPOINT \ |
duke@435 | 128 | if ( SafepointSynchronize::is_synchronizing()) { \ |
duke@435 | 129 | { \ |
duke@435 | 130 | /* zap freed handles rather than GC'ing them */ \ |
duke@435 | 131 | HandleMarkCleaner __hmc(THREAD); \ |
duke@435 | 132 | } \ |
duke@435 | 133 | CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \ |
duke@435 | 134 | } |
duke@435 | 135 | |
duke@435 | 136 | /* |
duke@435 | 137 | * VM_JAVA_ERROR - Macro for throwing a java exception from |
duke@435 | 138 | * the interpreter loop. Should really be a CALL_VM but there |
duke@435 | 139 | * is no entry point to do the transition to vm so we just |
duke@435 | 140 | * do it by hand here. |
duke@435 | 141 | */ |
duke@435 | 142 | #define VM_JAVA_ERROR_NO_JUMP(name, msg) \ |
duke@435 | 143 | DECACHE_STATE(); \ |
duke@435 | 144 | SET_LAST_JAVA_FRAME(); \ |
duke@435 | 145 | { \ |
duke@435 | 146 | ThreadInVMfromJava trans(THREAD); \ |
duke@435 | 147 | Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \ |
duke@435 | 148 | } \ |
duke@435 | 149 | RESET_LAST_JAVA_FRAME(); \ |
duke@435 | 150 | CACHE_STATE(); |
duke@435 | 151 | |
duke@435 | 152 | // Normal throw of a java error |
duke@435 | 153 | #define VM_JAVA_ERROR(name, msg) \ |
duke@435 | 154 | VM_JAVA_ERROR_NO_JUMP(name, msg) \ |
duke@435 | 155 | goto handle_exception; |
duke@435 | 156 | |
duke@435 | 157 | #ifdef PRODUCT |
duke@435 | 158 | #define DO_UPDATE_INSTRUCTION_COUNT(opcode) |
duke@435 | 159 | #else |
duke@435 | 160 | #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \ |
duke@435 | 161 | { \ |
duke@435 | 162 | BytecodeCounter::_counter_value++; \ |
duke@435 | 163 | BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \ |
duke@435 | 164 | if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \ |
duke@435 | 165 | if (TraceBytecodes) { \ |
duke@435 | 166 | CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \ |
duke@435 | 167 | topOfStack[Interpreter::expr_index_at(1)], \ |
duke@435 | 168 | topOfStack[Interpreter::expr_index_at(2)]), \ |
duke@435 | 169 | handle_exception); \ |
duke@435 | 170 | } \ |
duke@435 | 171 | } |
duke@435 | 172 | #endif |
duke@435 | 173 | |
duke@435 | 174 | #undef DEBUGGER_SINGLE_STEP_NOTIFY |
duke@435 | 175 | #ifdef VM_JVMTI |
duke@435 | 176 | /* NOTE: (kbr) This macro must be called AFTER the PC has been |
duke@435 | 177 | incremented. JvmtiExport::at_single_stepping_point() may cause a |
duke@435 | 178 | breakpoint opcode to get inserted at the current PC to allow the |
duke@435 | 179 | debugger to coalesce single-step events. |
duke@435 | 180 | |
duke@435 | 181 | As a result if we call at_single_stepping_point() we refetch opcode |
duke@435 | 182 | to get the current opcode. This will override any other prefetching |
duke@435 | 183 | that might have occurred. |
duke@435 | 184 | */ |
duke@435 | 185 | #define DEBUGGER_SINGLE_STEP_NOTIFY() \ |
duke@435 | 186 | { \ |
duke@435 | 187 | if (_jvmti_interp_events) { \ |
duke@435 | 188 | if (JvmtiExport::should_post_single_step()) { \ |
duke@435 | 189 | DECACHE_STATE(); \ |
duke@435 | 190 | SET_LAST_JAVA_FRAME(); \ |
duke@435 | 191 | ThreadInVMfromJava trans(THREAD); \ |
duke@435 | 192 | JvmtiExport::at_single_stepping_point(THREAD, \ |
duke@435 | 193 | istate->method(), \ |
duke@435 | 194 | pc); \ |
duke@435 | 195 | RESET_LAST_JAVA_FRAME(); \ |
duke@435 | 196 | CACHE_STATE(); \ |
duke@435 | 197 | if (THREAD->pop_frame_pending() && \ |
duke@435 | 198 | !THREAD->pop_frame_in_process()) { \ |
duke@435 | 199 | goto handle_Pop_Frame; \ |
duke@435 | 200 | } \ |
goetz@6450 | 201 | if (THREAD->jvmti_thread_state() && \ |
goetz@6450 | 202 | THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ |
goetz@6450 | 203 | goto handle_Early_Return; \ |
goetz@6450 | 204 | } \ |
duke@435 | 205 | opcode = *pc; \ |
duke@435 | 206 | } \ |
duke@435 | 207 | } \ |
duke@435 | 208 | } |
duke@435 | 209 | #else |
duke@435 | 210 | #define DEBUGGER_SINGLE_STEP_NOTIFY() |
duke@435 | 211 | #endif |
duke@435 | 212 | |
duke@435 | 213 | /* |
duke@435 | 214 | * CONTINUE - Macro for executing the next opcode. |
duke@435 | 215 | */ |
duke@435 | 216 | #undef CONTINUE |
duke@435 | 217 | #ifdef USELABELS |
duke@435 | 218 | // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an |
duke@435 | 219 | // initialization (which is the initialization of the table pointer...) |
coleenp@955 | 220 | #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode] |
duke@435 | 221 | #define CONTINUE { \ |
duke@435 | 222 | opcode = *pc; \ |
duke@435 | 223 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 224 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 225 | DISPATCH(opcode); \ |
duke@435 | 226 | } |
duke@435 | 227 | #else |
duke@435 | 228 | #ifdef PREFETCH_OPCCODE |
duke@435 | 229 | #define CONTINUE { \ |
duke@435 | 230 | opcode = *pc; \ |
duke@435 | 231 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 232 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 233 | continue; \ |
duke@435 | 234 | } |
duke@435 | 235 | #else |
duke@435 | 236 | #define CONTINUE { \ |
duke@435 | 237 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 238 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 239 | continue; \ |
duke@435 | 240 | } |
duke@435 | 241 | #endif |
duke@435 | 242 | #endif |
duke@435 | 243 | |
duke@435 | 244 | |
duke@435 | 245 | #define UPDATE_PC(opsize) {pc += opsize; } |
duke@435 | 246 | /* |
duke@435 | 247 | * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack. |
duke@435 | 248 | */ |
duke@435 | 249 | #undef UPDATE_PC_AND_TOS |
duke@435 | 250 | #define UPDATE_PC_AND_TOS(opsize, stack) \ |
duke@435 | 251 | {pc += opsize; MORE_STACK(stack); } |
duke@435 | 252 | |
duke@435 | 253 | /* |
duke@435 | 254 | * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack, |
duke@435 | 255 | * and executing the next opcode. It's somewhat similar to the combination |
duke@435 | 256 | * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations. |
duke@435 | 257 | */ |
duke@435 | 258 | #undef UPDATE_PC_AND_TOS_AND_CONTINUE |
duke@435 | 259 | #ifdef USELABELS |
duke@435 | 260 | #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \ |
duke@435 | 261 | pc += opsize; opcode = *pc; MORE_STACK(stack); \ |
duke@435 | 262 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 263 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 264 | DISPATCH(opcode); \ |
duke@435 | 265 | } |
duke@435 | 266 | |
duke@435 | 267 | #define UPDATE_PC_AND_CONTINUE(opsize) { \ |
duke@435 | 268 | pc += opsize; opcode = *pc; \ |
duke@435 | 269 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 270 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 271 | DISPATCH(opcode); \ |
duke@435 | 272 | } |
duke@435 | 273 | #else |
duke@435 | 274 | #ifdef PREFETCH_OPCCODE |
duke@435 | 275 | #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \ |
duke@435 | 276 | pc += opsize; opcode = *pc; MORE_STACK(stack); \ |
duke@435 | 277 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 278 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 279 | goto do_continue; \ |
duke@435 | 280 | } |
duke@435 | 281 | |
duke@435 | 282 | #define UPDATE_PC_AND_CONTINUE(opsize) { \ |
duke@435 | 283 | pc += opsize; opcode = *pc; \ |
duke@435 | 284 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 285 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 286 | goto do_continue; \ |
duke@435 | 287 | } |
duke@435 | 288 | #else |
duke@435 | 289 | #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \ |
duke@435 | 290 | pc += opsize; MORE_STACK(stack); \ |
duke@435 | 291 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 292 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 293 | goto do_continue; \ |
duke@435 | 294 | } |
duke@435 | 295 | |
duke@435 | 296 | #define UPDATE_PC_AND_CONTINUE(opsize) { \ |
duke@435 | 297 | pc += opsize; \ |
duke@435 | 298 | DO_UPDATE_INSTRUCTION_COUNT(opcode); \ |
duke@435 | 299 | DEBUGGER_SINGLE_STEP_NOTIFY(); \ |
duke@435 | 300 | goto do_continue; \ |
duke@435 | 301 | } |
duke@435 | 302 | #endif /* PREFETCH_OPCCODE */ |
duke@435 | 303 | #endif /* USELABELS */ |
duke@435 | 304 | |
duke@435 | 305 | // About to call a new method: save the adjusted pc and return to the frame manager |
duke@435 | 306 | #define UPDATE_PC_AND_RETURN(opsize) \ |
duke@435 | 307 | DECACHE_TOS(); \ |
duke@435 | 308 | istate->set_bcp(pc+opsize); \ |
duke@435 | 309 | return; |
duke@435 | 310 | |
duke@435 | 311 | |
duke@435 | 312 | #define METHOD istate->method() |
jiangli@5065 | 313 | #define GET_METHOD_COUNTERS(res) \ |
jiangli@5065 | 314 | res = METHOD->method_counters(); \ |
jiangli@5065 | 315 | if (res == NULL) { \ |
jiangli@5065 | 316 | CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \ |
jiangli@5065 | 317 | } |
jiangli@5065 | 318 | |
duke@435 | 319 | #define OSR_REQUEST(res, branch_pc) \ |
duke@435 | 320 | CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception); |
duke@435 | 321 | /* |
duke@435 | 322 | * For those opcodes that need to have a GC point on a backwards branch |
duke@435 | 323 | */ |
duke@435 | 324 | |
duke@435 | 325 | // Backedge counting is kind of strange. The asm interpreter will increment |
duke@435 | 326 | // the backedge counter as a separate counter but it does its comparisons |
duke@435 | 327 | // to the sum (scaled) of invocation counter and backedge count to make |
duke@435 | 328 | // a decision. Seems kind of odd to sum them together like that |
duke@435 | 329 | |
duke@435 | 330 | // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp |
duke@435 | 331 | |
duke@435 | 332 | |
duke@435 | 333 | #define DO_BACKEDGE_CHECKS(skip, branch_pc) \ |
duke@435 | 334 | if ((skip) <= 0) { \ |
jiangli@5065 | 335 | MethodCounters* mcs; \ |
jiangli@5065 | 336 | GET_METHOD_COUNTERS(mcs); \ |
twisti@1513 | 337 | if (UseLoopCounter) { \ |
duke@435 | 338 | bool do_OSR = UseOnStackReplacement; \ |
jiangli@5065 | 339 | mcs->backedge_counter()->increment(); \ |
jiangli@5065 | 340 | if (do_OSR) do_OSR = mcs->backedge_counter()->reached_InvocationLimit(); \ |
duke@435 | 341 | if (do_OSR) { \ |
duke@435 | 342 | nmethod* osr_nmethod; \ |
duke@435 | 343 | OSR_REQUEST(osr_nmethod, branch_pc); \ |
duke@435 | 344 | if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \ |
twisti@1513 | 345 | intptr_t* buf = SharedRuntime::OSR_migration_begin(THREAD); \ |
duke@435 | 346 | istate->set_msg(do_osr); \ |
duke@435 | 347 | istate->set_osr_buf((address)buf); \ |
duke@435 | 348 | istate->set_osr_entry(osr_nmethod->osr_entry()); \ |
duke@435 | 349 | return; \ |
duke@435 | 350 | } \ |
duke@435 | 351 | } \ |
duke@435 | 352 | } /* UseCompiler ... */ \ |
jiangli@5065 | 353 | mcs->invocation_counter()->increment(); \ |
duke@435 | 354 | SAFEPOINT; \ |
duke@435 | 355 | } |
duke@435 | 356 | |
duke@435 | 357 | /* |
duke@435 | 358 | * For those opcodes that need to have a GC point on a backwards branch |
duke@435 | 359 | */ |
duke@435 | 360 | |
duke@435 | 361 | /* |
duke@435 | 362 | * Macros for caching and flushing the interpreter state. Some local |
duke@435 | 363 | * variables need to be flushed out to the frame before we do certain |
duke@435 | 364 | * things (like pushing frames or becoming gc safe) and some need to |
duke@435 | 365 | * be recached later (like after popping a frame). We could use one |
duke@435 | 366 | * macro to cache or decache everything, but this would be less than |
duke@435 | 367 | * optimal because we don't always need to cache or decache everything |
duke@435 | 368 | * because some things we know are already cached or decached. |
duke@435 | 369 | */ |
duke@435 | 370 | #undef DECACHE_TOS |
duke@435 | 371 | #undef CACHE_TOS |
duke@435 | 372 | #undef CACHE_PREV_TOS |
duke@435 | 373 | #define DECACHE_TOS() istate->set_stack(topOfStack); |
duke@435 | 374 | |
duke@435 | 375 | #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack(); |
duke@435 | 376 | |
duke@435 | 377 | #undef DECACHE_PC |
duke@435 | 378 | #undef CACHE_PC |
duke@435 | 379 | #define DECACHE_PC() istate->set_bcp(pc); |
duke@435 | 380 | #define CACHE_PC() pc = istate->bcp(); |
duke@435 | 381 | #define CACHE_CP() cp = istate->constants(); |
duke@435 | 382 | #define CACHE_LOCALS() locals = istate->locals(); |
duke@435 | 383 | #undef CACHE_FRAME |
duke@435 | 384 | #define CACHE_FRAME() |
duke@435 | 385 | |
duke@435 | 386 | /* |
duke@435 | 387 | * CHECK_NULL - Macro for throwing a NullPointerException if the object |
duke@435 | 388 | * passed is a null ref. |
duke@435 | 389 | * On some architectures/platforms it should be possible to do this implicitly |
duke@435 | 390 | */ |
duke@435 | 391 | #undef CHECK_NULL |
duke@435 | 392 | #define CHECK_NULL(obj_) \ |
coleenp@955 | 393 | if ((obj_) == NULL) { \ |
duke@435 | 394 | VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), ""); \ |
bobv@2036 | 395 | } \ |
bobv@2036 | 396 | VERIFY_OOP(obj_) |
duke@435 | 397 | |
duke@435 | 398 | #define VMdoubleConstZero() 0.0 |
duke@435 | 399 | #define VMdoubleConstOne() 1.0 |
duke@435 | 400 | #define VMlongConstZero() (max_jlong-max_jlong) |
duke@435 | 401 | #define VMlongConstOne() ((max_jlong-max_jlong)+1) |
duke@435 | 402 | |
duke@435 | 403 | /* |
duke@435 | 404 | * Alignment |
duke@435 | 405 | */ |
duke@435 | 406 | #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3) |
duke@435 | 407 | |
duke@435 | 408 | // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod) |
duke@435 | 409 | #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS(); |
duke@435 | 410 | |
duke@435 | 411 | // Reload interpreter state after calling the VM or a possible GC |
duke@435 | 412 | #define CACHE_STATE() \ |
duke@435 | 413 | CACHE_TOS(); \ |
duke@435 | 414 | CACHE_PC(); \ |
duke@435 | 415 | CACHE_CP(); \ |
duke@435 | 416 | CACHE_LOCALS(); |
duke@435 | 417 | |
duke@435 | 418 | // Call the VM don't check for pending exceptions |
goetz@6450 | 419 | #define CALL_VM_NOCHECK(func) \ |
goetz@6450 | 420 | DECACHE_STATE(); \ |
goetz@6450 | 421 | SET_LAST_JAVA_FRAME(); \ |
goetz@6450 | 422 | func; \ |
goetz@6450 | 423 | RESET_LAST_JAVA_FRAME(); \ |
goetz@6450 | 424 | CACHE_STATE(); \ |
goetz@6450 | 425 | if (THREAD->pop_frame_pending() && \ |
goetz@6450 | 426 | !THREAD->pop_frame_in_process()) { \ |
goetz@6450 | 427 | goto handle_Pop_Frame; \ |
goetz@6450 | 428 | } \ |
goetz@6450 | 429 | if (THREAD->jvmti_thread_state() && \ |
goetz@6450 | 430 | THREAD->jvmti_thread_state()->is_earlyret_pending()) { \ |
goetz@6450 | 431 | goto handle_Early_Return; \ |
goetz@6450 | 432 | } |
duke@435 | 433 | |
duke@435 | 434 | // Call the VM and check for pending exceptions |
goetz@6450 | 435 | #define CALL_VM(func, label) { \ |
goetz@6450 | 436 | CALL_VM_NOCHECK(func); \ |
goetz@6450 | 437 | if (THREAD->has_pending_exception()) goto label; \ |
duke@435 | 438 | } |
duke@435 | 439 | |
duke@435 | 440 | /* |
duke@435 | 441 | * BytecodeInterpreter::run(interpreterState istate) |
duke@435 | 442 | * BytecodeInterpreter::runWithChecks(interpreterState istate) |
duke@435 | 443 | * |
duke@435 | 444 | * The real deal. This is where byte codes actually get interpreted. |
duke@435 | 445 | * Basically it's a big while loop that iterates until we return from |
duke@435 | 446 | * the method passed in. |
duke@435 | 447 | * |
duke@435 | 448 | * The runWithChecks is used if JVMTI is enabled. |
duke@435 | 449 | * |
duke@435 | 450 | */ |
duke@435 | 451 | #if defined(VM_JVMTI) |
duke@435 | 452 | void |
duke@435 | 453 | BytecodeInterpreter::runWithChecks(interpreterState istate) { |
duke@435 | 454 | #else |
duke@435 | 455 | void |
duke@435 | 456 | BytecodeInterpreter::run(interpreterState istate) { |
duke@435 | 457 | #endif |
duke@435 | 458 | |
duke@435 | 459 | // In order to simplify some tests based on switches set at runtime |
duke@435 | 460 | // we invoke the interpreter a single time after switches are enabled |
duke@435 | 461 | // and set simpler to test variables rather than method calls or complex |
duke@435 | 462 | // boolean expressions. |
duke@435 | 463 | |
duke@435 | 464 | static int initialized = 0; |
duke@435 | 465 | static int checkit = 0; |
duke@435 | 466 | static intptr_t* c_addr = NULL; |
duke@435 | 467 | static intptr_t c_value; |
duke@435 | 468 | |
duke@435 | 469 | if (checkit && *c_addr != c_value) { |
duke@435 | 470 | os::breakpoint(); |
duke@435 | 471 | } |
duke@435 | 472 | #ifdef VM_JVMTI |
duke@435 | 473 | static bool _jvmti_interp_events = 0; |
duke@435 | 474 | #endif |
duke@435 | 475 | |
duke@435 | 476 | static int _compiling; // (UseCompiler || CountCompiledCalls) |
duke@435 | 477 | |
duke@435 | 478 | #ifdef ASSERT |
duke@435 | 479 | if (istate->_msg != initialize) { |
roland@5225 | 480 | // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap) |
roland@5225 | 481 | // because in that case, EnableInvokeDynamic is true by default but will be later switched off |
roland@5225 | 482 | // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes |
roland@5225 | 483 | // for the old JSR292 implementation. |
roland@5225 | 484 | // This leads to a situation where 'istate->_stack_limit' always accounts for |
roland@5225 | 485 | // methodOopDesc::extra_stack_entries() because it is computed in |
roland@5225 | 486 | // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while |
roland@5225 | 487 | // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't |
roland@5225 | 488 | // account for extra_stack_entries() anymore because at the time when it is called |
roland@5225 | 489 | // EnableInvokeDynamic was already set to false. |
roland@5225 | 490 | // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was |
roland@5225 | 491 | // switched off because of the wrong classes. |
roland@5225 | 492 | if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) { |
goetz@5319 | 493 | assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); |
roland@5225 | 494 | } else { |
goetz@5319 | 495 | const int extra_stack_entries = Method::extra_stack_entries_for_jsr292; |
roland@5225 | 496 | assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries |
roland@5225 | 497 | + 1), "bad stack limit"); |
roland@5225 | 498 | } |
twisti@2084 | 499 | #ifndef SHARK |
twisti@2084 | 500 | IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong")); |
twisti@2084 | 501 | #endif // !SHARK |
duke@435 | 502 | } |
duke@435 | 503 | // Verify linkages. |
duke@435 | 504 | interpreterState l = istate; |
duke@435 | 505 | do { |
duke@435 | 506 | assert(l == l->_self_link, "bad link"); |
duke@435 | 507 | l = l->_prev_link; |
duke@435 | 508 | } while (l != NULL); |
duke@435 | 509 | // Screwups with stack management usually cause us to overwrite istate |
duke@435 | 510 | // save a copy so we can verify it. |
duke@435 | 511 | interpreterState orig = istate; |
duke@435 | 512 | #endif |
duke@435 | 513 | |
duke@435 | 514 | register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */ |
duke@435 | 515 | register address pc = istate->bcp(); |
duke@435 | 516 | register jubyte opcode; |
duke@435 | 517 | register intptr_t* locals = istate->locals(); |
coleenp@4037 | 518 | register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache() |
duke@435 | 519 | #ifdef LOTS_OF_REGS |
duke@435 | 520 | register JavaThread* THREAD = istate->thread(); |
duke@435 | 521 | #else |
duke@435 | 522 | #undef THREAD |
duke@435 | 523 | #define THREAD istate->thread() |
duke@435 | 524 | #endif |
duke@435 | 525 | |
duke@435 | 526 | #ifdef USELABELS |
duke@435 | 527 | const static void* const opclabels_data[256] = { |
duke@435 | 528 | /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0, |
duke@435 | 529 | /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4, |
duke@435 | 530 | /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0, |
duke@435 | 531 | /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1, |
duke@435 | 532 | |
duke@435 | 533 | /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w, |
duke@435 | 534 | /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload, |
duke@435 | 535 | /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1, |
duke@435 | 536 | /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1, |
duke@435 | 537 | |
duke@435 | 538 | /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1, |
duke@435 | 539 | /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1, |
duke@435 | 540 | /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1, |
duke@435 | 541 | /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload, |
duke@435 | 542 | |
duke@435 | 543 | /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload, |
duke@435 | 544 | /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore, |
duke@435 | 545 | /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0, |
duke@435 | 546 | /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0, |
duke@435 | 547 | |
duke@435 | 548 | /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0, |
duke@435 | 549 | /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0, |
duke@435 | 550 | /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0, |
duke@435 | 551 | /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore, |
duke@435 | 552 | |
duke@435 | 553 | /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore, |
duke@435 | 554 | /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop, |
duke@435 | 555 | /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2, |
duke@435 | 556 | /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap, |
duke@435 | 557 | |
duke@435 | 558 | /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd, |
duke@435 | 559 | /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub, |
duke@435 | 560 | /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul, |
duke@435 | 561 | /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv, |
duke@435 | 562 | |
duke@435 | 563 | /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem, |
duke@435 | 564 | /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg, |
duke@435 | 565 | /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr, |
duke@435 | 566 | /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land, |
duke@435 | 567 | |
duke@435 | 568 | /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor, |
duke@435 | 569 | /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d, |
duke@435 | 570 | /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i, |
duke@435 | 571 | /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l, |
duke@435 | 572 | |
duke@435 | 573 | /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s, |
duke@435 | 574 | /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl, |
duke@435 | 575 | /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt, |
duke@435 | 576 | /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq, |
duke@435 | 577 | |
duke@435 | 578 | /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt, |
duke@435 | 579 | /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto, |
duke@435 | 580 | /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch, |
duke@435 | 581 | /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn, |
duke@435 | 582 | |
duke@435 | 583 | /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic, |
duke@435 | 584 | /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial, |
twisti@2762 | 585 | /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new, |
duke@435 | 586 | /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow, |
duke@435 | 587 | |
duke@435 | 588 | /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit, |
duke@435 | 589 | /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull, |
sgoldman@558 | 590 | /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default, |
sgoldman@558 | 591 | /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
sgoldman@558 | 592 | |
sgoldman@558 | 593 | /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 594 | /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 595 | /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 596 | /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 597 | |
duke@435 | 598 | /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
twisti@2762 | 599 | /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer, |
twisti@4237 | 600 | /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default, |
duke@435 | 601 | /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 602 | |
duke@435 | 603 | /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 604 | /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 605 | /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default, |
duke@435 | 606 | /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default |
duke@435 | 607 | }; |
duke@435 | 608 | register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0]; |
duke@435 | 609 | #endif /* USELABELS */ |
duke@435 | 610 | |
duke@435 | 611 | #ifdef ASSERT |
duke@435 | 612 | // this will trigger a VERIFY_OOP on entry |
duke@435 | 613 | if (istate->msg() != initialize && ! METHOD->is_static()) { |
duke@435 | 614 | oop rcvr = LOCALS_OBJECT(0); |
bobv@2036 | 615 | VERIFY_OOP(rcvr); |
duke@435 | 616 | } |
duke@435 | 617 | #endif |
duke@435 | 618 | // #define HACK |
duke@435 | 619 | #ifdef HACK |
duke@435 | 620 | bool interesting = false; |
duke@435 | 621 | #endif // HACK |
duke@435 | 622 | |
duke@435 | 623 | /* QQQ this should be a stack method so we don't know actual direction */ |
bobv@2036 | 624 | guarantee(istate->msg() == initialize || /* no frame yet during 'initialize' */ |
duke@435 | 625 | (topOfStack >= istate->stack_limit() && /* explicit parens: && binds tighter than || */ |
duke@435 | 626 | topOfStack < istate->stack_base()), |
duke@435 | 627 | "Stack top out of range"); |
duke@435 | 628 | |
duke@435 | 629 | switch (istate->msg()) { |
duke@435 | 630 | case initialize: { |
duke@435 | 631 | if (initialized++) ShouldNotReachHere(); // Only one initialize call |
duke@435 | 632 | _compiling = (UseCompiler || CountCompiledCalls); |
duke@435 | 633 | #ifdef VM_JVMTI |
duke@435 | 634 | _jvmti_interp_events = JvmtiExport::can_post_interpreter_events(); |
duke@435 | 635 | #endif |
duke@435 | 636 | return; |
duke@435 | 637 | } |
duke@435 | 638 | break; |
duke@435 | 639 | case method_entry: { |
duke@435 | 640 | THREAD->set_do_not_unlock(); |
duke@435 | 641 | // count invocations |
duke@435 | 642 | assert(initialized, "Interpreter not initialized"); |
duke@435 | 643 | if (_compiling) { |
jiangli@5065 | 644 | MethodCounters* mcs; |
jiangli@5065 | 645 | GET_METHOD_COUNTERS(mcs); |
duke@435 | 646 | if (ProfileInterpreter) { |
jiangli@5065 | 647 | METHOD->increment_interpreter_invocation_count(THREAD); |
duke@435 | 648 | } |
jiangli@5065 | 649 | mcs->invocation_counter()->increment(); |
jiangli@5065 | 650 | if (mcs->invocation_counter()->reached_InvocationLimit()) { |
duke@435 | 651 | CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception); |
duke@435 | 652 | |
duke@435 | 653 | // We no longer retry on a counter overflow |
duke@435 | 654 | |
duke@435 | 655 | // istate->set_msg(retry_method); |
duke@435 | 656 | // THREAD->clr_do_not_unlock(); |
duke@435 | 657 | // return; |
duke@435 | 658 | } |
duke@435 | 659 | SAFEPOINT; |
duke@435 | 660 | } |
duke@435 | 661 | |
duke@435 | 662 | if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { |
duke@435 | 663 | // initialize |
duke@435 | 664 | os::breakpoint(); |
duke@435 | 665 | } |
duke@435 | 666 | |
duke@435 | 667 | #ifdef HACK |
duke@435 | 668 | { |
duke@435 | 669 | ResourceMark rm; |
duke@435 | 670 | char *method_name = istate->method()->name_and_sig_as_C_string(); |
duke@435 | 671 | if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { |
duke@435 | 672 | tty->print_cr("entering: depth %d bci: %d", |
duke@435 | 673 | (istate->_stack_base - istate->_stack), |
duke@435 | 674 | istate->_bcp - istate->_method->code_base()); |
duke@435 | 675 | interesting = true; |
duke@435 | 676 | } |
duke@435 | 677 | } |
duke@435 | 678 | #endif // HACK |
duke@435 | 679 | |
duke@435 | 680 | |
duke@435 | 681 | // lock method if synchronized |
duke@435 | 682 | if (METHOD->is_synchronized()) { |
goetz@6445 | 683 | // oop rcvr = locals[0].j.r; |
goetz@6445 | 684 | oop rcvr; |
goetz@6445 | 685 | if (METHOD->is_static()) { |
goetz@6445 | 686 | rcvr = METHOD->constants()->pool_holder()->java_mirror(); |
goetz@6445 | 687 | } else { |
goetz@6445 | 688 | rcvr = LOCALS_OBJECT(0); |
goetz@6445 | 689 | VERIFY_OOP(rcvr); |
goetz@6445 | 690 | } |
goetz@6445 | 691 | // The initial monitor is ours for the taking |
goetz@6445 | 692 | // Monitor not filled in frame manager any longer as this caused race condition with biased locking. |
goetz@6445 | 693 | BasicObjectLock* mon = &istate->monitor_base()[-1]; |
goetz@6445 | 694 | mon->set_obj(rcvr); |
goetz@6445 | 695 | bool success = false; |
goetz@6445 | 696 | uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; |
goetz@6445 | 697 | markOop mark = rcvr->mark(); |
goetz@6445 | 698 | intptr_t hash = (intptr_t) markOopDesc::no_hash; |
goetz@6445 | 699 | // Implies UseBiasedLocking. |
goetz@6445 | 700 | if (mark->has_bias_pattern()) { |
goetz@6445 | 701 | uintptr_t thread_ident; |
goetz@6445 | 702 | uintptr_t anticipated_bias_locking_value; |
goetz@6445 | 703 | thread_ident = (uintptr_t)istate->thread(); |
goetz@6445 | 704 | anticipated_bias_locking_value = |
goetz@6445 | 705 | (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & |
goetz@6445 | 706 | ~((uintptr_t) markOopDesc::age_mask_in_place); |
goetz@6445 | 707 | |
goetz@6445 | 708 | if (anticipated_bias_locking_value == 0) { |
goetz@6445 | 709 | // Already biased towards this thread, nothing to do. |
goetz@6445 | 710 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 711 | (* BiasedLocking::biased_lock_entry_count_addr())++; |
goetz@6445 | 712 | } |
goetz@6445 | 713 | success = true; |
goetz@6445 | 714 | } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { |
goetz@6445 | 715 | // Try to revoke bias. |
goetz@6445 | 716 | markOop header = rcvr->klass()->prototype_header(); |
goetz@6445 | 717 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 718 | header = header->copy_set_hash(hash); |
goetz@6445 | 719 | } |
goetz@6445 | 720 | if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) { |
goetz@6445 | 721 | if (PrintBiasedLockingStatistics) |
goetz@6445 | 722 | (*BiasedLocking::revoked_lock_entry_count_addr())++; |
goetz@6445 | 723 | } |
goetz@6445 | 724 | } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) { |
goetz@6445 | 725 | // Try to rebias. |
goetz@6445 | 726 | markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident); |
goetz@6445 | 727 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 728 | new_header = new_header->copy_set_hash(hash); |
goetz@6445 | 729 | } |
goetz@6445 | 730 | if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) { |
goetz@6445 | 731 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 732 | (* BiasedLocking::rebiased_lock_entry_count_addr())++; |
duke@435 | 733 | } |
duke@435 | 734 | } else { |
goetz@6445 | 735 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); |
goetz@6445 | 736 | } |
goetz@6445 | 737 | success = true; |
goetz@6445 | 738 | } else { |
goetz@6445 | 739 | // Try to bias towards thread in case object is anonymously biased. |
goetz@6445 | 740 | markOop header = (markOop) ((uintptr_t) mark & |
goetz@6445 | 741 | ((uintptr_t)markOopDesc::biased_lock_mask_in_place | |
goetz@6445 | 742 | (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); |
goetz@6445 | 743 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 744 | header = header->copy_set_hash(hash); |
goetz@6445 | 745 | } |
goetz@6445 | 746 | markOop new_header = (markOop) ((uintptr_t) header | thread_ident); |
goetz@6445 | 747 | // Debugging hint. |
goetz@6445 | 748 | DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) |
goetz@6445 | 749 | if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) { |
goetz@6445 | 750 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 751 | (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; |
goetz@6445 | 752 | } |
goetz@6445 | 753 | } else { |
goetz@6445 | 754 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); |
goetz@6445 | 755 | } |
goetz@6445 | 756 | success = true; |
goetz@6445 | 757 | } |
goetz@6445 | 758 | } |
goetz@6445 | 759 | |
goetz@6445 | 760 | // Traditional lightweight locking. |
goetz@6445 | 761 | if (!success) { |
goetz@6445 | 762 | markOop displaced = rcvr->mark()->set_unlocked(); |
goetz@6445 | 763 | mon->lock()->set_displaced_header(displaced); |
goetz@6445 | 764 | bool call_vm = UseHeavyMonitors; |
goetz@6445 | 765 | if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) { |
goetz@6445 | 766 | // Is it simple recursive case? |
goetz@6445 | 767 | if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { |
goetz@6445 | 768 | mon->lock()->set_displaced_header(NULL); |
goetz@6445 | 769 | } else { |
goetz@6445 | 770 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception); |
duke@435 | 771 | } |
duke@435 | 772 | } |
goetz@6445 | 773 | } |
duke@435 | 774 | } |
duke@435 | 775 | THREAD->clr_do_not_unlock(); |
duke@435 | 776 | |
duke@435 | 777 | // Notify jvmti |
duke@435 | 778 | #ifdef VM_JVMTI |
duke@435 | 779 | if (_jvmti_interp_events) { |
duke@435 | 780 | // Whenever JVMTI puts a thread in interp_only_mode, method |
duke@435 | 781 | // entry/exit events are sent for that thread to track stack depth. |
duke@435 | 782 | if (THREAD->is_interp_only_mode()) { |
duke@435 | 783 | CALL_VM(InterpreterRuntime::post_method_entry(THREAD), |
duke@435 | 784 | handle_exception); |
duke@435 | 785 | } |
duke@435 | 786 | } |
duke@435 | 787 | #endif /* VM_JVMTI */ |
duke@435 | 788 | |
duke@435 | 789 | goto run; |
duke@435 | 790 | } |
duke@435 | 791 | |
duke@435 | 792 | case popping_frame: { |
duke@435 | 793 | // returned from a java call to pop the frame, restart the call |
duke@435 | 794 | // clear the message so we don't confuse ourselves later |
duke@435 | 795 | assert(THREAD->pop_frame_in_process(), "wrong frame pop state"); |
duke@435 | 796 | istate->set_msg(no_request); |
duke@435 | 797 | THREAD->clr_pop_frame_in_process(); |
duke@435 | 798 | goto run; |
duke@435 | 799 | } |
duke@435 | 800 | |
duke@435 | 801 | case method_resume: { |
duke@435 | 802 | if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) { |
duke@435 | 803 | // resume |
duke@435 | 804 | os::breakpoint(); |
duke@435 | 805 | } |
duke@435 | 806 | #ifdef HACK |
duke@435 | 807 | { |
duke@435 | 808 | ResourceMark rm; |
duke@435 | 809 | char *method_name = istate->method()->name_and_sig_as_C_string(); |
duke@435 | 810 | if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) { |
duke@435 | 811 | tty->print_cr("resume: depth %d bci: %d", |
duke@435 | 812 | (istate->_stack_base - istate->_stack) , |
duke@435 | 813 | istate->_bcp - istate->_method->code_base()); |
duke@435 | 814 | interesting = true; |
duke@435 | 815 | } |
duke@435 | 816 | } |
duke@435 | 817 | #endif // HACK |
duke@435 | 818 | // returned from a java call, continue executing. |
duke@435 | 819 | if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) { |
duke@435 | 820 | goto handle_Pop_Frame; |
duke@435 | 821 | } |
goetz@6450 | 822 | if (THREAD->jvmti_thread_state() && |
goetz@6450 | 823 | THREAD->jvmti_thread_state()->is_earlyret_pending()) { |
goetz@6450 | 824 | goto handle_Early_Return; |
goetz@6450 | 825 | } |
duke@435 | 826 | |
duke@435 | 827 | if (THREAD->has_pending_exception()) goto handle_exception; |
duke@435 | 828 | // Update the pc by the saved amount of the invoke bytecode size |
duke@435 | 829 | UPDATE_PC(istate->bcp_advance()); |
duke@435 | 830 | goto run; |
duke@435 | 831 | } |
duke@435 | 832 | |
duke@435 | 833 | case deopt_resume2: { |
duke@435 | 834 | // Returned from an opcode that will reexecute. Deopt was |
duke@435 | 835 | // a result of a PopFrame request. |
duke@435 | 836 | // |
duke@435 | 837 | goto run; |
duke@435 | 838 | } |
duke@435 | 839 | |
duke@435 | 840 | case deopt_resume: { |
duke@435 | 841 | // Returned from an opcode that has completed. The stack has |
duke@435 | 842 | // the result all we need to do is skip across the bytecode |
duke@435 | 843 | // and continue (assuming there is no exception pending) |
duke@435 | 844 | // |
duke@435 | 845 | // compute continuation length |
duke@435 | 846 | // |
duke@435 | 847 | // Note: it is possible to deopt at a return_register_finalizer opcode |
duke@435 | 848 | // because this requires entering the vm to do the registering. While the |
duke@435 | 849 | // opcode is complete we can't advance because there are no more opcodes |
duke@435 | 850 | // much like trying to deopt at a poll return. In that has we simply |
duke@435 | 851 | // get out of here |
duke@435 | 852 | // |
never@2462 | 853 | if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) { |
duke@435 | 854 | // this will do the right thing even if an exception is pending. |
duke@435 | 855 | goto handle_return; |
duke@435 | 856 | } |
never@2462 | 857 | UPDATE_PC(Bytecodes::length_at(METHOD, pc)); |
duke@435 | 858 | if (THREAD->has_pending_exception()) goto handle_exception; |
duke@435 | 859 | goto run; |
duke@435 | 860 | } |
duke@435 | 861 | case got_monitors: { |
duke@435 | 862 | // continue locking now that we have a monitor to use |
duke@435 | 863 | // we expect to find newly allocated monitor at the "top" of the monitor stack. |
duke@435 | 864 | oop lockee = STACK_OBJECT(-1); |
bobv@2036 | 865 | VERIFY_OOP(lockee); |
duke@435 | 866 | // derefing's lockee ought to provoke implicit null check |
duke@435 | 867 | // find a free monitor |
duke@435 | 868 | BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base(); |
duke@435 | 869 | assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor"); |
duke@435 | 870 | entry->set_obj(lockee); |
goetz@6445 | 871 | bool success = false; |
goetz@6445 | 872 | uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; |
goetz@6445 | 873 | |
goetz@6445 | 874 | markOop mark = lockee->mark(); |
goetz@6445 | 875 | intptr_t hash = (intptr_t) markOopDesc::no_hash; |
goetz@6445 | 876 | // implies UseBiasedLocking |
goetz@6445 | 877 | if (mark->has_bias_pattern()) { |
goetz@6445 | 878 | uintptr_t thread_ident; |
goetz@6445 | 879 | uintptr_t anticipated_bias_locking_value; |
goetz@6445 | 880 | thread_ident = (uintptr_t)istate->thread(); |
goetz@6445 | 881 | anticipated_bias_locking_value = |
goetz@6445 | 882 | (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & |
goetz@6445 | 883 | ~((uintptr_t) markOopDesc::age_mask_in_place); |
goetz@6445 | 884 | |
goetz@6445 | 885 | if (anticipated_bias_locking_value == 0) { |
goetz@6445 | 886 | // already biased towards this thread, nothing to do |
goetz@6445 | 887 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 888 | (* BiasedLocking::biased_lock_entry_count_addr())++; |
goetz@6445 | 889 | } |
goetz@6445 | 890 | success = true; |
goetz@6445 | 891 | } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { |
goetz@6445 | 892 | // try revoke bias |
goetz@6445 | 893 | markOop header = lockee->klass()->prototype_header(); |
goetz@6445 | 894 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 895 | header = header->copy_set_hash(hash); |
goetz@6445 | 896 | } |
goetz@6445 | 897 | if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { |
goetz@6445 | 898 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 899 | (*BiasedLocking::revoked_lock_entry_count_addr())++; |
goetz@6445 | 900 | } |
goetz@6445 | 901 | } |
goetz@6445 | 902 | } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { |
goetz@6445 | 903 | // try rebias |
goetz@6445 | 904 | markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); |
goetz@6445 | 905 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 906 | new_header = new_header->copy_set_hash(hash); |
goetz@6445 | 907 | } |
goetz@6445 | 908 | if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { |
goetz@6445 | 909 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 910 | (* BiasedLocking::rebiased_lock_entry_count_addr())++; |
goetz@6445 | 911 | } |
goetz@6445 | 912 | } else { |
goetz@6445 | 913 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); |
goetz@6445 | 914 | } |
goetz@6445 | 915 | success = true; |
duke@435 | 916 | } else { |
goetz@6445 | 917 | // try to bias towards thread in case object is anonymously biased |
goetz@6445 | 918 | markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | |
goetz@6445 | 919 | (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place)); |
goetz@6445 | 920 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 921 | header = header->copy_set_hash(hash); |
goetz@6445 | 922 | } |
goetz@6445 | 923 | markOop new_header = (markOop) ((uintptr_t) header | thread_ident); |
goetz@6445 | 924 | // debugging hint |
goetz@6445 | 925 | DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) |
goetz@6445 | 926 | if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { |
goetz@6445 | 927 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 928 | (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; |
goetz@6445 | 929 | } |
goetz@6445 | 930 | } else { |
goetz@6445 | 931 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); |
goetz@6445 | 932 | } |
goetz@6445 | 933 | success = true; |
goetz@6445 | 934 | } |
goetz@6445 | 935 | } |
goetz@6445 | 936 | |
goetz@6445 | 937 | // traditional lightweight locking |
goetz@6445 | 938 | if (!success) { |
goetz@6445 | 939 | markOop displaced = lockee->mark()->set_unlocked(); |
goetz@6445 | 940 | entry->lock()->set_displaced_header(displaced); |
goetz@6445 | 941 | bool call_vm = UseHeavyMonitors; |
goetz@6445 | 942 | if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { |
goetz@6445 | 943 | // Is it simple recursive case? |
goetz@6445 | 944 | if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { |
goetz@6445 | 945 | entry->lock()->set_displaced_header(NULL); |
goetz@6445 | 946 | } else { |
goetz@6445 | 947 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); |
goetz@6445 | 948 | } |
duke@435 | 949 | } |
duke@435 | 950 | } |
duke@435 | 951 | UPDATE_PC_AND_TOS(1, -1); |
duke@435 | 952 | goto run; |
duke@435 | 953 | } |
duke@435 | 954 | default: { |
duke@435 | 955 | fatal("Unexpected message from frame manager"); |
duke@435 | 956 | } |
duke@435 | 957 | } |
duke@435 | 958 | |
duke@435 | 959 | run: |
duke@435 | 960 | |
duke@435 | 961 | DO_UPDATE_INSTRUCTION_COUNT(*pc) |
duke@435 | 962 | DEBUGGER_SINGLE_STEP_NOTIFY(); |
duke@435 | 963 | #ifdef PREFETCH_OPCCODE |
duke@435 | 964 | opcode = *pc; /* prefetch first opcode */ |
duke@435 | 965 | #endif |
duke@435 | 966 | |
duke@435 | 967 | #ifndef USELABELS |
duke@435 | 968 | while (1) |
duke@435 | 969 | #endif |
duke@435 | 970 | { |
duke@435 | 971 | #ifndef PREFETCH_OPCCODE |
duke@435 | 972 | opcode = *pc; |
duke@435 | 973 | #endif |
duke@435 | 974 | // Seems like this happens twice per opcode. At worst this is only |
duke@435 | 975 | // need at entry to the loop. |
duke@435 | 976 | // DEBUGGER_SINGLE_STEP_NOTIFY(); |
duke@435 | 977 | /* Using this labels avoids double breakpoints when quickening and |
duke@435 | 978 | * when returing from transition frames. |
duke@435 | 979 | */ |
duke@435 | 980 | opcode_switch: |
duke@435 | 981 | assert(istate == orig, "Corrupted istate"); |
duke@435 | 982 | /* QQQ Hmm this has knowledge of direction, ought to be a stack method */ |
duke@435 | 983 | assert(topOfStack >= istate->stack_limit(), "Stack overrun"); |
duke@435 | 984 | assert(topOfStack < istate->stack_base(), "Stack underrun"); |
duke@435 | 985 | |
duke@435 | 986 | #ifdef USELABELS |
duke@435 | 987 | DISPATCH(opcode); |
duke@435 | 988 | #else |
duke@435 | 989 | switch (opcode) |
duke@435 | 990 | #endif |
duke@435 | 991 | { |
duke@435 | 992 | CASE(_nop): |
duke@435 | 993 | UPDATE_PC_AND_CONTINUE(1); |
duke@435 | 994 | |
duke@435 | 995 | /* Push miscellaneous constants onto the stack. */ |
duke@435 | 996 | |
duke@435 | 997 | CASE(_aconst_null): |
duke@435 | 998 | SET_STACK_OBJECT(NULL, 0); |
duke@435 | 999 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); |
duke@435 | 1000 | |
duke@435 | 1001 | #undef OPC_CONST_n |
duke@435 | 1002 | #define OPC_CONST_n(opcode, const_type, value) /* push a one-slot (int/float) literal */ \ |
duke@435 | 1003 | CASE(opcode): \ |
duke@435 | 1004 | SET_STACK_ ## const_type(value, 0); /* write constant at current tos */ \ |
duke@435 | 1005 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); /* 1-byte opcode, stack grows by 1 slot */ |
duke@435 | 1006 | |
duke@435 | 1007 | OPC_CONST_n(_iconst_m1, INT, -1); |
duke@435 | 1008 | OPC_CONST_n(_iconst_0, INT, 0); |
duke@435 | 1009 | OPC_CONST_n(_iconst_1, INT, 1); |
duke@435 | 1010 | OPC_CONST_n(_iconst_2, INT, 2); |
duke@435 | 1011 | OPC_CONST_n(_iconst_3, INT, 3); |
duke@435 | 1012 | OPC_CONST_n(_iconst_4, INT, 4); |
duke@435 | 1013 | OPC_CONST_n(_iconst_5, INT, 5); |
duke@435 | 1014 | OPC_CONST_n(_fconst_0, FLOAT, 0.0); |
duke@435 | 1015 | OPC_CONST_n(_fconst_1, FLOAT, 1.0); |
duke@435 | 1016 | OPC_CONST_n(_fconst_2, FLOAT, 2.0); |
duke@435 | 1017 | |
duke@435 | 1018 | #undef OPC_CONST2_n |
duke@435 | 1019 | #define OPC_CONST2_n(opcname, value, key, kind) /* push a two-slot (long/double) literal */ \ |
duke@435 | 1020 | CASE(_##opcname): \ |
duke@435 | 1021 | { \ |
duke@435 | 1022 | SET_STACK_ ## kind(VM##key##Const##value(), 1); /* e.g. VMdoubleConstZero() */ \ |
duke@435 | 1023 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); /* 1-byte opcode, stack grows by 2 slots */ \ |
duke@435 | 1024 | } |
duke@435 | 1025 | OPC_CONST2_n(dconst_0, Zero, double, DOUBLE); |
duke@435 | 1026 | OPC_CONST2_n(dconst_1, One, double, DOUBLE); |
duke@435 | 1027 | OPC_CONST2_n(lconst_0, Zero, long, LONG); |
duke@435 | 1028 | OPC_CONST2_n(lconst_1, One, long, LONG); |
duke@435 | 1029 | |
duke@435 | 1030 | /* Load constant from constant pool: */ |
duke@435 | 1031 | |
duke@435 | 1032 | /* Push a 1-byte signed integer value onto the stack. */ |
duke@435 | 1033 | CASE(_bipush): |
duke@435 | 1034 | SET_STACK_INT((jbyte)(pc[1]), 0); |
duke@435 | 1035 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); |
duke@435 | 1036 | |
duke@435 | 1037 | /* Push a 2-byte signed integer constant onto the stack. */ |
duke@435 | 1038 | CASE(_sipush): |
duke@435 | 1039 | SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0); |
duke@435 | 1040 | UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); |
duke@435 | 1041 | |
duke@435 | 1042 | /* load from local variable */ |
duke@435 | 1043 | |
duke@435 | 1044 | CASE(_aload): |
bobv@2036 | 1045 | VERIFY_OOP(LOCALS_OBJECT(pc[1])); |
duke@435 | 1046 | SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0); |
duke@435 | 1047 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); |
duke@435 | 1048 | |
duke@435 | 1049 | CASE(_iload): |
duke@435 | 1050 | CASE(_fload): |
duke@435 | 1051 | SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0); |
duke@435 | 1052 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1); |
duke@435 | 1053 | |
duke@435 | 1054 | CASE(_lload): |
duke@435 | 1055 | SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1); |
duke@435 | 1056 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); |
duke@435 | 1057 | |
duke@435 | 1058 | CASE(_dload): |
duke@435 | 1059 | SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1); |
duke@435 | 1060 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2); |
duke@435 | 1061 | |
duke@435 | 1062 | #undef OPC_LOAD_n |
duke@435 | 1063 | #define OPC_LOAD_n(num) /* handlers for aload_n/iload_n/fload_n/lload_n/dload_n, n in 0..3 */ \ |
duke@435 | 1064 | CASE(_aload_##num): \ |
bobv@2036 | 1065 | VERIFY_OOP(LOCALS_OBJECT(num)); /* debug-build oop sanity check */ \ |
duke@435 | 1066 | SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \ |
duke@435 | 1067 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ |
duke@435 | 1068 | \ |
duke@435 | 1069 | CASE(_iload_##num): /* int and float are both one untyped slot */ \ |
duke@435 | 1070 | CASE(_fload_##num): \ |
duke@435 | 1071 | SET_STACK_SLOT(LOCALS_SLOT(num), 0); \ |
duke@435 | 1072 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \ |
duke@435 | 1073 | \ |
duke@435 | 1074 | CASE(_lload_##num): /* two-slot loads copy via address of the local pair */ \ |
duke@435 | 1075 | SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \ |
duke@435 | 1076 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \ |
duke@435 | 1077 | CASE(_dload_##num): \ |
duke@435 | 1078 | SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \ |
duke@435 | 1079 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); |
duke@435 | 1080 | |
duke@435 | 1081 | OPC_LOAD_n(0); |
duke@435 | 1082 | OPC_LOAD_n(1); |
duke@435 | 1083 | OPC_LOAD_n(2); |
duke@435 | 1084 | OPC_LOAD_n(3); |
duke@435 | 1085 | |
duke@435 | 1086 | /* store to a local variable */ |
duke@435 | 1087 | |
duke@435 | 1088 | CASE(_astore): |
duke@435 | 1089 | astore(topOfStack, -1, locals, pc[1]); |
duke@435 | 1090 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); |
duke@435 | 1091 | |
duke@435 | 1092 | CASE(_istore): |
duke@435 | 1093 | CASE(_fstore): |
duke@435 | 1094 | SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]); |
duke@435 | 1095 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1); |
duke@435 | 1096 | |
duke@435 | 1097 | CASE(_lstore): |
duke@435 | 1098 | SET_LOCALS_LONG(STACK_LONG(-1), pc[1]); |
duke@435 | 1099 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); |
duke@435 | 1100 | |
duke@435 | 1101 | CASE(_dstore): |
duke@435 | 1102 | SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]); |
duke@435 | 1103 | UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2); |
duke@435 | 1104 | |
duke@435 | 1105 | CASE(_wide): { /* widened forms: 16-bit local index, iinc gets a 16-bit delta */ |
duke@435 | 1106 | uint16_t reg = Bytes::get_Java_u2(pc + 2); /* index follows the modified opcode */ |
duke@435 | 1107 | |
duke@435 | 1108 | opcode = pc[1]; /* the opcode being widened */ |
duke@435 | 1109 | switch(opcode) { |
duke@435 | 1110 | case Bytecodes::_aload: |
bobv@2036 | 1111 | VERIFY_OOP(LOCALS_OBJECT(reg)); |
duke@435 | 1112 | SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0); |
duke@435 | 1113 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); |
duke@435 | 1114 | |
duke@435 | 1115 | case Bytecodes::_iload: |
duke@435 | 1116 | case Bytecodes::_fload: |
duke@435 | 1117 | SET_STACK_SLOT(LOCALS_SLOT(reg), 0); |
duke@435 | 1118 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1); |
duke@435 | 1119 | |
duke@435 | 1120 | case Bytecodes::_lload: |
duke@435 | 1121 | SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1); |
duke@435 | 1122 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); |
duke@435 | 1123 | |
duke@435 | 1124 | case Bytecodes::_dload: |
duke@435 | 1125 | SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(reg), 1); /* was LOCALS_LONG_AT; same slots, now consistent with non-wide _dload */ |
duke@435 | 1126 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2); |
duke@435 | 1127 | |
duke@435 | 1128 | case Bytecodes::_astore: |
duke@435 | 1129 | astore(topOfStack, -1, locals, reg); |
duke@435 | 1130 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); |
duke@435 | 1131 | |
duke@435 | 1132 | case Bytecodes::_istore: |
duke@435 | 1133 | case Bytecodes::_fstore: |
duke@435 | 1134 | SET_LOCALS_SLOT(STACK_SLOT(-1), reg); |
duke@435 | 1135 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1); |
duke@435 | 1136 | |
duke@435 | 1137 | case Bytecodes::_lstore: |
duke@435 | 1138 | SET_LOCALS_LONG(STACK_LONG(-1), reg); |
duke@435 | 1139 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); |
duke@435 | 1140 | |
duke@435 | 1141 | case Bytecodes::_dstore: |
duke@435 | 1142 | SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg); |
duke@435 | 1143 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2); |
duke@435 | 1144 | |
duke@435 | 1145 | case Bytecodes::_iinc: { |
duke@435 | 1146 | int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4); /* signed 16-bit increment */ |
duke@435 | 1147 | // Be nice to see what this generates.... QQQ |
duke@435 | 1148 | SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg); |
duke@435 | 1149 | UPDATE_PC_AND_CONTINUE(6); /* wide iinc is 6 bytes: wide, iinc, u2 index, s2 const */ |
duke@435 | 1150 | } |
duke@435 | 1151 | case Bytecodes::_ret: |
duke@435 | 1152 | pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg)); /* local holds a bci, not an oop */ |
duke@435 | 1153 | UPDATE_PC_AND_CONTINUE(0); |
duke@435 | 1154 | default: |
duke@435 | 1155 | VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode"); |
duke@435 | 1156 | } |
duke@435 | 1157 | } |
duke@435 | 1158 | |
duke@435 | 1159 | |
duke@435 | 1160 | #undef OPC_STORE_n |
duke@435 | 1161 | #define OPC_STORE_n(num) /* one-slot stores to locals 0..3: astore_n/istore_n/fstore_n */ \ |
duke@435 | 1162 | CASE(_astore_##num): \ |
duke@435 | 1163 | astore(topOfStack, -1, locals, num); /* astore also accepts return addresses (for ret) */ \ |
duke@435 | 1164 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ |
duke@435 | 1165 | CASE(_istore_##num): /* int and float share the untyped one-slot path */ \ |
duke@435 | 1166 | CASE(_fstore_##num): \ |
duke@435 | 1167 | SET_LOCALS_SLOT(STACK_SLOT(-1), num); \ |
duke@435 | 1168 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); |
duke@435 | 1169 | |
duke@435 | 1170 | OPC_STORE_n(0); |
duke@435 | 1171 | OPC_STORE_n(1); |
duke@435 | 1172 | OPC_STORE_n(2); |
duke@435 | 1173 | OPC_STORE_n(3); |
duke@435 | 1174 | |
duke@435 | 1175 | #undef OPC_DSTORE_n |
duke@435 | 1176 | #define OPC_DSTORE_n(num) /* two-slot stores to locals 0..3: dstore_n/lstore_n */ \ |
duke@435 | 1177 | CASE(_dstore_##num): \ |
duke@435 | 1178 | SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \ |
duke@435 | 1179 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); /* pops the two-slot value */ \ |
duke@435 | 1180 | CASE(_lstore_##num): \ |
duke@435 | 1181 | SET_LOCALS_LONG(STACK_LONG(-1), num); \ |
duke@435 | 1182 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); |
duke@435 | 1183 | |
duke@435 | 1184 | OPC_DSTORE_n(0); |
duke@435 | 1185 | OPC_DSTORE_n(1); |
duke@435 | 1186 | OPC_DSTORE_n(2); |
duke@435 | 1187 | OPC_DSTORE_n(3); |
duke@435 | 1188 | |
duke@435 | 1189 | /* stack pop, dup, and insert opcodes */ |
duke@435 | 1190 | |
duke@435 | 1191 | |
duke@435 | 1192 | CASE(_pop): /* Discard the top item on the stack */ |
duke@435 | 1193 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); |
duke@435 | 1194 | |
duke@435 | 1195 | |
duke@435 | 1196 | CASE(_pop2): /* Discard the top 2 items on the stack */ |
duke@435 | 1197 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); |
duke@435 | 1198 | |
duke@435 | 1199 | |
duke@435 | 1200 | CASE(_dup): /* Duplicate the top item on the stack */ |
duke@435 | 1201 | dup(topOfStack); |
duke@435 | 1202 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); |
duke@435 | 1203 | |
duke@435 | 1204 | CASE(_dup2): /* Duplicate the top 2 items on the stack */ |
duke@435 | 1205 | dup2(topOfStack); |
duke@435 | 1206 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); |
duke@435 | 1207 | |
duke@435 | 1208 | CASE(_dup_x1): /* insert top word two down */ |
duke@435 | 1209 | dup_x1(topOfStack); |
duke@435 | 1210 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); |
duke@435 | 1211 | |
duke@435 | 1212 | CASE(_dup_x2): /* insert top word three down */ |
duke@435 | 1213 | dup_x2(topOfStack); |
duke@435 | 1214 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); |
duke@435 | 1215 | |
duke@435 | 1216 | CASE(_dup2_x1): /* insert top 2 slots three down */ |
duke@435 | 1217 | dup2_x1(topOfStack); |
duke@435 | 1218 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); |
duke@435 | 1219 | |
duke@435 | 1220 | CASE(_dup2_x2): /* insert top 2 slots four down */ |
duke@435 | 1221 | dup2_x2(topOfStack); |
duke@435 | 1222 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); |
duke@435 | 1223 | |
duke@435 | 1224 | CASE(_swap): { /* swap top two elements on the stack */ |
duke@435 | 1225 | swap(topOfStack); |
duke@435 | 1226 | UPDATE_PC_AND_CONTINUE(1); |
duke@435 | 1227 | } |
duke@435 | 1228 | |
duke@435 | 1229 | /* Perform various binary integer operations */ |
duke@435 | 1230 | |
duke@435 | 1231 | #undef OPC_INT_BINARY |
duke@435 | 1232 | #define OPC_INT_BINARY(opcname, opname, test) /* int+long binary ops; 'test'!=0 adds a divide-by-zero check */ \ |
duke@435 | 1233 | CASE(_i##opcname): \ |
duke@435 | 1234 | if (test && (STACK_INT(-1) == 0)) { /* divisor is tos for div/rem */ \ |
duke@435 | 1235 | VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ |
bobv@2036 | 1236 | "/ by zero"); \ |
duke@435 | 1237 | } \ |
duke@435 | 1238 | SET_STACK_INT(VMint##opname(STACK_INT(-2), \ |
duke@435 | 1239 | STACK_INT(-1)), \ |
duke@435 | 1240 | -2); /* result replaces the first operand */ \ |
duke@435 | 1241 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ |
duke@435 | 1242 | CASE(_l##opcname): \ |
duke@435 | 1243 | { \ |
duke@435 | 1244 | if (test) { /* compile-time constant: branch folds away for non-div ops */ \ |
duke@435 | 1245 | jlong l1 = STACK_LONG(-1); \ |
duke@435 | 1246 | if (VMlongEqz(l1)) { \ |
duke@435 | 1247 | VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \ |
duke@435 | 1248 | "/ by long zero"); \ |
duke@435 | 1249 | } \ |
duke@435 | 1250 | } \ |
duke@435 | 1251 | /* First long at (-1,-2) next long at (-3,-4) */ \ |
duke@435 | 1252 | SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \ |
duke@435 | 1253 | STACK_LONG(-1)), \ |
duke@435 | 1254 | -3); \ |
duke@435 | 1255 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); /* two two-slot operands -> one two-slot result */ \ |
duke@435 | 1256 | } |
duke@435 | 1257 | |
duke@435 | 1258 | OPC_INT_BINARY(add, Add, 0); |
duke@435 | 1259 | OPC_INT_BINARY(sub, Sub, 0); |
duke@435 | 1260 | OPC_INT_BINARY(mul, Mul, 0); |
duke@435 | 1261 | OPC_INT_BINARY(and, And, 0); |
duke@435 | 1262 | OPC_INT_BINARY(or, Or, 0); |
duke@435 | 1263 | OPC_INT_BINARY(xor, Xor, 0); |
duke@435 | 1264 | OPC_INT_BINARY(div, Div, 1); |
duke@435 | 1265 | OPC_INT_BINARY(rem, Rem, 1); |
duke@435 | 1266 | |
duke@435 | 1267 | |
      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

      /* OPC_FLOAT_BINARY(opcname, opname) expands into the double handler
         (_d<opcname>, two-slot operands) and the float handler (_f<opcname>,
         one-slot operands).  No explicit divide-by-zero test here: floating
         division follows IEEE-754 (Inf/NaN results), as the JVMS requires. */
#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                               \
      CASE(_d##opcname): {                                              \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),           \
                                            STACK_DOUBLE(-1)),          \
                                            -3);                        \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }                                                                 \
      CASE(_f##opcname):                                                \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),              \
                                          STACK_FLOAT(-1)),             \
                                          -2);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);


      OPC_FLOAT_BINARY(add, Add);
      OPC_FLOAT_BINARY(sub, Sub);
      OPC_FLOAT_BINARY(mul, Mul);
      OPC_FLOAT_BINARY(div, Div);
      OPC_FLOAT_BINARY(rem, Rem);
duke@435 | 1291 | |
      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

      /* Note: even for the long forms the shift count is a one-slot int at
         tos-1 (hence STACK_INT(-1) below); only the shifted value is a
         two-slot long, so both forms pop exactly one slot. */
#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
                                     STACK_INT(-1)),                    \
                                     -2);                               \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      CASE(_l##opcname):                                                \
      {                                                                 \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
                                       STACK_INT(-1)),                  \
                                       -2);                             \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);
duke@435 | 1316 | |
      /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          // pc[1] is the local-variable index, pc[2] the signed 8-bit delta.
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);   // 3-byte instruction; stack untouched
      }

      /* negate the value on the top of the stack */

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         // Two-slot value negated in place; stack height unchanged.
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }
duke@435 | 1346 | |
      /* Conversion operations.
         Widening int->long/double and float->double pop the one-slot source
         with MORE_STACK(-1) and push a two-slot result; narrowing does the
         reverse.  SharedRuntime::f2i/f2l/d2i/d2l implement the JVMS
         saturating conversions (NaN maps to 0, out-of-range saturates). */

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):       /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):       /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):       /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;   // widening: every float is exactly representable
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i):       /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f):       /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l):       /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      /* i2b/i2c/i2s truncate in place: the operand stays a one-slot int,
         only the value range is narrowed (sign- or zero-extended back). */
      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
duke@435 | 1455 | |
      /* comparison operators */


      /* COMPARISON_OP generates _if_icmp<name> (two int operands) and
         _if<name> (one int operand compared against 0).  When the branch is
         taken, 'skip' is the signed 16-bit offset read from the bytecode
         stream; otherwise it is 3, the length of the branch instruction.
         branch_pc is captured before the pc moves so DO_BACKEDGE_CHECKS can
         recognize a backward branch (the safepoint/OSR logic lives in that
         macro). */
#define COMPARISON_OP(name, comparison)                                 \
      CASE(_if_icmp##name): {                                           \
          int skip = (STACK_INT(-2) comparison STACK_INT(-1))           \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;        \
          address branch_pc = pc;                                       \
          UPDATE_PC_AND_TOS(skip, -2);                                  \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                          \
          CONTINUE;                                                     \
      }                                                                 \
      CASE(_if##name): {                                                \
          int skip = (STACK_INT(-1) comparison 0)                       \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;        \
          address branch_pc = pc;                                       \
          UPDATE_PC_AND_TOS(skip, -1);                                  \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                          \
          CONTINUE;                                                     \
      }

      /* COMPARISON_OP2 additionally generates the reference comparison
         _if_acmp<name>; only instantiated for eq/ne. */
#define COMPARISON_OP2(name, comparison)                                \
      COMPARISON_OP(name, comparison)                                   \
      CASE(_if_acmp##name): {                                           \
          int skip = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1))     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;        \
          address branch_pc = pc;                                       \
          UPDATE_PC_AND_TOS(skip, -2);                                  \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                          \
          CONTINUE;                                                     \
      }

      /* ifnonnull: branch when the reference on top of stack is not null */
#define NULL_COMPARISON_NOT_OP(name)                                    \
      CASE(_if##name): {                                                \
          int skip = (!(STACK_OBJECT(-1) == NULL))                      \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;        \
          address branch_pc = pc;                                       \
          UPDATE_PC_AND_TOS(skip, -1);                                  \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                          \
          CONTINUE;                                                     \
      }

      /* ifnull: branch when the reference on top of stack is null */
#define NULL_COMPARISON_OP(name)                                        \
      CASE(_if##name): {                                                \
          int skip = ((STACK_OBJECT(-1) == NULL))                       \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;        \
          address branch_pc = pc;                                       \
          UPDATE_PC_AND_TOS(skip, -1);                                  \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                          \
          CONTINUE;                                                     \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);
duke@435 | 1515 | |
      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          // Operands start at the next 4-byte-aligned address after the
          // opcode (VMalignWordUp): default offset, low, high, then
          // (high - low + 1) jump offsets.
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          // Single unsigned compare catches both key < low and key > high.
          skip = ((uint32_t) key > (uint32_t)(high - low))
                      ? Bytes::get_Java_u4((address)&lpc[0])
                      : Bytes::get_Java_u4((address)&lpc[key + 3]);
          // Does this really need a full backedge check (osr?)
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key */

      CASE(_lookupswitch): {
          // Operands (4-byte aligned): default offset, npairs, then npairs
          // (match, offset) pairs.  Linear search; the JVMS guarantees the
          // pairs are sorted by match value, but that is not exploited here.
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
              lpc += 2;
              if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
                  skip = Bytes::get_Java_u4((address)&lpc[1]);
                  break;
              }
          }
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
duke@435 | 1554 | |
      CASE(_fcmpl):
      CASE(_fcmpg):
      {
          // Third argument is the value to produce for unordered (NaN)
          // operands: -1 for the 'l' variant, +1 for the 'g' variant (JVMS).
          SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
                                       STACK_FLOAT(-1),
                                       (opcode == Bytecodes::_fcmpl ? -1 : 1)),
                        -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }

      CASE(_dcmpl):
      CASE(_dcmpg):
      {
          int r = VMdoubleCompare(STACK_DOUBLE(-3),
                                  STACK_DOUBLE(-1),
                                  (opcode == Bytecodes::_dcmpl ? -1 : 1));
          MORE_STACK(-4); // Pop both two-slot doubles
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_lcmp):
      {
          int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
          MORE_STACK(-4);  // pop both two-slot longs, push one-slot result
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }
duke@435 | 1583 | |
duke@435 | 1584 | |
      /* Return from a method.  All variants funnel into handle_return;
         the return value itself is picked up from the expression stack by
         the shared return path outside this switch. */

      CASE(_areturn):
      CASE(_ireturn):
      CASE(_freturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;

          goto handle_return;
      }

      CASE(_lreturn):
      CASE(_dreturn):
      {
          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }

      CASE(_return_register_finalizer): {
          // Return path for receivers whose class has a finalizer: register
          // the receiver (local 0) with the runtime before returning.
          oop rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
          if (rcvr->klass()->has_finalizer()) {
            CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
          }
          goto handle_return;
      }
      CASE(_return): {

          // Allow a safepoint before returning to frame manager.
          SAFEPOINT;
          goto handle_return;
      }
duke@435 | 1620 | |
      /* Array access byte-codes */

      /* Every array access byte-code starts out like this */
      // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
      /* ARRAY_INTRO: fetch array ref and index from the stack, null-check
         the array, then bounds-check (one unsigned compare also rejects
         negative indexes), throwing ArrayIndexOutOfBoundsException with the
         offending index as message.  NOTE(review): sprintf here is safe only
         because jintAsStringSize bounds the longest jint decimal string. */
#define ARRAY_INTRO(arrayOff)                                           \
      arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff);               \
      jint     index  = STACK_INT(arrayOff + 1);                        \
      char message[jintAsStringSize];                                   \
      CHECK_NULL(arrObj);                                               \
      if ((uint32_t)index >= (uint32_t)arrObj->length()) {              \
          sprintf(message, "%d", index);                                \
          VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
                        message);                                       \
      }

      /* 32-bit loads. These handle conversion from < 32-bit types */
#define ARRAY_LOADTO32(T, T2, format, stackRes, extra)                  \
      {                                                                 \
          ARRAY_INTRO(-2);                                              \
          extra;                                                        \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
                           -2);                                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      }

      /* 64-bit loads */
#define ARRAY_LOADTO64(T,T2, stackRes, extra)                           \
      {                                                                 \
          ARRAY_INTRO(-2);                                              \
          SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
          extra;                                                        \
          UPDATE_PC_AND_CONTINUE(1);                                    \
      }

      CASE(_iaload):
          ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_faload):
          ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      CASE(_aaload): {
          // Object loads go through obj_at() rather than a raw typed load so
          // the oop-aware accessor handles the element representation.
          ARRAY_INTRO(-2);
          SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      CASE(_baload):
          ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_caload):
          ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_saload):
          ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_laload):
          ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_daload):
          ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
duke@435 | 1674 | |
      /* 32-bit stores. These handle conversion to < 32-bit types */
#define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra)               \
      {                                                                 \
          ARRAY_INTRO(-3);                                              \
          extra;                                                        \
          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);                        \
      }

      /* 64-bit stores */
#define ARRAY_STOREFROM64(T, T2, stackSrc, extra)                       \
      {                                                                 \
          ARRAY_INTRO(-4);                                              \
          extra;                                                        \
          *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4);                        \
      }

      CASE(_iastore):
          ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
      CASE(_fastore):
          ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
      /*
       * This one looks different because of the assignability check
       */
      CASE(_aastore): {
          oop rhsObject = STACK_OBJECT(-1);
          VERIFY_OOP(rhsObject);
          ARRAY_INTRO( -3);
          // arrObj, index are set
          if (rhsObject != NULL) {
            /* Check assignability of rhsObject into arrObj */
            Klass* rhsKlassOop = rhsObject->klass(); // EBX (subclass)
            Klass* elemKlassOop = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
            //
            // Check for compatibilty. This check must not GC!!
            // Seems way more expensive now that we must dispatch
            //
            if (rhsKlassOop != elemKlassOop && !rhsKlassOop->is_subtype_of(elemKlassOop)) { // ebx->is...
              VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "");
            }
          }
          // Storing null needs no type check; obj_at_put is the oop-aware
          // store for object arrays.
          ((objArrayOopDesc *) arrObj)->obj_at_put(index, rhsObject);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
      }
      CASE(_bastore):
          ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
      CASE(_castore):
          ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
      CASE(_sastore):
          ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
      CASE(_lastore):
          ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
      CASE(_dastore):
          ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
duke@435 | 1730 | |
      CASE(_arraylength):
      {
          // Null-check the reference, then replace it in place with the
          // array's length (one-slot int result, stack height unchanged).
          arrayOop ary = (arrayOop) STACK_OBJECT(-1);
          CHECK_NULL(ary);
          SET_STACK_INT(ary->length(), -1);
          UPDATE_PC_AND_CONTINUE(1);
      }
duke@435 | 1738 | |
duke@435 | 1739 | /* monitorenter and monitorexit for locking/unlocking an object */ |
duke@435 | 1740 | |
duke@435 | 1741 | CASE(_monitorenter): { |
duke@435 | 1742 | oop lockee = STACK_OBJECT(-1); |
duke@435 | 1743 | // derefing's lockee ought to provoke implicit null check |
duke@435 | 1744 | CHECK_NULL(lockee); |
duke@435 | 1745 | // find a free monitor or one already allocated for this object |
duke@435 | 1746 | // if we find a matching object then we need a new monitor |
duke@435 | 1747 | // since this is recursive enter |
duke@435 | 1748 | BasicObjectLock* limit = istate->monitor_base(); |
duke@435 | 1749 | BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base(); |
duke@435 | 1750 | BasicObjectLock* entry = NULL; |
duke@435 | 1751 | while (most_recent != limit ) { |
duke@435 | 1752 | if (most_recent->obj() == NULL) entry = most_recent; |
duke@435 | 1753 | else if (most_recent->obj() == lockee) break; |
duke@435 | 1754 | most_recent++; |
duke@435 | 1755 | } |
duke@435 | 1756 | if (entry != NULL) { |
duke@435 | 1757 | entry->set_obj(lockee); |
goetz@6445 | 1758 | int success = false; |
goetz@6445 | 1759 | uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place; |
goetz@6445 | 1760 | |
goetz@6445 | 1761 | markOop mark = lockee->mark(); |
goetz@6445 | 1762 | intptr_t hash = (intptr_t) markOopDesc::no_hash; |
goetz@6445 | 1763 | // implies UseBiasedLocking |
goetz@6445 | 1764 | if (mark->has_bias_pattern()) { |
goetz@6445 | 1765 | uintptr_t thread_ident; |
goetz@6445 | 1766 | uintptr_t anticipated_bias_locking_value; |
goetz@6445 | 1767 | thread_ident = (uintptr_t)istate->thread(); |
goetz@6445 | 1768 | anticipated_bias_locking_value = |
goetz@6445 | 1769 | (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) & |
goetz@6445 | 1770 | ~((uintptr_t) markOopDesc::age_mask_in_place); |
goetz@6445 | 1771 | |
goetz@6445 | 1772 | if (anticipated_bias_locking_value == 0) { |
goetz@6445 | 1773 | // already biased towards this thread, nothing to do |
goetz@6445 | 1774 | if (PrintBiasedLockingStatistics) { |
goetz@6445 | 1775 | (* BiasedLocking::biased_lock_entry_count_addr())++; |
goetz@6445 | 1776 | } |
goetz@6445 | 1777 | success = true; |
goetz@6445 | 1778 | } |
goetz@6445 | 1779 | else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) { |
goetz@6445 | 1780 | // try revoke bias |
goetz@6445 | 1781 | markOop header = lockee->klass()->prototype_header(); |
goetz@6445 | 1782 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 1783 | header = header->copy_set_hash(hash); |
goetz@6445 | 1784 | } |
goetz@6445 | 1785 | if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) { |
goetz@6445 | 1786 | if (PrintBiasedLockingStatistics) |
goetz@6445 | 1787 | (*BiasedLocking::revoked_lock_entry_count_addr())++; |
goetz@6445 | 1788 | } |
goetz@6445 | 1789 | } |
goetz@6445 | 1790 | else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) { |
goetz@6445 | 1791 | // try rebias |
goetz@6445 | 1792 | markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident); |
goetz@6445 | 1793 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 1794 | new_header = new_header->copy_set_hash(hash); |
goetz@6445 | 1795 | } |
goetz@6445 | 1796 | if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) { |
goetz@6445 | 1797 | if (PrintBiasedLockingStatistics) |
goetz@6445 | 1798 | (* BiasedLocking::rebiased_lock_entry_count_addr())++; |
goetz@6445 | 1799 | } |
goetz@6445 | 1800 | else { |
goetz@6445 | 1801 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); |
goetz@6445 | 1802 | } |
goetz@6445 | 1803 | success = true; |
goetz@6445 | 1804 | } |
goetz@6445 | 1805 | else { |
goetz@6445 | 1806 | // try to bias towards thread in case object is anonymously biased |
goetz@6445 | 1807 | markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place | |
goetz@6445 | 1808 | (uintptr_t)markOopDesc::age_mask_in_place | |
goetz@6445 | 1809 | epoch_mask_in_place)); |
goetz@6445 | 1810 | if (hash != markOopDesc::no_hash) { |
goetz@6445 | 1811 | header = header->copy_set_hash(hash); |
goetz@6445 | 1812 | } |
goetz@6445 | 1813 | markOop new_header = (markOop) ((uintptr_t) header | thread_ident); |
goetz@6445 | 1814 | // debugging hint |
goetz@6445 | 1815 | DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);) |
goetz@6445 | 1816 | if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) { |
goetz@6445 | 1817 | if (PrintBiasedLockingStatistics) |
goetz@6445 | 1818 | (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++; |
goetz@6445 | 1819 | } |
goetz@6445 | 1820 | else { |
goetz@6445 | 1821 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); |
goetz@6445 | 1822 | } |
goetz@6445 | 1823 | success = true; |
goetz@6445 | 1824 | } |
goetz@6445 | 1825 | } |
goetz@6445 | 1826 | |
goetz@6445 | 1827 | // traditional lightweight locking |
goetz@6445 | 1828 | if (!success) { |
goetz@6445 | 1829 | markOop displaced = lockee->mark()->set_unlocked(); |
goetz@6445 | 1830 | entry->lock()->set_displaced_header(displaced); |
goetz@6445 | 1831 | bool call_vm = UseHeavyMonitors; |
goetz@6445 | 1832 | if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) { |
goetz@6445 | 1833 | // Is it simple recursive case? |
goetz@6445 | 1834 | if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) { |
goetz@6445 | 1835 | entry->lock()->set_displaced_header(NULL); |
goetz@6445 | 1836 | } else { |
goetz@6445 | 1837 | CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception); |
goetz@6445 | 1838 | } |
duke@435 | 1839 | } |
duke@435 | 1840 | } |
duke@435 | 1841 | UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); |
duke@435 | 1842 | } else { |
duke@435 | 1843 | istate->set_msg(more_monitors); |
duke@435 | 1844 | UPDATE_PC_AND_RETURN(0); // Re-execute |
duke@435 | 1845 | } |
duke@435 | 1846 | } |
duke@435 | 1847 | |
      /* monitorexit: locate this object's BasicObjectLock slot and release
         it.  A NULL displaced header marks a recursive (nested) lock, which
         needs no unlock work; a biased lock is released by simply clearing
         the slot.  Statement order is load-bearing: the slot is cleared
         optimistically and restored only if the runtime slow path is taken. */
      CASE(_monitorexit): {
        oop lockee = STACK_OBJECT(-1);
        CHECK_NULL(lockee);
        // derefing's lockee ought to provoke implicit null check
        // find our monitor slot
        BasicObjectLock* limit = istate->monitor_base();
        BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
        while (most_recent != limit ) {
          if ((most_recent)->obj() == lockee) {
            BasicLock* lock = most_recent->lock();
            markOop header = lock->displaced_header();
            most_recent->set_obj(NULL);
            if (!lockee->mark()->has_bias_pattern()) {
              bool call_vm = UseHeavyMonitors;
              // If it isn't recursive we either must swap old header or call the runtime
              if (header != NULL || call_vm) {
                if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
                  // restore object for the slow case
                  most_recent->set_obj(lockee);
                  CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
                }
              }
            }
            UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
          }
          most_recent++;
        }
        // Need to throw illegal monitor state exception
        CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
        ShouldNotReachHere();
      }
duke@435 | 1879 | |
duke@435 | 1880 | /* All of the non-quick opcodes. */ |
duke@435 | 1881 | |
duke@435 | 1882 | /* -Set clobbersCpIndex true if the quickened opcode clobbers the |
duke@435 | 1883 | * constant pool index in the instruction. |
duke@435 | 1884 | */ |
duke@435 | 1885 | CASE(_getfield): |
duke@435 | 1886 | CASE(_getstatic): |
duke@435 | 1887 | { |
duke@435 | 1888 | u2 index; |
duke@435 | 1889 | ConstantPoolCacheEntry* cache; |
duke@435 | 1890 | index = Bytes::get_native_u2(pc+1); |
duke@435 | 1891 | |
duke@435 | 1892 | // QQQ Need to make this as inlined as possible. Probably need to |
duke@435 | 1893 | // split all the bytecode cases out so c++ compiler has a chance |
duke@435 | 1894 | // for constant prop to fold everything possible away. |
duke@435 | 1895 | |
duke@435 | 1896 | cache = cp->entry_at(index); |
duke@435 | 1897 | if (!cache->is_resolved((Bytecodes::Code)opcode)) { |
duke@435 | 1898 | CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), |
duke@435 | 1899 | handle_exception); |
duke@435 | 1900 | cache = cp->entry_at(index); |
duke@435 | 1901 | } |
duke@435 | 1902 | |
duke@435 | 1903 | #ifdef VM_JVMTI |
duke@435 | 1904 | if (_jvmti_interp_events) { |
duke@435 | 1905 | int *count_addr; |
duke@435 | 1906 | oop obj; |
duke@435 | 1907 | // Check to see if a field modification watch has been set |
duke@435 | 1908 | // before we take the time to call into the VM. |
duke@435 | 1909 | count_addr = (int *)JvmtiExport::get_field_access_count_addr(); |
duke@435 | 1910 | if ( *count_addr > 0 ) { |
duke@435 | 1911 | if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { |
duke@435 | 1912 | obj = (oop)NULL; |
duke@435 | 1913 | } else { |
duke@435 | 1914 | obj = (oop) STACK_OBJECT(-1); |
bobv@2036 | 1915 | VERIFY_OOP(obj); |
duke@435 | 1916 | } |
duke@435 | 1917 | CALL_VM(InterpreterRuntime::post_field_access(THREAD, |
duke@435 | 1918 | obj, |
duke@435 | 1919 | cache), |
duke@435 | 1920 | handle_exception); |
duke@435 | 1921 | } |
duke@435 | 1922 | } |
duke@435 | 1923 | #endif /* VM_JVMTI */ |
duke@435 | 1924 | |
duke@435 | 1925 | oop obj; |
duke@435 | 1926 | if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) { |
twisti@4237 | 1927 | Klass* k = cache->f1_as_klass(); |
coleenp@4037 | 1928 | obj = k->java_mirror(); |
duke@435 | 1929 | MORE_STACK(1); // Assume single slot push |
duke@435 | 1930 | } else { |
duke@435 | 1931 | obj = (oop) STACK_OBJECT(-1); |
duke@435 | 1932 | CHECK_NULL(obj); |
duke@435 | 1933 | } |
duke@435 | 1934 | |
duke@435 | 1935 | // |
duke@435 | 1936 | // Now store the result on the stack |
duke@435 | 1937 | // |
duke@435 | 1938 | TosState tos_type = cache->flag_state(); |
twisti@3969 | 1939 | int field_offset = cache->f2_as_index(); |
duke@435 | 1940 | if (cache->is_volatile()) { |
duke@435 | 1941 | if (tos_type == atos) { |
bobv@2036 | 1942 | VERIFY_OOP(obj->obj_field_acquire(field_offset)); |
duke@435 | 1943 | SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1); |
duke@435 | 1944 | } else if (tos_type == itos) { |
duke@435 | 1945 | SET_STACK_INT(obj->int_field_acquire(field_offset), -1); |
duke@435 | 1946 | } else if (tos_type == ltos) { |
duke@435 | 1947 | SET_STACK_LONG(obj->long_field_acquire(field_offset), 0); |
duke@435 | 1948 | MORE_STACK(1); |
duke@435 | 1949 | } else if (tos_type == btos) { |
duke@435 | 1950 | SET_STACK_INT(obj->byte_field_acquire(field_offset), -1); |
duke@435 | 1951 | } else if (tos_type == ctos) { |
duke@435 | 1952 | SET_STACK_INT(obj->char_field_acquire(field_offset), -1); |
duke@435 | 1953 | } else if (tos_type == stos) { |
duke@435 | 1954 | SET_STACK_INT(obj->short_field_acquire(field_offset), -1); |
duke@435 | 1955 | } else if (tos_type == ftos) { |
duke@435 | 1956 | SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1); |
duke@435 | 1957 | } else { |
duke@435 | 1958 | SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0); |
duke@435 | 1959 | MORE_STACK(1); |
duke@435 | 1960 | } |
duke@435 | 1961 | } else { |
duke@435 | 1962 | if (tos_type == atos) { |
bobv@2036 | 1963 | VERIFY_OOP(obj->obj_field(field_offset)); |
duke@435 | 1964 | SET_STACK_OBJECT(obj->obj_field(field_offset), -1); |
duke@435 | 1965 | } else if (tos_type == itos) { |
duke@435 | 1966 | SET_STACK_INT(obj->int_field(field_offset), -1); |
duke@435 | 1967 | } else if (tos_type == ltos) { |
duke@435 | 1968 | SET_STACK_LONG(obj->long_field(field_offset), 0); |
duke@435 | 1969 | MORE_STACK(1); |
duke@435 | 1970 | } else if (tos_type == btos) { |
duke@435 | 1971 | SET_STACK_INT(obj->byte_field(field_offset), -1); |
duke@435 | 1972 | } else if (tos_type == ctos) { |
duke@435 | 1973 | SET_STACK_INT(obj->char_field(field_offset), -1); |
duke@435 | 1974 | } else if (tos_type == stos) { |
duke@435 | 1975 | SET_STACK_INT(obj->short_field(field_offset), -1); |
duke@435 | 1976 | } else if (tos_type == ftos) { |
duke@435 | 1977 | SET_STACK_FLOAT(obj->float_field(field_offset), -1); |
duke@435 | 1978 | } else { |
duke@435 | 1979 | SET_STACK_DOUBLE(obj->double_field(field_offset), 0); |
duke@435 | 1980 | MORE_STACK(1); |
duke@435 | 1981 | } |
duke@435 | 1982 | } |
duke@435 | 1983 | |
duke@435 | 1984 | UPDATE_PC_AND_CONTINUE(3); |
duke@435 | 1985 | } |
duke@435 | 1986 | |
duke@435 | 1987 | CASE(_putfield): |
duke@435 | 1988 | CASE(_putstatic): |
duke@435 | 1989 | { |
duke@435 | 1990 | u2 index = Bytes::get_native_u2(pc+1); |
duke@435 | 1991 | ConstantPoolCacheEntry* cache = cp->entry_at(index); |
duke@435 | 1992 | if (!cache->is_resolved((Bytecodes::Code)opcode)) { |
duke@435 | 1993 | CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode), |
duke@435 | 1994 | handle_exception); |
duke@435 | 1995 | cache = cp->entry_at(index); |
duke@435 | 1996 | } |
duke@435 | 1997 | |
duke@435 | 1998 | #ifdef VM_JVMTI |
duke@435 | 1999 | if (_jvmti_interp_events) { |
duke@435 | 2000 | int *count_addr; |
duke@435 | 2001 | oop obj; |
duke@435 | 2002 | // Check to see if a field modification watch has been set |
duke@435 | 2003 | // before we take the time to call into the VM. |
duke@435 | 2004 | count_addr = (int *)JvmtiExport::get_field_modification_count_addr(); |
duke@435 | 2005 | if ( *count_addr > 0 ) { |
duke@435 | 2006 | if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { |
duke@435 | 2007 | obj = (oop)NULL; |
duke@435 | 2008 | } |
duke@435 | 2009 | else { |
duke@435 | 2010 | if (cache->is_long() || cache->is_double()) { |
duke@435 | 2011 | obj = (oop) STACK_OBJECT(-3); |
duke@435 | 2012 | } else { |
duke@435 | 2013 | obj = (oop) STACK_OBJECT(-2); |
duke@435 | 2014 | } |
bobv@2036 | 2015 | VERIFY_OOP(obj); |
duke@435 | 2016 | } |
duke@435 | 2017 | |
duke@435 | 2018 | CALL_VM(InterpreterRuntime::post_field_modification(THREAD, |
duke@435 | 2019 | obj, |
duke@435 | 2020 | cache, |
duke@435 | 2021 | (jvalue *)STACK_SLOT(-1)), |
duke@435 | 2022 | handle_exception); |
duke@435 | 2023 | } |
duke@435 | 2024 | } |
duke@435 | 2025 | #endif /* VM_JVMTI */ |
duke@435 | 2026 | |
duke@435 | 2027 | // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases |
duke@435 | 2028 | // out so c++ compiler has a chance for constant prop to fold everything possible away. |
duke@435 | 2029 | |
duke@435 | 2030 | oop obj; |
duke@435 | 2031 | int count; |
duke@435 | 2032 | TosState tos_type = cache->flag_state(); |
duke@435 | 2033 | |
duke@435 | 2034 | count = -1; |
duke@435 | 2035 | if (tos_type == ltos || tos_type == dtos) { |
duke@435 | 2036 | --count; |
duke@435 | 2037 | } |
duke@435 | 2038 | if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) { |
twisti@4237 | 2039 | Klass* k = cache->f1_as_klass(); |
coleenp@4037 | 2040 | obj = k->java_mirror(); |
duke@435 | 2041 | } else { |
duke@435 | 2042 | --count; |
duke@435 | 2043 | obj = (oop) STACK_OBJECT(count); |
duke@435 | 2044 | CHECK_NULL(obj); |
duke@435 | 2045 | } |
duke@435 | 2046 | |
duke@435 | 2047 | // |
duke@435 | 2048 | // Now store the result |
duke@435 | 2049 | // |
twisti@3969 | 2050 | int field_offset = cache->f2_as_index(); |
duke@435 | 2051 | if (cache->is_volatile()) { |
duke@435 | 2052 | if (tos_type == itos) { |
duke@435 | 2053 | obj->release_int_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2054 | } else if (tos_type == atos) { |
bobv@2036 | 2055 | VERIFY_OOP(STACK_OBJECT(-1)); |
duke@435 | 2056 | obj->release_obj_field_put(field_offset, STACK_OBJECT(-1)); |
duke@435 | 2057 | } else if (tos_type == btos) { |
duke@435 | 2058 | obj->release_byte_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2059 | } else if (tos_type == ltos) { |
duke@435 | 2060 | obj->release_long_field_put(field_offset, STACK_LONG(-1)); |
duke@435 | 2061 | } else if (tos_type == ctos) { |
duke@435 | 2062 | obj->release_char_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2063 | } else if (tos_type == stos) { |
duke@435 | 2064 | obj->release_short_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2065 | } else if (tos_type == ftos) { |
duke@435 | 2066 | obj->release_float_field_put(field_offset, STACK_FLOAT(-1)); |
duke@435 | 2067 | } else { |
duke@435 | 2068 | obj->release_double_field_put(field_offset, STACK_DOUBLE(-1)); |
duke@435 | 2069 | } |
duke@435 | 2070 | OrderAccess::storeload(); |
duke@435 | 2071 | } else { |
duke@435 | 2072 | if (tos_type == itos) { |
duke@435 | 2073 | obj->int_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2074 | } else if (tos_type == atos) { |
bobv@2036 | 2075 | VERIFY_OOP(STACK_OBJECT(-1)); |
duke@435 | 2076 | obj->obj_field_put(field_offset, STACK_OBJECT(-1)); |
duke@435 | 2077 | } else if (tos_type == btos) { |
duke@435 | 2078 | obj->byte_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2079 | } else if (tos_type == ltos) { |
duke@435 | 2080 | obj->long_field_put(field_offset, STACK_LONG(-1)); |
duke@435 | 2081 | } else if (tos_type == ctos) { |
duke@435 | 2082 | obj->char_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2083 | } else if (tos_type == stos) { |
duke@435 | 2084 | obj->short_field_put(field_offset, STACK_INT(-1)); |
duke@435 | 2085 | } else if (tos_type == ftos) { |
duke@435 | 2086 | obj->float_field_put(field_offset, STACK_FLOAT(-1)); |
duke@435 | 2087 | } else { |
duke@435 | 2088 | obj->double_field_put(field_offset, STACK_DOUBLE(-1)); |
duke@435 | 2089 | } |
duke@435 | 2090 | } |
duke@435 | 2091 | |
duke@435 | 2092 | UPDATE_PC_AND_TOS_AND_CONTINUE(3, count); |
duke@435 | 2093 | } |
duke@435 | 2094 | |
duke@435 | 2095 | CASE(_new): { |
duke@435 | 2096 | u2 index = Bytes::get_Java_u2(pc+1); |
coleenp@4037 | 2097 | ConstantPool* constants = istate->method()->constants(); |
duke@435 | 2098 | if (!constants->tag_at(index).is_unresolved_klass()) { |
duke@435 | 2099 | // Make sure klass is initialized and doesn't have a finalizer |
coleenp@4037 | 2100 | Klass* entry = constants->slot_at(index).get_klass(); |
duke@435 | 2101 | assert(entry->is_klass(), "Should be resolved klass"); |
coleenp@4037 | 2102 | Klass* k_entry = (Klass*) entry; |
coleenp@4037 | 2103 | assert(k_entry->oop_is_instance(), "Should be InstanceKlass"); |
coleenp@4037 | 2104 | InstanceKlass* ik = (InstanceKlass*) k_entry; |
duke@435 | 2105 | if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) { |
duke@435 | 2106 | size_t obj_size = ik->size_helper(); |
duke@435 | 2107 | oop result = NULL; |
duke@435 | 2108 | // If the TLAB isn't pre-zeroed then we'll have to do it |
duke@435 | 2109 | bool need_zero = !ZeroTLAB; |
duke@435 | 2110 | if (UseTLAB) { |
duke@435 | 2111 | result = (oop) THREAD->tlab().allocate(obj_size); |
duke@435 | 2112 | } |
duke@435 | 2113 | if (result == NULL) { |
duke@435 | 2114 | need_zero = true; |
duke@435 | 2115 | // Try allocate in shared eden |
duke@435 | 2116 | retry: |
duke@435 | 2117 | HeapWord* compare_to = *Universe::heap()->top_addr(); |
duke@435 | 2118 | HeapWord* new_top = compare_to + obj_size; |
duke@435 | 2119 | if (new_top <= *Universe::heap()->end_addr()) { |
duke@435 | 2120 | if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) { |
duke@435 | 2121 | goto retry; |
duke@435 | 2122 | } |
duke@435 | 2123 | result = (oop) compare_to; |
duke@435 | 2124 | } |
duke@435 | 2125 | } |
duke@435 | 2126 | if (result != NULL) { |
duke@435 | 2127 | // Initialize object (if nonzero size and need) and then the header |
duke@435 | 2128 | if (need_zero ) { |
duke@435 | 2129 | HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize; |
duke@435 | 2130 | obj_size -= sizeof(oopDesc) / oopSize; |
duke@435 | 2131 | if (obj_size > 0 ) { |
duke@435 | 2132 | memset(to_zero, 0, obj_size * HeapWordSize); |
duke@435 | 2133 | } |
duke@435 | 2134 | } |
duke@435 | 2135 | if (UseBiasedLocking) { |
duke@435 | 2136 | result->set_mark(ik->prototype_header()); |
duke@435 | 2137 | } else { |
duke@435 | 2138 | result->set_mark(markOopDesc::prototype()); |
duke@435 | 2139 | } |
coleenp@602 | 2140 | result->set_klass_gap(0); |
duke@435 | 2141 | result->set_klass(k_entry); |
duke@435 | 2142 | SET_STACK_OBJECT(result, 0); |
duke@435 | 2143 | UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); |
duke@435 | 2144 | } |
duke@435 | 2145 | } |
duke@435 | 2146 | } |
duke@435 | 2147 | // Slow case allocation |
duke@435 | 2148 | CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index), |
duke@435 | 2149 | handle_exception); |
duke@435 | 2150 | SET_STACK_OBJECT(THREAD->vm_result(), 0); |
duke@435 | 2151 | THREAD->set_vm_result(NULL); |
duke@435 | 2152 | UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1); |
duke@435 | 2153 | } |
duke@435 | 2154 | CASE(_anewarray): { |
duke@435 | 2155 | u2 index = Bytes::get_Java_u2(pc+1); |
duke@435 | 2156 | jint size = STACK_INT(-1); |
duke@435 | 2157 | CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size), |
duke@435 | 2158 | handle_exception); |
duke@435 | 2159 | SET_STACK_OBJECT(THREAD->vm_result(), -1); |
duke@435 | 2160 | THREAD->set_vm_result(NULL); |
duke@435 | 2161 | UPDATE_PC_AND_CONTINUE(3); |
duke@435 | 2162 | } |
duke@435 | 2163 | CASE(_multianewarray): { |
duke@435 | 2164 | jint dims = *(pc+3); |
duke@435 | 2165 | jint size = STACK_INT(-1); |
duke@435 | 2166 | // stack grows down, dimensions are up! |
duke@435 | 2167 | jint *dimarray = |
twisti@1864 | 2168 | (jint*)&topOfStack[dims * Interpreter::stackElementWords+ |
twisti@1864 | 2169 | Interpreter::stackElementWords-1]; |
duke@435 | 2170 | //adjust pointer to start of stack element |
duke@435 | 2171 | CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray), |
duke@435 | 2172 | handle_exception); |
duke@435 | 2173 | SET_STACK_OBJECT(THREAD->vm_result(), -dims); |
duke@435 | 2174 | THREAD->set_vm_result(NULL); |
duke@435 | 2175 | UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1)); |
duke@435 | 2176 | } |
duke@435 | 2177 | CASE(_checkcast): |
duke@435 | 2178 | if (STACK_OBJECT(-1) != NULL) { |
bobv@2036 | 2179 | VERIFY_OOP(STACK_OBJECT(-1)); |
duke@435 | 2180 | u2 index = Bytes::get_Java_u2(pc+1); |
duke@435 | 2181 | if (ProfileInterpreter) { |
duke@435 | 2182 | // needs Profile_checkcast QQQ |
duke@435 | 2183 | ShouldNotReachHere(); |
duke@435 | 2184 | } |
duke@435 | 2185 | // Constant pool may have actual klass or unresolved klass. If it is |
duke@435 | 2186 | // unresolved we must resolve it |
duke@435 | 2187 | if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { |
duke@435 | 2188 | CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); |
duke@435 | 2189 | } |
coleenp@4037 | 2190 | Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); |
coleenp@4037 | 2191 | Klass* objKlassOop = STACK_OBJECT(-1)->klass(); //ebx |
duke@435 | 2192 | // |
duke@435 | 2193 | // Check for compatibilty. This check must not GC!! |
duke@435 | 2194 | // Seems way more expensive now that we must dispatch |
duke@435 | 2195 | // |
duke@435 | 2196 | if (objKlassOop != klassOf && |
coleenp@4037 | 2197 | !objKlassOop->is_subtype_of(klassOf)) { |
duke@435 | 2198 | ResourceMark rm(THREAD); |
hseigel@4278 | 2199 | const char* objName = objKlassOop->external_name(); |
hseigel@4278 | 2200 | const char* klassName = klassOf->external_name(); |
duke@435 | 2201 | char* message = SharedRuntime::generate_class_cast_message( |
duke@435 | 2202 | objName, klassName); |
duke@435 | 2203 | VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message); |
duke@435 | 2204 | } |
duke@435 | 2205 | } else { |
duke@435 | 2206 | if (UncommonNullCast) { |
duke@435 | 2207 | // istate->method()->set_null_cast_seen(); |
duke@435 | 2208 | // [RGV] Not sure what to do here! |
duke@435 | 2209 | |
duke@435 | 2210 | } |
duke@435 | 2211 | } |
duke@435 | 2212 | UPDATE_PC_AND_CONTINUE(3); |
duke@435 | 2213 | |
duke@435 | 2214 | CASE(_instanceof): |
duke@435 | 2215 | if (STACK_OBJECT(-1) == NULL) { |
duke@435 | 2216 | SET_STACK_INT(0, -1); |
duke@435 | 2217 | } else { |
bobv@2036 | 2218 | VERIFY_OOP(STACK_OBJECT(-1)); |
duke@435 | 2219 | u2 index = Bytes::get_Java_u2(pc+1); |
duke@435 | 2220 | // Constant pool may have actual klass or unresolved klass. If it is |
duke@435 | 2221 | // unresolved we must resolve it |
duke@435 | 2222 | if (METHOD->constants()->tag_at(index).is_unresolved_klass()) { |
duke@435 | 2223 | CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception); |
duke@435 | 2224 | } |
coleenp@4037 | 2225 | Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass(); |
coleenp@4037 | 2226 | Klass* objKlassOop = STACK_OBJECT(-1)->klass(); |
duke@435 | 2227 | // |
duke@435 | 2228 | // Check for compatibilty. This check must not GC!! |
duke@435 | 2229 | // Seems way more expensive now that we must dispatch |
duke@435 | 2230 | // |
coleenp@4037 | 2231 | if ( objKlassOop == klassOf || objKlassOop->is_subtype_of(klassOf)) { |
duke@435 | 2232 | SET_STACK_INT(1, -1); |
duke@435 | 2233 | } else { |
duke@435 | 2234 | SET_STACK_INT(0, -1); |
duke@435 | 2235 | } |
duke@435 | 2236 | } |
duke@435 | 2237 | UPDATE_PC_AND_CONTINUE(3); |
duke@435 | 2238 | |
duke@435 | 2239 | CASE(_ldc_w): |
duke@435 | 2240 | CASE(_ldc): |
duke@435 | 2241 | { |
duke@435 | 2242 | u2 index; |
duke@435 | 2243 | bool wide = false; |
duke@435 | 2244 | int incr = 2; // frequent case |
duke@435 | 2245 | if (opcode == Bytecodes::_ldc) { |
duke@435 | 2246 | index = pc[1]; |
duke@435 | 2247 | } else { |
duke@435 | 2248 | index = Bytes::get_Java_u2(pc+1); |
duke@435 | 2249 | incr = 3; |
duke@435 | 2250 | wide = true; |
duke@435 | 2251 | } |
duke@435 | 2252 | |
coleenp@4037 | 2253 | ConstantPool* constants = METHOD->constants(); |
duke@435 | 2254 | switch (constants->tag_at(index).value()) { |
duke@435 | 2255 | case JVM_CONSTANT_Integer: |
duke@435 | 2256 | SET_STACK_INT(constants->int_at(index), 0); |
duke@435 | 2257 | break; |
duke@435 | 2258 | |
duke@435 | 2259 | case JVM_CONSTANT_Float: |
duke@435 | 2260 | SET_STACK_FLOAT(constants->float_at(index), 0); |
duke@435 | 2261 | break; |
duke@435 | 2262 | |
duke@435 | 2263 | case JVM_CONSTANT_String: |
coleenp@4037 | 2264 | { |
coleenp@4037 | 2265 | oop result = constants->resolved_references()->obj_at(index); |
coleenp@4037 | 2266 | if (result == NULL) { |
coleenp@4037 | 2267 | CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception); |
coleenp@4037 | 2268 | SET_STACK_OBJECT(THREAD->vm_result(), 0); |
coleenp@4037 | 2269 | THREAD->set_vm_result(NULL); |
coleenp@4037 | 2270 | } else { |
coleenp@4037 | 2271 | VERIFY_OOP(result); |
coleenp@4037 | 2272 | SET_STACK_OBJECT(result, 0); |
coleenp@4037 | 2273 | } |
duke@435 | 2274 | break; |
coleenp@4037 | 2275 | } |
duke@435 | 2276 | |
duke@435 | 2277 | case JVM_CONSTANT_Class: |
never@2658 | 2278 | VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror()); |
never@2658 | 2279 | SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0); |
duke@435 | 2280 | break; |
duke@435 | 2281 | |
duke@435 | 2282 | case JVM_CONSTANT_UnresolvedClass: |
duke@435 | 2283 | case JVM_CONSTANT_UnresolvedClassInError: |
duke@435 | 2284 | CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception); |
duke@435 | 2285 | SET_STACK_OBJECT(THREAD->vm_result(), 0); |
duke@435 | 2286 | THREAD->set_vm_result(NULL); |
duke@435 | 2287 | break; |
duke@435 | 2288 | |
duke@435 | 2289 | default: ShouldNotReachHere(); |
duke@435 | 2290 | } |
duke@435 | 2291 | UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); |
duke@435 | 2292 | } |
duke@435 | 2293 | |
duke@435 | 2294 | CASE(_ldc2_w): |
duke@435 | 2295 | { |
duke@435 | 2296 | u2 index = Bytes::get_Java_u2(pc+1); |
duke@435 | 2297 | |
coleenp@4037 | 2298 | ConstantPool* constants = METHOD->constants(); |
duke@435 | 2299 | switch (constants->tag_at(index).value()) { |
duke@435 | 2300 | |
duke@435 | 2301 | case JVM_CONSTANT_Long: |
duke@435 | 2302 | SET_STACK_LONG(constants->long_at(index), 1); |
duke@435 | 2303 | break; |
duke@435 | 2304 | |
duke@435 | 2305 | case JVM_CONSTANT_Double: |
duke@435 | 2306 | SET_STACK_DOUBLE(constants->double_at(index), 1); |
duke@435 | 2307 | break; |
duke@435 | 2308 | default: ShouldNotReachHere(); |
duke@435 | 2309 | } |
duke@435 | 2310 | UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2); |
duke@435 | 2311 | } |
duke@435 | 2312 | |
twisti@2762 | 2313 | CASE(_fast_aldc_w): |
twisti@2762 | 2314 | CASE(_fast_aldc): { |
twisti@2762 | 2315 | u2 index; |
twisti@2762 | 2316 | int incr; |
twisti@2762 | 2317 | if (opcode == Bytecodes::_fast_aldc) { |
twisti@2762 | 2318 | index = pc[1]; |
twisti@2762 | 2319 | incr = 2; |
twisti@2762 | 2320 | } else { |
twisti@2762 | 2321 | index = Bytes::get_native_u2(pc+1); |
twisti@2762 | 2322 | incr = 3; |
twisti@2762 | 2323 | } |
twisti@2762 | 2324 | |
twisti@2762 | 2325 | // We are resolved if the f1 field contains a non-null object (CallSite, etc.) |
twisti@2762 | 2326 | // This kind of CP cache entry does not need to match the flags byte, because |
twisti@2762 | 2327 | // there is a 1-1 relation between bytecode type and CP entry type. |
coleenp@4037 | 2328 | ConstantPool* constants = METHOD->constants(); |
coleenp@4037 | 2329 | oop result = constants->resolved_references()->obj_at(index); |
twisti@3969 | 2330 | if (result == NULL) { |
twisti@2762 | 2331 | CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), |
twisti@2762 | 2332 | handle_exception); |
coleenp@4037 | 2333 | result = THREAD->vm_result(); |
twisti@2762 | 2334 | } |
twisti@2762 | 2335 | |
twisti@3969 | 2336 | VERIFY_OOP(result); |
twisti@3969 | 2337 | SET_STACK_OBJECT(result, 0); |
twisti@2762 | 2338 | UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1); |
twisti@2762 | 2339 | } |
twisti@2762 | 2340 | |
twisti@2762 | 2341 | CASE(_invokedynamic): { |
twisti@4237 | 2342 | |
twisti@2762 | 2343 | if (!EnableInvokeDynamic) { |
twisti@2762 | 2344 | // We should not encounter this bytecode if !EnableInvokeDynamic. |
twisti@2762 | 2345 | // The verifier will stop it. However, if we get past the verifier, |
twisti@2762 | 2346 | // this will stop the thread in a reasonable way, without crashing the JVM. |
twisti@2762 | 2347 | CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD), |
twisti@2762 | 2348 | handle_exception); |
twisti@2762 | 2349 | ShouldNotReachHere(); |
twisti@2762 | 2350 | } |
twisti@2762 | 2351 | |
twisti@4237 | 2352 | u4 index = Bytes::get_native_u4(pc+1); |
twisti@4237 | 2353 | ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); |
twisti@2762 | 2354 | |
coleenp@4037 | 2355 | // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.) |
twisti@2762 | 2356 | // This kind of CP cache entry does not need to match the flags byte, because |
twisti@2762 | 2357 | // there is a 1-1 relation between bytecode type and CP entry type. |
twisti@4237 | 2358 | if (! cache->is_resolved((Bytecodes::Code) opcode)) { |
twisti@2762 | 2359 | CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD), |
twisti@2762 | 2360 | handle_exception); |
twisti@4237 | 2361 | cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index); |
twisti@2762 | 2362 | } |
twisti@2762 | 2363 | |
twisti@4237 | 2364 | Method* method = cache->f1_as_method(); |
goetz@5319 | 2365 | if (VerifyOops) method->verify(); |
twisti@4237 | 2366 | |
twisti@4237 | 2367 | if (cache->has_appendix()) { |
twisti@4237 | 2368 | ConstantPool* constants = METHOD->constants(); |
twisti@4237 | 2369 | SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); |
twisti@4237 | 2370 | MORE_STACK(1); |
twisti@4237 | 2371 | } |
twisti@4237 | 2372 | |
twisti@4237 | 2373 | istate->set_msg(call_method); |
twisti@4237 | 2374 | istate->set_callee(method); |
twisti@4237 | 2375 | istate->set_callee_entry_point(method->from_interpreted_entry()); |
twisti@2762 | 2376 | istate->set_bcp_advance(5); |
twisti@2762 | 2377 | |
twisti@2762 | 2378 | UPDATE_PC_AND_RETURN(0); // I'll be back... |
twisti@2762 | 2379 | } |
twisti@2762 | 2380 | |
twisti@4237 | 2381 | CASE(_invokehandle): { |
twisti@4237 | 2382 | |
twisti@4237 | 2383 | if (!EnableInvokeDynamic) { |
twisti@4237 | 2384 | ShouldNotReachHere(); |
twisti@4237 | 2385 | } |
twisti@4237 | 2386 | |
twisti@4237 | 2387 | u2 index = Bytes::get_native_u2(pc+1); |
twisti@4237 | 2388 | ConstantPoolCacheEntry* cache = cp->entry_at(index); |
twisti@4237 | 2389 | |
twisti@4237 | 2390 | if (! cache->is_resolved((Bytecodes::Code) opcode)) { |
twisti@4237 | 2391 | CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD), |
twisti@4237 | 2392 | handle_exception); |
twisti@4237 | 2393 | cache = cp->entry_at(index); |
twisti@4237 | 2394 | } |
twisti@4237 | 2395 | |
twisti@4237 | 2396 | Method* method = cache->f1_as_method(); |
goetz@5319 | 2397 | if (VerifyOops) method->verify(); |
twisti@4237 | 2398 | |
twisti@4237 | 2399 | if (cache->has_appendix()) { |
twisti@4237 | 2400 | ConstantPool* constants = METHOD->constants(); |
twisti@4237 | 2401 | SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0); |
twisti@4237 | 2402 | MORE_STACK(1); |
twisti@4237 | 2403 | } |
twisti@4237 | 2404 | |
twisti@4237 | 2405 | istate->set_msg(call_method); |
twisti@4237 | 2406 | istate->set_callee(method); |
twisti@4237 | 2407 | istate->set_callee_entry_point(method->from_interpreted_entry()); |
twisti@4237 | 2408 | istate->set_bcp_advance(3); |
twisti@4237 | 2409 | |
twisti@4237 | 2410 | UPDATE_PC_AND_RETURN(0); // I'll be back... |
twisti@4237 | 2411 | } |
twisti@4237 | 2412 | |
duke@435 | 2413 | CASE(_invokeinterface): { |
duke@435 | 2414 | u2 index = Bytes::get_native_u2(pc+1); |
duke@435 | 2415 | |
duke@435 | 2416 | // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases |
duke@435 | 2417 | // out so c++ compiler has a chance for constant prop to fold everything possible away. |
duke@435 | 2418 | |
duke@435 | 2419 | ConstantPoolCacheEntry* cache = cp->entry_at(index); |
duke@435 | 2420 | if (!cache->is_resolved((Bytecodes::Code)opcode)) { |
duke@435 | 2421 | CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), |
duke@435 | 2422 | handle_exception); |
duke@435 | 2423 | cache = cp->entry_at(index); |
duke@435 | 2424 | } |
duke@435 | 2425 | |
duke@435 | 2426 | istate->set_msg(call_method); |
duke@435 | 2427 | |
duke@435 | 2428 | // Special case of invokeinterface called for virtual method of |
duke@435 | 2429 | // java.lang.Object. See cpCacheOop.cpp for details. |
duke@435 | 2430 | // This code isn't produced by javac, but could be produced by |
duke@435 | 2431 | // another compliant java compiler. |
twisti@3969 | 2432 | if (cache->is_forced_virtual()) { |
coleenp@4037 | 2433 | Method* callee; |
duke@435 | 2434 | CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); |
duke@435 | 2435 | if (cache->is_vfinal()) { |
twisti@3969 | 2436 | callee = cache->f2_as_vfinal_method(); |
duke@435 | 2437 | } else { |
duke@435 | 2438 | // get receiver |
duke@435 | 2439 | int parms = cache->parameter_size(); |
duke@435 | 2440 | // Same comments as invokevirtual apply here |
bobv@2036 | 2441 | VERIFY_OOP(STACK_OBJECT(-parms)); |
coleenp@4037 | 2442 | InstanceKlass* rcvrKlass = (InstanceKlass*) |
coleenp@4037 | 2443 | STACK_OBJECT(-parms)->klass(); |
coleenp@4037 | 2444 | callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; |
duke@435 | 2445 | } |
duke@435 | 2446 | istate->set_callee(callee); |
duke@435 | 2447 | istate->set_callee_entry_point(callee->from_interpreted_entry()); |
duke@435 | 2448 | #ifdef VM_JVMTI |
duke@435 | 2449 | if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { |
duke@435 | 2450 | istate->set_callee_entry_point(callee->interpreter_entry()); |
duke@435 | 2451 | } |
duke@435 | 2452 | #endif /* VM_JVMTI */ |
duke@435 | 2453 | istate->set_bcp_advance(5); |
duke@435 | 2454 | UPDATE_PC_AND_RETURN(0); // I'll be back... |
duke@435 | 2455 | } |
duke@435 | 2456 | |
duke@435 | 2457 | // this could definitely be cleaned up QQQ |
coleenp@4037 | 2458 | Method* callee; |
coleenp@4037 | 2459 | Klass* iclass = cache->f1_as_klass(); |
coleenp@4037 | 2460 | // InstanceKlass* interface = (InstanceKlass*) iclass; |
duke@435 | 2461 | // get receiver |
duke@435 | 2462 | int parms = cache->parameter_size(); |
duke@435 | 2463 | oop rcvr = STACK_OBJECT(-parms); |
duke@435 | 2464 | CHECK_NULL(rcvr); |
coleenp@4037 | 2465 | InstanceKlass* int2 = (InstanceKlass*) rcvr->klass(); |
duke@435 | 2466 | itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable(); |
duke@435 | 2467 | int i; |
duke@435 | 2468 | for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) { |
duke@435 | 2469 | if (ki->interface_klass() == iclass) break; |
duke@435 | 2470 | } |
duke@435 | 2471 | // If the interface isn't found, this class doesn't implement this |
duke@435 | 2472 | // interface. The link resolver checks this but only for the first |
duke@435 | 2473 | // time this interface is called. |
duke@435 | 2474 | if (i == int2->itable_length()) { |
duke@435 | 2475 | VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), ""); |
duke@435 | 2476 | } |
twisti@3969 | 2477 | int mindex = cache->f2_as_index(); |
duke@435 | 2478 | itableMethodEntry* im = ki->first_method_entry(rcvr->klass()); |
duke@435 | 2479 | callee = im[mindex].method(); |
duke@435 | 2480 | if (callee == NULL) { |
duke@435 | 2481 | VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), ""); |
duke@435 | 2482 | } |
duke@435 | 2483 | |
duke@435 | 2484 | istate->set_callee(callee); |
duke@435 | 2485 | istate->set_callee_entry_point(callee->from_interpreted_entry()); |
duke@435 | 2486 | #ifdef VM_JVMTI |
duke@435 | 2487 | if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { |
duke@435 | 2488 | istate->set_callee_entry_point(callee->interpreter_entry()); |
duke@435 | 2489 | } |
duke@435 | 2490 | #endif /* VM_JVMTI */ |
duke@435 | 2491 | istate->set_bcp_advance(5); |
duke@435 | 2492 | UPDATE_PC_AND_RETURN(0); // I'll be back... |
duke@435 | 2493 | } |
duke@435 | 2494 | |
duke@435 | 2495 | CASE(_invokevirtual): |
duke@435 | 2496 | CASE(_invokespecial): |
duke@435 | 2497 | CASE(_invokestatic): { |
duke@435 | 2498 | u2 index = Bytes::get_native_u2(pc+1); |
duke@435 | 2499 | |
duke@435 | 2500 | ConstantPoolCacheEntry* cache = cp->entry_at(index); |
duke@435 | 2501 | // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases |
duke@435 | 2502 | // out so c++ compiler has a chance for constant prop to fold everything possible away. |
duke@435 | 2503 | |
duke@435 | 2504 | if (!cache->is_resolved((Bytecodes::Code)opcode)) { |
duke@435 | 2505 | CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode), |
duke@435 | 2506 | handle_exception); |
duke@435 | 2507 | cache = cp->entry_at(index); |
duke@435 | 2508 | } |
duke@435 | 2509 | |
duke@435 | 2510 | istate->set_msg(call_method); |
duke@435 | 2511 | { |
coleenp@4037 | 2512 | Method* callee; |
duke@435 | 2513 | if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) { |
duke@435 | 2514 | CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); |
twisti@3969 | 2515 | if (cache->is_vfinal()) callee = cache->f2_as_vfinal_method(); |
duke@435 | 2516 | else { |
duke@435 | 2517 | // get receiver |
duke@435 | 2518 | int parms = cache->parameter_size(); |
duke@435 | 2519 | // this works but needs a resourcemark and seems to create a vtable on every call: |
coleenp@4037 | 2520 | // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index()); |
duke@435 | 2521 | // |
duke@435 | 2522 | // this fails with an assert |
coleenp@4037 | 2523 | // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass()); |
duke@435 | 2524 | // but this works |
bobv@2036 | 2525 | VERIFY_OOP(STACK_OBJECT(-parms)); |
coleenp@4037 | 2526 | InstanceKlass* rcvrKlass = (InstanceKlass*) STACK_OBJECT(-parms)->klass(); |
duke@435 | 2527 | /* |
duke@435 | 2528 | Executing this code in java.lang.String: |
duke@435 | 2529 | public String(char value[]) { |
duke@435 | 2530 | this.count = value.length; |
duke@435 | 2531 | this.value = (char[])value.clone(); |
duke@435 | 2532 | } |
duke@435 | 2533 | |
coleenp@4037 | 2534 | a find on rcvr->klass() reports: |
duke@435 | 2535 | {type array char}{type array class} |
duke@435 | 2536 | - klass: {other class} |
duke@435 | 2537 | |
coleenp@4037 | 2538 | but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure |
coleenp@4037 | 2539 | because rcvr->klass()->oop_is_instance() == 0 |
duke@435 | 2540 | However it seems to have a vtable in the right location. Huh? |
duke@435 | 2541 | |
duke@435 | 2542 | */ |
coleenp@4037 | 2543 | callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()]; |
duke@435 | 2544 | } |
duke@435 | 2545 | } else { |
duke@435 | 2546 | if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) { |
duke@435 | 2547 | CHECK_NULL(STACK_OBJECT(-(cache->parameter_size()))); |
duke@435 | 2548 | } |
twisti@3969 | 2549 | callee = cache->f1_as_method(); |
duke@435 | 2550 | } |
duke@435 | 2551 | |
duke@435 | 2552 | istate->set_callee(callee); |
duke@435 | 2553 | istate->set_callee_entry_point(callee->from_interpreted_entry()); |
duke@435 | 2554 | #ifdef VM_JVMTI |
duke@435 | 2555 | if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) { |
duke@435 | 2556 | istate->set_callee_entry_point(callee->interpreter_entry()); |
duke@435 | 2557 | } |
duke@435 | 2558 | #endif /* VM_JVMTI */ |
duke@435 | 2559 | istate->set_bcp_advance(3); |
duke@435 | 2560 | UPDATE_PC_AND_RETURN(0); // I'll be back... |
duke@435 | 2561 | } |
duke@435 | 2562 | } |
duke@435 | 2563 | |
duke@435 | 2564 | /* Allocate memory for a new java object. */ |
duke@435 | 2565 | |
duke@435 | 2566 | CASE(_newarray): { |
duke@435 | 2567 | BasicType atype = (BasicType) *(pc+1); |
duke@435 | 2568 | jint size = STACK_INT(-1); |
duke@435 | 2569 | CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size), |
duke@435 | 2570 | handle_exception); |
duke@435 | 2571 | SET_STACK_OBJECT(THREAD->vm_result(), -1); |
duke@435 | 2572 | THREAD->set_vm_result(NULL); |
duke@435 | 2573 | |
duke@435 | 2574 | UPDATE_PC_AND_CONTINUE(2); |
duke@435 | 2575 | } |
duke@435 | 2576 | |
duke@435 | 2577 | /* Throw an exception. */ |
duke@435 | 2578 | |
duke@435 | 2579 | CASE(_athrow): { |
duke@435 | 2580 | oop except_oop = STACK_OBJECT(-1); |
duke@435 | 2581 | CHECK_NULL(except_oop); |
duke@435 | 2582 | // set pending_exception so we use common code |
duke@435 | 2583 | THREAD->set_pending_exception(except_oop, NULL, 0); |
duke@435 | 2584 | goto handle_exception; |
duke@435 | 2585 | } |
duke@435 | 2586 | |
duke@435 | 2587 | /* goto and jsr. They are exactly the same except jsr pushes |
duke@435 | 2588 | * the address of the next instruction first. |
duke@435 | 2589 | */ |
duke@435 | 2590 | |
duke@435 | 2591 | CASE(_jsr): { |
duke@435 | 2592 | /* push bytecode index on stack */ |
duke@435 | 2593 | SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0); |
duke@435 | 2594 | MORE_STACK(1); |
duke@435 | 2595 | /* FALL THROUGH */ |
duke@435 | 2596 | } |
duke@435 | 2597 | |
duke@435 | 2598 | CASE(_goto): |
duke@435 | 2599 | { |
duke@435 | 2600 | int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1); |
duke@435 | 2601 | address branch_pc = pc; |
duke@435 | 2602 | UPDATE_PC(offset); |
duke@435 | 2603 | DO_BACKEDGE_CHECKS(offset, branch_pc); |
duke@435 | 2604 | CONTINUE; |
duke@435 | 2605 | } |
duke@435 | 2606 | |
duke@435 | 2607 | CASE(_jsr_w): { |
duke@435 | 2608 | /* push return address on the stack */ |
duke@435 | 2609 | SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0); |
duke@435 | 2610 | MORE_STACK(1); |
duke@435 | 2611 | /* FALL THROUGH */ |
duke@435 | 2612 | } |
duke@435 | 2613 | |
duke@435 | 2614 | CASE(_goto_w): |
duke@435 | 2615 | { |
duke@435 | 2616 | int32_t offset = Bytes::get_Java_u4(pc + 1); |
duke@435 | 2617 | address branch_pc = pc; |
duke@435 | 2618 | UPDATE_PC(offset); |
duke@435 | 2619 | DO_BACKEDGE_CHECKS(offset, branch_pc); |
duke@435 | 2620 | CONTINUE; |
duke@435 | 2621 | } |
duke@435 | 2622 | |
duke@435 | 2623 | /* return from a jsr or jsr_w */ |
duke@435 | 2624 | |
duke@435 | 2625 | CASE(_ret): { |
duke@435 | 2626 | pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1])); |
duke@435 | 2627 | UPDATE_PC_AND_CONTINUE(0); |
duke@435 | 2628 | } |
duke@435 | 2629 | |
duke@435 | 2630 | /* debugger breakpoint */ |
duke@435 | 2631 | |
duke@435 | 2632 | CASE(_breakpoint): { |
duke@435 | 2633 | Bytecodes::Code original_bytecode; |
duke@435 | 2634 | DECACHE_STATE(); |
duke@435 | 2635 | SET_LAST_JAVA_FRAME(); |
duke@435 | 2636 | original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD, |
duke@435 | 2637 | METHOD, pc); |
duke@435 | 2638 | RESET_LAST_JAVA_FRAME(); |
duke@435 | 2639 | CACHE_STATE(); |
duke@435 | 2640 | if (THREAD->has_pending_exception()) goto handle_exception; |
duke@435 | 2641 | CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc), |
duke@435 | 2642 | handle_exception); |
duke@435 | 2643 | |
duke@435 | 2644 | opcode = (jubyte)original_bytecode; |
duke@435 | 2645 | goto opcode_switch; |
duke@435 | 2646 | } |
duke@435 | 2647 | |
duke@435 | 2648 | DEFAULT: |
jcoomes@1845 | 2649 | fatal(err_msg("Unimplemented opcode %d = %s", opcode, |
jcoomes@1845 | 2650 | Bytecodes::name((Bytecodes::Code)opcode))); |
duke@435 | 2651 | goto finish; |
duke@435 | 2652 | |
duke@435 | 2653 | } /* switch(opc) */ |
duke@435 | 2654 | |
duke@435 | 2655 | |
duke@435 | 2656 | #ifdef USELABELS |
duke@435 | 2657 | check_for_exception: |
duke@435 | 2658 | #endif |
duke@435 | 2659 | { |
duke@435 | 2660 | if (!THREAD->has_pending_exception()) { |
duke@435 | 2661 | CONTINUE; |
duke@435 | 2662 | } |
duke@435 | 2663 | /* We will be gcsafe soon, so flush our state. */ |
duke@435 | 2664 | DECACHE_PC(); |
duke@435 | 2665 | goto handle_exception; |
duke@435 | 2666 | } |
duke@435 | 2667 | do_continue: ; |
duke@435 | 2668 | |
duke@435 | 2669 | } /* while (1) interpreter loop */ |
duke@435 | 2670 | |
duke@435 | 2671 | |
duke@435 | 2672 | // An exception exists in the thread state see whether this activation can handle it |
duke@435 | 2673 | handle_exception: { |
duke@435 | 2674 | |
duke@435 | 2675 | HandleMarkCleaner __hmc(THREAD); |
duke@435 | 2676 | Handle except_oop(THREAD, THREAD->pending_exception()); |
duke@435 | 2677 | // Prevent any subsequent HandleMarkCleaner in the VM |
duke@435 | 2678 | // from freeing the except_oop handle. |
duke@435 | 2679 | HandleMark __hm(THREAD); |
duke@435 | 2680 | |
duke@435 | 2681 | THREAD->clear_pending_exception(); |
duke@435 | 2682 | assert(except_oop(), "No exception to process"); |
duke@435 | 2683 | intptr_t continuation_bci; |
duke@435 | 2684 | // expression stack is emptied |
twisti@1864 | 2685 | topOfStack = istate->stack_base() - Interpreter::stackElementWords; |
duke@435 | 2686 | CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()), |
duke@435 | 2687 | handle_exception); |
duke@435 | 2688 | |
coleenp@4037 | 2689 | except_oop = THREAD->vm_result(); |
duke@435 | 2690 | THREAD->set_vm_result(NULL); |
duke@435 | 2691 | if (continuation_bci >= 0) { |
duke@435 | 2692 | // Place exception on top of stack |
duke@435 | 2693 | SET_STACK_OBJECT(except_oop(), 0); |
duke@435 | 2694 | MORE_STACK(1); |
duke@435 | 2695 | pc = METHOD->code_base() + continuation_bci; |
duke@435 | 2696 | if (TraceExceptions) { |
duke@435 | 2697 | ttyLocker ttyl; |
duke@435 | 2698 | ResourceMark rm; |
duke@435 | 2699 | tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); |
duke@435 | 2700 | tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); |
duke@435 | 2701 | tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT, |
duke@435 | 2702 | pc - (intptr_t)METHOD->code_base(), |
duke@435 | 2703 | continuation_bci, THREAD); |
duke@435 | 2704 | } |
duke@435 | 2705 | // for AbortVMOnException flag |
duke@435 | 2706 | NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); |
duke@435 | 2707 | goto run; |
duke@435 | 2708 | } |
duke@435 | 2709 | if (TraceExceptions) { |
duke@435 | 2710 | ttyLocker ttyl; |
duke@435 | 2711 | ResourceMark rm; |
duke@435 | 2712 | tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), except_oop()); |
duke@435 | 2713 | tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string()); |
duke@435 | 2714 | tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT, |
duke@435 | 2715 | pc - (intptr_t) METHOD->code_base(), |
duke@435 | 2716 | THREAD); |
duke@435 | 2717 | } |
duke@435 | 2718 | // for AbortVMOnException flag |
duke@435 | 2719 | NOT_PRODUCT(Exceptions::debug_check_abort(except_oop)); |
duke@435 | 2720 | // No handler in this activation, unwind and try again |
duke@435 | 2721 | THREAD->set_pending_exception(except_oop(), NULL, 0); |
duke@435 | 2722 | goto handle_return; |
goetz@6450 | 2723 | } // handle_exception: |
duke@435 | 2724 | |
duke@435 | 2725 | // Return from an interpreter invocation with the result of the interpretation |
duke@435 | 2726 | // on the top of the Java Stack (or a pending exception) |
duke@435 | 2727 | |
goetz@6450 | 2728 | handle_Pop_Frame: { |
goetz@6450 | 2729 | |
goetz@6450 | 2730 | // We don't really do anything special here except we must be aware |
goetz@6450 | 2731 | // that we can get here without ever locking the method (if sync). |
goetz@6450 | 2732 | // Also we skip the notification of the exit. |
goetz@6450 | 2733 | |
goetz@6450 | 2734 | istate->set_msg(popping_frame); |
goetz@6450 | 2735 | // Clear pending so while the pop is in process |
goetz@6450 | 2736 | // we don't start another one if a call_vm is done. |
goetz@6450 | 2737 | THREAD->clr_pop_frame_pending(); |
goetz@6450 | 2738 | // Let interpreter (only) see the we're in the process of popping a frame |
goetz@6450 | 2739 | THREAD->set_pop_frame_in_process(); |
goetz@6450 | 2740 | |
goetz@6450 | 2741 | goto handle_return; |
goetz@6450 | 2742 | |
goetz@6450 | 2743 | } // handle_Pop_Frame |
goetz@6450 | 2744 | |
goetz@6450 | 2745 | // ForceEarlyReturn ends a method, and returns to the caller with a return value |
goetz@6450 | 2746 | // given by the invoker of the early return. |
goetz@6450 | 2747 | handle_Early_Return: { |
goetz@6450 | 2748 | |
goetz@6450 | 2749 | istate->set_msg(early_return); |
goetz@6450 | 2750 | |
goetz@6450 | 2751 | // Clear expression stack. |
goetz@6450 | 2752 | topOfStack = istate->stack_base() - Interpreter::stackElementWords; |
goetz@6450 | 2753 | |
goetz@6450 | 2754 | JvmtiThreadState *ts = THREAD->jvmti_thread_state(); |
goetz@6450 | 2755 | |
goetz@6450 | 2756 | // Push the value to be returned. |
goetz@6450 | 2757 | switch (istate->method()->result_type()) { |
goetz@6450 | 2758 | case T_BOOLEAN: |
goetz@6450 | 2759 | case T_SHORT: |
goetz@6450 | 2760 | case T_BYTE: |
goetz@6450 | 2761 | case T_CHAR: |
goetz@6450 | 2762 | case T_INT: |
goetz@6451 | 2763 | SET_STACK_INT(ts->earlyret_value().i, 0); |
goetz@6450 | 2764 | MORE_STACK(1); |
goetz@6450 | 2765 | break; |
goetz@6450 | 2766 | case T_LONG: |
goetz@6450 | 2767 | SET_STACK_LONG(ts->earlyret_value().j, 1); |
goetz@6450 | 2768 | MORE_STACK(2); |
goetz@6450 | 2769 | break; |
goetz@6450 | 2770 | case T_FLOAT: |
goetz@6450 | 2771 | SET_STACK_FLOAT(ts->earlyret_value().f, 0); |
goetz@6450 | 2772 | MORE_STACK(1); |
goetz@6450 | 2773 | break; |
goetz@6450 | 2774 | case T_DOUBLE: |
goetz@6450 | 2775 | SET_STACK_DOUBLE(ts->earlyret_value().d, 1); |
goetz@6450 | 2776 | MORE_STACK(2); |
goetz@6450 | 2777 | break; |
goetz@6450 | 2778 | case T_ARRAY: |
goetz@6450 | 2779 | case T_OBJECT: |
goetz@6450 | 2780 | SET_STACK_OBJECT(ts->earlyret_oop(), 0); |
goetz@6450 | 2781 | MORE_STACK(1); |
goetz@6450 | 2782 | break; |
goetz@6450 | 2783 | } |
goetz@6450 | 2784 | |
goetz@6450 | 2785 | ts->clr_earlyret_value(); |
goetz@6450 | 2786 | ts->set_earlyret_oop(NULL); |
goetz@6450 | 2787 | ts->clr_earlyret_pending(); |
goetz@6450 | 2788 | |
goetz@6450 | 2789 | // Fall through to handle_return. |
goetz@6450 | 2790 | |
goetz@6450 | 2791 | } // handle_Early_Return |
goetz@6450 | 2792 | |
goetz@6450 | 2793 | handle_return: { |
duke@435 | 2794 | DECACHE_STATE(); |
duke@435 | 2795 | |
goetz@6450 | 2796 | bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return; |
goetz@6450 | 2797 | bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame; |
duke@435 | 2798 | Handle original_exception(THREAD, THREAD->pending_exception()); |
duke@435 | 2799 | Handle illegal_state_oop(THREAD, NULL); |
duke@435 | 2800 | |
duke@435 | 2801 | // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner |
duke@435 | 2802 | // in any following VM entries from freeing our live handles, but illegal_state_oop |
duke@435 | 2803 | // isn't really allocated yet and so doesn't become live until later and |
duke@435 | 2804 | // in unpredicatable places. Instead we must protect the places where we enter the |
duke@435 | 2805 | // VM. It would be much simpler (and safer) if we could allocate a real handle with |
duke@435 | 2806 | // a NULL oop in it and then overwrite the oop later as needed. This isn't |
duke@435 | 2807 | // unfortunately isn't possible. |
duke@435 | 2808 | |
duke@435 | 2809 | THREAD->clear_pending_exception(); |
duke@435 | 2810 | |
duke@435 | 2811 | // |
duke@435 | 2812 | // As far as we are concerned we have returned. If we have a pending exception |
duke@435 | 2813 | // that will be returned as this invocation's result. However if we get any |
duke@435 | 2814 | // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions |
duke@435 | 2815 | // will be our final result (i.e. monitor exception trumps a pending exception). |
duke@435 | 2816 | // |
duke@435 | 2817 | |
duke@435 | 2818 | // If we never locked the method (or really passed the point where we would have), |
duke@435 | 2819 | // there is no need to unlock it (or look for other monitors), since that |
duke@435 | 2820 | // could not have happened. |
duke@435 | 2821 | |
duke@435 | 2822 | if (THREAD->do_not_unlock()) { |
duke@435 | 2823 | |
duke@435 | 2824 | // Never locked, reset the flag now because obviously any caller must |
duke@435 | 2825 | // have passed their point of locking for us to have gotten here. |
duke@435 | 2826 | |
duke@435 | 2827 | THREAD->clr_do_not_unlock(); |
duke@435 | 2828 | } else { |
duke@435 | 2829 | // At this point we consider that we have returned. We now check that the |
duke@435 | 2830 | // locks were properly block structured. If we find that they were not |
duke@435 | 2831 | // used properly we will return with an illegal monitor exception. |
duke@435 | 2832 | // The exception is checked by the caller not the callee since this |
duke@435 | 2833 | // checking is considered to be part of the invocation and therefore |
duke@435 | 2834 | // in the callers scope (JVM spec 8.13). |
duke@435 | 2835 | // |
duke@435 | 2836 | // Another weird thing to watch for is if the method was locked |
duke@435 | 2837 | // recursively and then not exited properly. This means we must |
duke@435 | 2838 | // examine all the entries in reverse time(and stack) order and |
duke@435 | 2839 | // unlock as we find them. If we find the method monitor before |
duke@435 | 2840 | // we are at the initial entry then we should throw an exception. |
duke@435 | 2841 | // It is not clear the template based interpreter does this |
duke@435 | 2842 | // correctly |
duke@435 | 2843 | |
duke@435 | 2844 | BasicObjectLock* base = istate->monitor_base(); |
duke@435 | 2845 | BasicObjectLock* end = (BasicObjectLock*) istate->stack_base(); |
duke@435 | 2846 | bool method_unlock_needed = METHOD->is_synchronized(); |
duke@435 | 2847 | // We know the initial monitor was used for the method don't check that |
duke@435 | 2848 | // slot in the loop |
duke@435 | 2849 | if (method_unlock_needed) base--; |
duke@435 | 2850 | |
duke@435 | 2851 | // Check all the monitors to see they are unlocked. Install exception if found to be locked. |
duke@435 | 2852 | while (end < base) { |
duke@435 | 2853 | oop lockee = end->obj(); |
duke@435 | 2854 | if (lockee != NULL) { |
duke@435 | 2855 | BasicLock* lock = end->lock(); |
duke@435 | 2856 | markOop header = lock->displaced_header(); |
duke@435 | 2857 | end->set_obj(NULL); |
goetz@6445 | 2858 | |
goetz@6445 | 2859 | if (!lockee->mark()->has_bias_pattern()) { |
goetz@6445 | 2860 | // If it isn't recursive we either must swap old header or call the runtime |
goetz@6445 | 2861 | if (header != NULL) { |
goetz@6445 | 2862 | if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) { |
goetz@6445 | 2863 | // restore object for the slow case |
goetz@6445 | 2864 | end->set_obj(lockee); |
goetz@6445 | 2865 | { |
goetz@6445 | 2866 | // Prevent any HandleMarkCleaner from freeing our live handles |
goetz@6445 | 2867 | HandleMark __hm(THREAD); |
goetz@6445 | 2868 | CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end)); |
goetz@6445 | 2869 | } |
duke@435 | 2870 | } |
duke@435 | 2871 | } |
duke@435 | 2872 | } |
duke@435 | 2873 | // One error is plenty |
duke@435 | 2874 | if (illegal_state_oop() == NULL && !suppress_error) { |
duke@435 | 2875 | { |
duke@435 | 2876 | // Prevent any HandleMarkCleaner from freeing our live handles |
duke@435 | 2877 | HandleMark __hm(THREAD); |
duke@435 | 2878 | CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); |
duke@435 | 2879 | } |
duke@435 | 2880 | assert(THREAD->has_pending_exception(), "Lost our exception!"); |
duke@435 | 2881 | illegal_state_oop = THREAD->pending_exception(); |
duke@435 | 2882 | THREAD->clear_pending_exception(); |
duke@435 | 2883 | } |
duke@435 | 2884 | } |
duke@435 | 2885 | end++; |
duke@435 | 2886 | } |
duke@435 | 2887 | // Unlock the method if needed |
duke@435 | 2888 | if (method_unlock_needed) { |
duke@435 | 2889 | if (base->obj() == NULL) { |
duke@435 | 2890 | // The method is already unlocked this is not good. |
duke@435 | 2891 | if (illegal_state_oop() == NULL && !suppress_error) { |
duke@435 | 2892 | { |
duke@435 | 2893 | // Prevent any HandleMarkCleaner from freeing our live handles |
duke@435 | 2894 | HandleMark __hm(THREAD); |
duke@435 | 2895 | CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD)); |
duke@435 | 2896 | } |
duke@435 | 2897 | assert(THREAD->has_pending_exception(), "Lost our exception!"); |
duke@435 | 2898 | illegal_state_oop = THREAD->pending_exception(); |
duke@435 | 2899 | THREAD->clear_pending_exception(); |
duke@435 | 2900 | } |
duke@435 | 2901 | } else { |
duke@435 | 2902 | // |
duke@435 | 2903 | // The initial monitor is always used for the method |
duke@435 | 2904 | // However if that slot is no longer the oop for the method it was unlocked |
duke@435 | 2905 | // and reused by something that wasn't unlocked! |
duke@435 | 2906 | // |
duke@435 | 2907 | // deopt can come in with rcvr dead because c2 knows |
duke@435 | 2908 | // its value is preserved in the monitor. So we can't use locals[0] at all |
duke@435 | 2909 | // and must use first monitor slot. |
duke@435 | 2910 | // |
duke@435 | 2911 | oop rcvr = base->obj(); |
duke@435 | 2912 | if (rcvr == NULL) { |
duke@435 | 2913 | if (!suppress_error) { |
duke@435 | 2914 | VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), ""); |
duke@435 | 2915 | illegal_state_oop = THREAD->pending_exception(); |
duke@435 | 2916 | THREAD->clear_pending_exception(); |
duke@435 | 2917 | } |
goetz@6445 | 2918 | } else if (UseHeavyMonitors) { |
goetz@6445 | 2919 | { |
goetz@6445 | 2920 | // Prevent any HandleMarkCleaner from freeing our live handles. |
goetz@6445 | 2921 | HandleMark __hm(THREAD); |
goetz@6445 | 2922 | CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); |
goetz@6445 | 2923 | } |
goetz@6445 | 2924 | if (THREAD->has_pending_exception()) { |
goetz@6445 | 2925 | if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); |
goetz@6445 | 2926 | THREAD->clear_pending_exception(); |
goetz@6445 | 2927 | } |
duke@435 | 2928 | } else { |
duke@435 | 2929 | BasicLock* lock = base->lock(); |
duke@435 | 2930 | markOop header = lock->displaced_header(); |
duke@435 | 2931 | base->set_obj(NULL); |
goetz@6445 | 2932 | |
goetz@6445 | 2933 | if (!rcvr->mark()->has_bias_pattern()) { |
goetz@6445 | 2934 | base->set_obj(NULL); |
goetz@6445 | 2935 | // If it isn't recursive we either must swap old header or call the runtime |
goetz@6445 | 2936 | if (header != NULL) { |
goetz@6445 | 2937 | if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) { |
goetz@6445 | 2938 | // restore object for the slow case |
goetz@6445 | 2939 | base->set_obj(rcvr); |
goetz@6445 | 2940 | { |
goetz@6445 | 2941 | // Prevent any HandleMarkCleaner from freeing our live handles |
goetz@6445 | 2942 | HandleMark __hm(THREAD); |
goetz@6445 | 2943 | CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base)); |
goetz@6445 | 2944 | } |
goetz@6445 | 2945 | if (THREAD->has_pending_exception()) { |
goetz@6445 | 2946 | if (!suppress_error) illegal_state_oop = THREAD->pending_exception(); |
goetz@6445 | 2947 | THREAD->clear_pending_exception(); |
goetz@6445 | 2948 | } |
duke@435 | 2949 | } |
duke@435 | 2950 | } |
duke@435 | 2951 | } |
duke@435 | 2952 | } |
duke@435 | 2953 | } |
duke@435 | 2954 | } |
duke@435 | 2955 | } |
goetz@6445 | 2956 | // Clear the do_not_unlock flag now. |
goetz@6445 | 2957 | THREAD->clr_do_not_unlock(); |
duke@435 | 2958 | |
duke@435 | 2959 | // |
duke@435 | 2960 | // Notify jvmti/jvmdi |
duke@435 | 2961 | // |
duke@435 | 2962 | // NOTE: we do not notify a method_exit if we have a pending exception, |
duke@435 | 2963 | // including an exception we generate for unlocking checks. In the former |
duke@435 | 2964 | // case, JVMDI has already been notified by our call for the exception handler |
duke@435 | 2965 | // and in both cases as far as JVMDI is concerned we have already returned. |
duke@435 | 2966 | // If we notify it again JVMDI will be all confused about how many frames |
duke@435 | 2967 | // are still on the stack (4340444). |
duke@435 | 2968 | // |
duke@435 | 2969 | // NOTE Further! It turns out the the JVMTI spec in fact expects to see |
duke@435 | 2970 | // method_exit events whenever we leave an activation unless it was done |
duke@435 | 2971 | // for popframe. This is nothing like jvmdi. However we are passing the |
duke@435 | 2972 | // tests at the moment (apparently because they are jvmdi based) so rather |
duke@435 | 2973 | // than change this code and possibly fail tests we will leave it alone |
duke@435 | 2974 | // (with this note) in anticipation of changing the vm and the tests |
duke@435 | 2975 | // simultaneously. |
duke@435 | 2976 | |
duke@435 | 2977 | |
duke@435 | 2978 | // |
duke@435 | 2979 | suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL; |
duke@435 | 2980 | |
duke@435 | 2981 | |
duke@435 | 2982 | |
duke@435 | 2983 | #ifdef VM_JVMTI |
duke@435 | 2984 | if (_jvmti_interp_events) { |
duke@435 | 2985 | // Whenever JVMTI puts a thread in interp_only_mode, method |
duke@435 | 2986 | // entry/exit events are sent for that thread to track stack depth. |
duke@435 | 2987 | if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) { |
duke@435 | 2988 | { |
duke@435 | 2989 | // Prevent any HandleMarkCleaner from freeing our live handles |
duke@435 | 2990 | HandleMark __hm(THREAD); |
duke@435 | 2991 | CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD)); |
duke@435 | 2992 | } |
duke@435 | 2993 | } |
duke@435 | 2994 | } |
duke@435 | 2995 | #endif /* VM_JVMTI */ |
duke@435 | 2996 | |
duke@435 | 2997 | // |
duke@435 | 2998 | // See if we are returning any exception |
duke@435 | 2999 | // A pending exception that was pending prior to a possible popping frame |
duke@435 | 3000 | // overrides the popping frame. |
duke@435 | 3001 | // |
duke@435 | 3002 | assert(!suppress_error || suppress_error && illegal_state_oop() == NULL, "Error was not suppressed"); |
duke@435 | 3003 | if (illegal_state_oop() != NULL || original_exception() != NULL) { |
duke@435 | 3004 | // inform the frame manager we have no result |
duke@435 | 3005 | istate->set_msg(throwing_exception); |
duke@435 | 3006 | if (illegal_state_oop() != NULL) |
duke@435 | 3007 | THREAD->set_pending_exception(illegal_state_oop(), NULL, 0); |
duke@435 | 3008 | else |
duke@435 | 3009 | THREAD->set_pending_exception(original_exception(), NULL, 0); |
duke@435 | 3010 | UPDATE_PC_AND_RETURN(0); |
duke@435 | 3011 | } |
duke@435 | 3012 | |
duke@435 | 3013 | if (istate->msg() == popping_frame) { |
duke@435 | 3014 | // Make it simpler on the assembly code and set the message for the frame pop. |
duke@435 | 3015 | // returns |
duke@435 | 3016 | if (istate->prev() == NULL) { |
duke@435 | 3017 | // We must be returning to a deoptimized frame (because popframe only happens between |
duke@435 | 3018 | // two interpreted frames). We need to save the current arguments in C heap so that |
duke@435 | 3019 | // the deoptimized frame when it restarts can copy the arguments to its expression |
duke@435 | 3020 | // stack and re-execute the call. We also have to notify deoptimization that this |
twisti@1040 | 3021 | // has occurred and to pick the preserved args copy them to the deoptimized frame's |
duke@435 | 3022 | // java expression stack. Yuck. |
duke@435 | 3023 | // |
duke@435 | 3024 | THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize), |
duke@435 | 3025 | LOCALS_SLOT(METHOD->size_of_parameters() - 1)); |
duke@435 | 3026 | THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit); |
duke@435 | 3027 | } |
goetz@6450 | 3028 | } else { |
goetz@6450 | 3029 | istate->set_msg(return_from_method); |
duke@435 | 3030 | } |
bobv@2036 | 3031 | |
bobv@2036 | 3032 | // Normal return |
bobv@2036 | 3033 | // Advance the pc and return to frame manager |
bobv@2036 | 3034 | UPDATE_PC_AND_RETURN(1); |
duke@435 | 3035 | } /* handle_return: */ |
duke@435 | 3036 | |
duke@435 | 3037 | // This is really a fatal error return |
duke@435 | 3038 | |
duke@435 | 3039 | finish: |
duke@435 | 3040 | DECACHE_TOS(); |
duke@435 | 3041 | DECACHE_PC(); |
duke@435 | 3042 | |
duke@435 | 3043 | return; |
duke@435 | 3044 | } |
duke@435 | 3045 | |
duke@435 | 3046 | /* |
duke@435 | 3047 | * All the code following this point is only produced once and is not present |
duke@435 | 3048 | * in the JVMTI version of the interpreter |
duke@435 | 3049 | */ |
duke@435 | 3050 | |
duke@435 | 3051 | #ifndef VM_JVMTI |
duke@435 | 3052 | |
duke@435 | 3053 | // This constructor should only be used to contruct the object to signal |
duke@435 | 3054 | // interpreter initialization. All other instances should be created by |
duke@435 | 3055 | // the frame manager. |
duke@435 | 3056 | BytecodeInterpreter::BytecodeInterpreter(messages msg) { |
duke@435 | 3057 | if (msg != initialize) ShouldNotReachHere(); |
duke@435 | 3058 | _msg = msg; |
duke@435 | 3059 | _self_link = this; |
duke@435 | 3060 | _prev_link = NULL; |
duke@435 | 3061 | } |
duke@435 | 3062 | |
duke@435 | 3063 | // Inline static functions for Java Stack and Local manipulation |
duke@435 | 3064 | |
duke@435 | 3065 | // The implementations are platform dependent. We have to worry about alignment |
duke@435 | 3066 | // issues on some machines which can change on the same platform depending on |
duke@435 | 3067 | // whether it is an LP64 machine also. |
duke@435 | 3068 | address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) { |
duke@435 | 3069 | return (address) tos[Interpreter::expr_index_at(-offset)]; |
duke@435 | 3070 | } |
duke@435 | 3071 | |
duke@435 | 3072 | jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) { |
duke@435 | 3073 | return *((jint*) &tos[Interpreter::expr_index_at(-offset)]); |
duke@435 | 3074 | } |
duke@435 | 3075 | |
duke@435 | 3076 | jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) { |
duke@435 | 3077 | return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]); |
duke@435 | 3078 | } |
duke@435 | 3079 | |
duke@435 | 3080 | oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) { |
duke@435 | 3081 | return (oop)tos [Interpreter::expr_index_at(-offset)]; |
duke@435 | 3082 | } |
duke@435 | 3083 | |
duke@435 | 3084 | jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) { |
duke@435 | 3085 | return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d; |
duke@435 | 3086 | } |
duke@435 | 3087 | |
duke@435 | 3088 | jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) { |
duke@435 | 3089 | return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l; |
duke@435 | 3090 | } |
duke@435 | 3091 | |
duke@435 | 3092 | // only used for value types |
duke@435 | 3093 | void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value, |
duke@435 | 3094 | int offset) { |
duke@435 | 3095 | *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value; |
duke@435 | 3096 | } |
duke@435 | 3097 | |
duke@435 | 3098 | void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value, |
duke@435 | 3099 | int offset) { |
duke@435 | 3100 | *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value; |
duke@435 | 3101 | } |
duke@435 | 3102 | |
duke@435 | 3103 | void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value, |
duke@435 | 3104 | int offset) { |
duke@435 | 3105 | *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value; |
duke@435 | 3106 | } |
duke@435 | 3107 | |
duke@435 | 3108 | void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value, |
duke@435 | 3109 | int offset) { |
duke@435 | 3110 | *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value; |
duke@435 | 3111 | } |
duke@435 | 3112 | |
duke@435 | 3113 | // needs to be platform dep for the 32 bit platforms. |
duke@435 | 3114 | void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value, |
duke@435 | 3115 | int offset) { |
duke@435 | 3116 | ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value; |
duke@435 | 3117 | } |
duke@435 | 3118 | |
duke@435 | 3119 | void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos, |
duke@435 | 3120 | address addr, int offset) { |
duke@435 | 3121 | (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = |
duke@435 | 3122 | ((VMJavaVal64*)addr)->d); |
duke@435 | 3123 | } |
duke@435 | 3124 | |
duke@435 | 3125 | void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value, |
duke@435 | 3126 | int offset) { |
duke@435 | 3127 | ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; |
duke@435 | 3128 | ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value; |
duke@435 | 3129 | } |
duke@435 | 3130 | |
duke@435 | 3131 | void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos, |
duke@435 | 3132 | address addr, int offset) { |
duke@435 | 3133 | ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; |
duke@435 | 3134 | ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = |
duke@435 | 3135 | ((VMJavaVal64*)addr)->l; |
duke@435 | 3136 | } |
duke@435 | 3137 | |
duke@435 | 3138 | // Locals |
duke@435 | 3139 | |
duke@435 | 3140 | address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) { |
duke@435 | 3141 | return (address)locals[Interpreter::local_index_at(-offset)]; |
duke@435 | 3142 | } |
duke@435 | 3143 | jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) { |
duke@435 | 3144 | return (jint)locals[Interpreter::local_index_at(-offset)]; |
duke@435 | 3145 | } |
duke@435 | 3146 | jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) { |
duke@435 | 3147 | return (jfloat)locals[Interpreter::local_index_at(-offset)]; |
duke@435 | 3148 | } |
duke@435 | 3149 | oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) { |
duke@435 | 3150 | return (oop)locals[Interpreter::local_index_at(-offset)]; |
duke@435 | 3151 | } |
duke@435 | 3152 | jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) { |
duke@435 | 3153 | return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d; |
duke@435 | 3154 | } |
duke@435 | 3155 | jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) { |
duke@435 | 3156 | return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l; |
duke@435 | 3157 | } |
duke@435 | 3158 | |
duke@435 | 3159 | // Returns the address of locals value. |
duke@435 | 3160 | address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) { |
duke@435 | 3161 | return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); |
duke@435 | 3162 | } |
duke@435 | 3163 | address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) { |
duke@435 | 3164 | return ((address)&locals[Interpreter::local_index_at(-(offset+1))]); |
duke@435 | 3165 | } |
duke@435 | 3166 | |
duke@435 | 3167 | // Used for local value or returnAddress |
duke@435 | 3168 | void BytecodeInterpreter::set_locals_slot(intptr_t *locals, |
duke@435 | 3169 | address value, int offset) { |
duke@435 | 3170 | *((address*)&locals[Interpreter::local_index_at(-offset)]) = value; |
duke@435 | 3171 | } |
duke@435 | 3172 | void BytecodeInterpreter::set_locals_int(intptr_t *locals, |
duke@435 | 3173 | jint value, int offset) { |
duke@435 | 3174 | *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value; |
duke@435 | 3175 | } |
duke@435 | 3176 | void BytecodeInterpreter::set_locals_float(intptr_t *locals, |
duke@435 | 3177 | jfloat value, int offset) { |
duke@435 | 3178 | *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value; |
duke@435 | 3179 | } |
duke@435 | 3180 | void BytecodeInterpreter::set_locals_object(intptr_t *locals, |
duke@435 | 3181 | oop value, int offset) { |
duke@435 | 3182 | *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value; |
duke@435 | 3183 | } |
duke@435 | 3184 | void BytecodeInterpreter::set_locals_double(intptr_t *locals, |
duke@435 | 3185 | jdouble value, int offset) { |
duke@435 | 3186 | ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value; |
duke@435 | 3187 | } |
duke@435 | 3188 | void BytecodeInterpreter::set_locals_long(intptr_t *locals, |
duke@435 | 3189 | jlong value, int offset) { |
duke@435 | 3190 | ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value; |
duke@435 | 3191 | } |
duke@435 | 3192 | void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals, |
duke@435 | 3193 | address addr, int offset) { |
duke@435 | 3194 | ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d; |
duke@435 | 3195 | } |
duke@435 | 3196 | void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals, |
duke@435 | 3197 | address addr, int offset) { |
duke@435 | 3198 | ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l; |
duke@435 | 3199 | } |
duke@435 | 3200 | |
duke@435 | 3201 | void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset, |
duke@435 | 3202 | intptr_t* locals, int locals_offset) { |
duke@435 | 3203 | intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)]; |
duke@435 | 3204 | locals[Interpreter::local_index_at(-locals_offset)] = value; |
duke@435 | 3205 | } |
duke@435 | 3206 | |
duke@435 | 3207 | |
duke@435 | 3208 | void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset, |
duke@435 | 3209 | int to_offset) { |
duke@435 | 3210 | tos[Interpreter::expr_index_at(-to_offset)] = |
duke@435 | 3211 | (intptr_t)tos[Interpreter::expr_index_at(-from_offset)]; |
duke@435 | 3212 | } |
duke@435 | 3213 | |
duke@435 | 3214 | void BytecodeInterpreter::dup(intptr_t *tos) { |
duke@435 | 3215 | copy_stack_slot(tos, -1, 0); |
duke@435 | 3216 | } |
duke@435 | 3217 | void BytecodeInterpreter::dup2(intptr_t *tos) { |
duke@435 | 3218 | copy_stack_slot(tos, -2, 0); |
duke@435 | 3219 | copy_stack_slot(tos, -1, 1); |
duke@435 | 3220 | } |
duke@435 | 3221 | |
duke@435 | 3222 | void BytecodeInterpreter::dup_x1(intptr_t *tos) { |
duke@435 | 3223 | /* insert top word two down */ |
duke@435 | 3224 | copy_stack_slot(tos, -1, 0); |
duke@435 | 3225 | copy_stack_slot(tos, -2, -1); |
duke@435 | 3226 | copy_stack_slot(tos, 0, -2); |
duke@435 | 3227 | } |
duke@435 | 3228 | |
duke@435 | 3229 | void BytecodeInterpreter::dup_x2(intptr_t *tos) { |
duke@435 | 3230 | /* insert top word three down */ |
duke@435 | 3231 | copy_stack_slot(tos, -1, 0); |
duke@435 | 3232 | copy_stack_slot(tos, -2, -1); |
duke@435 | 3233 | copy_stack_slot(tos, -3, -2); |
duke@435 | 3234 | copy_stack_slot(tos, 0, -3); |
duke@435 | 3235 | } |
duke@435 | 3236 | void BytecodeInterpreter::dup2_x1(intptr_t *tos) { |
duke@435 | 3237 | /* insert top 2 slots three down */ |
duke@435 | 3238 | copy_stack_slot(tos, -1, 1); |
duke@435 | 3239 | copy_stack_slot(tos, -2, 0); |
duke@435 | 3240 | copy_stack_slot(tos, -3, -1); |
duke@435 | 3241 | copy_stack_slot(tos, 1, -2); |
duke@435 | 3242 | copy_stack_slot(tos, 0, -3); |
duke@435 | 3243 | } |
duke@435 | 3244 | void BytecodeInterpreter::dup2_x2(intptr_t *tos) { |
duke@435 | 3245 | /* insert top 2 slots four down */ |
duke@435 | 3246 | copy_stack_slot(tos, -1, 1); |
duke@435 | 3247 | copy_stack_slot(tos, -2, 0); |
duke@435 | 3248 | copy_stack_slot(tos, -3, -1); |
duke@435 | 3249 | copy_stack_slot(tos, -4, -2); |
duke@435 | 3250 | copy_stack_slot(tos, 1, -3); |
duke@435 | 3251 | copy_stack_slot(tos, 0, -4); |
duke@435 | 3252 | } |
duke@435 | 3253 | |
duke@435 | 3254 | |
duke@435 | 3255 | void BytecodeInterpreter::swap(intptr_t *tos) { |
duke@435 | 3256 | // swap top two elements |
duke@435 | 3257 | intptr_t val = tos[Interpreter::expr_index_at(1)]; |
duke@435 | 3258 | // Copy -2 entry to -1 |
duke@435 | 3259 | copy_stack_slot(tos, -2, -1); |
duke@435 | 3260 | // Store saved -1 entry into -2 |
duke@435 | 3261 | tos[Interpreter::expr_index_at(2)] = val; |
duke@435 | 3262 | } |
duke@435 | 3263 | // -------------------------------------------------------------------------------- |
duke@435 | 3264 | // Non-product code |
duke@435 | 3265 | #ifndef PRODUCT |
duke@435 | 3266 | |
duke@435 | 3267 | const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) { |
duke@435 | 3268 | switch (msg) { |
duke@435 | 3269 | case BytecodeInterpreter::no_request: return("no_request"); |
duke@435 | 3270 | case BytecodeInterpreter::initialize: return("initialize"); |
duke@435 | 3271 | // status message to C++ interpreter |
duke@435 | 3272 | case BytecodeInterpreter::method_entry: return("method_entry"); |
duke@435 | 3273 | case BytecodeInterpreter::method_resume: return("method_resume"); |
duke@435 | 3274 | case BytecodeInterpreter::got_monitors: return("got_monitors"); |
duke@435 | 3275 | case BytecodeInterpreter::rethrow_exception: return("rethrow_exception"); |
duke@435 | 3276 | // requests to frame manager from C++ interpreter |
duke@435 | 3277 | case BytecodeInterpreter::call_method: return("call_method"); |
duke@435 | 3278 | case BytecodeInterpreter::return_from_method: return("return_from_method"); |
duke@435 | 3279 | case BytecodeInterpreter::more_monitors: return("more_monitors"); |
duke@435 | 3280 | case BytecodeInterpreter::throwing_exception: return("throwing_exception"); |
duke@435 | 3281 | case BytecodeInterpreter::popping_frame: return("popping_frame"); |
duke@435 | 3282 | case BytecodeInterpreter::do_osr: return("do_osr"); |
duke@435 | 3283 | // deopt |
duke@435 | 3284 | case BytecodeInterpreter::deopt_resume: return("deopt_resume"); |
duke@435 | 3285 | case BytecodeInterpreter::deopt_resume2: return("deopt_resume2"); |
duke@435 | 3286 | default: return("BAD MSG"); |
duke@435 | 3287 | } |
duke@435 | 3288 | } |
// Debugging aid (non-product builds): dump every field of this interpreter
// state record to the tty.  The set of fields printed varies with the
// platform-specific state layout (#ifdef SPARC, !ZERO below).
void
BytecodeInterpreter::print() {
  tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
  tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
  tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
  tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
  {
    // ResourceMark bounds the lifetime of the C string returned by
    // name_and_sig_as_C_string() (resource-area allocation).
    ResourceMark rm;
    char *method_name = _method->name_and_sig_as_C_string();
    tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
  }
  tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
  tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
  tty->print_cr("msg: %s", C_msg(this->_msg));
  // _result is a union: the _to_call and _osr views are both printed here
  // regardless of which one is currently live.
  tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
  tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
  tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
  tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
  tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
  tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
  tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
  tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
  tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
  tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
#ifdef SPARC
  tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
  tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
  tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
  tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
#endif
#if !defined(ZERO)
  tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
#endif // !ZERO
  tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
}
duke@435 | 3324 | |
duke@435 | 3325 | extern "C" { |
goetz@6445 | 3326 | void PI(uintptr_t arg) { |
goetz@6445 | 3327 | ((BytecodeInterpreter*)arg)->print(); |
goetz@6445 | 3328 | } |
duke@435 | 3329 | } |
duke@435 | 3330 | #endif // PRODUCT |
duke@435 | 3331 | |
duke@435 | 3332 | #endif // JVMTI |
duke@435 | 3333 | #endif // CC_INTERP |