/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/bytecodeInterpreter.inline.hpp"
#include "interpreter/bytecodeInterpreterProfiling.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodCounters.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "utilities/exceptions.hpp"
#ifdef TARGET_OS_ARCH_linux_x86
# include "orderAccess_linux_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "orderAccess_linux_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "orderAccess_linux_zero.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "orderAccess_solaris_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "orderAccess_solaris_sparc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "orderAccess_windows_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "orderAccess_linux_arm.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "orderAccess_linux_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "orderAccess_aix_ppc.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "orderAccess_bsd_x86.inline.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "orderAccess_bsd_zero.inline.hpp"
#endif
#ifdef CC_INTERP
/*
 * USELABELS - If using GCC, then use labels for the opcode dispatching
 * rather than a switch statement. This improves performance because it
 * gives us the opportunity to have the instructions that calculate the
 * next opcode to jump to be intermixed with the rest of the instructions
 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
 */
#undef USELABELS
#ifdef __GNUC__
/*
   ASSERT signifies debugging. It is much easier to step through bytecodes if we
   don't use the computed goto approach.
*/
#ifndef ASSERT
#define USELABELS
#endif
#endif
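
// Illustrative sketch (not part of this file; label names here are
// hypothetical): with USELABELS the dispatch loop uses GCC's computed-goto
// extension instead of a switch, roughly
//
//   static const void* table[256] = { &&do_nop, &&do_aconst_null, /* ... */ };
//   goto *table[opcode];                  // direct indexed jump
// do_nop:  /* handler body */ goto *table[*++pc];
//
// Each handler then ends with its own dispatch jump, which tends to predict
// better than one shared switch branch; the real table (opclabels_data)
// appears inside BytecodeInterpreter::run() below.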
#undef CASE
#ifdef USELABELS
#define CASE(opcode) opc ## opcode
#define DEFAULT opc_default
#else
#define CASE(opcode) case Bytecodes:: opcode
#define DEFAULT default
#endif
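
// Illustrative expansion: CASE(_nop) becomes the label "opc_nop:" under
// USELABELS, or "case Bytecodes::_nop:" otherwise, so the same opcode
// bodies below serve both dispatch schemes.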
/*
 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
 * opcode before going back to the top of the while loop, rather than having
 * the top of the while loop handle it. This provides a better opportunity
 * for instruction scheduling. Some compilers just do this prefetch
 * automatically. Some actually end up with worse performance if you
 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
 */
#undef PREFETCH_OPCCODE
#define PREFETCH_OPCCODE
/*
  Interpreter safepoint: it is expected that the interpreter will have no
  handles of its own creation live at an interpreter safepoint. Therefore we
  run a HandleMarkCleaner and trash all handles allocated in the call chain
  since the JavaCalls::call_helper invocation that initiated the chain.
  There really shouldn't be any handles remaining to trash but this is cheap
  in relation to a safepoint.
*/
#define SAFEPOINT                                                                 \
    if ( SafepointSynchronize::is_synchronizing()) {                              \
        {                                                                         \
          /* zap freed handles rather than GC'ing them */                         \
          HandleMarkCleaner __hmc(THREAD);                                        \
        }                                                                         \
        CALL_VM(SafepointSynchronize::block(THREAD), handle_exception);           \
    }
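
// Illustrative note: SAFEPOINT is expanded at backward branches and method
// returns below. When SafepointSynchronize::is_synchronizing() is true, the
// HandleMarkCleaner scope zaps handles allocated since
// JavaCalls::call_helper, then the thread parks in
// SafepointSynchronize::block() until the safepoint operation completes.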
/*
 * VM_JAVA_ERROR - Macro for throwing a java exception from
 * the interpreter loop. Should really be a CALL_VM but there
 * is no entry point to do the transition to vm so we just
 * do it by hand here.
 */
#define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                             \
    DECACHE_STATE();                                                              \
    SET_LAST_JAVA_FRAME();                                                        \
    {                                                                             \
       InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI());          \
       ThreadInVMfromJava trans(THREAD);                                          \
       Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg);             \
    }                                                                             \
    RESET_LAST_JAVA_FRAME();                                                      \
    CACHE_STATE();

// Normal throw of a java error.
#define VM_JAVA_ERROR(name, msg, note_a_trap)                                     \
    VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap)                                 \
    goto handle_exception;
#ifdef PRODUCT
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)
#else
#define DO_UPDATE_INSTRUCTION_COUNT(opcode)                                                          \
{                                                                                                    \
    BytecodeCounter::_counter_value++;                                                               \
    BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++;                                         \
    if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
    if (TraceBytecodes) {                                                                            \
      CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0,                                         \
                                    topOfStack[Interpreter::expr_index_at(1)],                       \
                                    topOfStack[Interpreter::expr_index_at(2)]),                      \
                                    handle_exception);                                               \
    }                                                                                                \
}
#endif
#undef DEBUGGER_SINGLE_STEP_NOTIFY
#ifdef VM_JVMTI
/* NOTE: (kbr) This macro must be called AFTER the PC has been
   incremented. JvmtiExport::at_single_stepping_point() may cause a
   breakpoint opcode to get inserted at the current PC to allow the
   debugger to coalesce single-step events.

   As a result, if we call at_single_stepping_point() we refetch the opcode
   afterwards to get the current opcode. This will override any other
   prefetching that might have occurred.
*/
#define DEBUGGER_SINGLE_STEP_NOTIFY()                                            \
{                                                                                \
    if (_jvmti_interp_events) {                                                  \
      if (JvmtiExport::should_post_single_step()) {                              \
        DECACHE_STATE();                                                         \
        SET_LAST_JAVA_FRAME();                                                   \
        ThreadInVMfromJava trans(THREAD);                                        \
        JvmtiExport::at_single_stepping_point(THREAD,                            \
                                              istate->method(),                  \
                                              pc);                               \
        RESET_LAST_JAVA_FRAME();                                                 \
        CACHE_STATE();                                                           \
        if (THREAD->pop_frame_pending() &&                                       \
            !THREAD->pop_frame_in_process()) {                                   \
          goto handle_Pop_Frame;                                                 \
        }                                                                        \
        if (THREAD->jvmti_thread_state() &&                                      \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) {               \
          goto handle_Early_Return;                                              \
        }                                                                        \
        opcode = *pc;                                                            \
      }                                                                          \
    }                                                                            \
}
#else
#define DEBUGGER_SINGLE_STEP_NOTIFY()
#endif
/*
 * CONTINUE - Macro for executing the next opcode.
 */
#undef CONTINUE
#ifdef USELABELS
// Have to do the dispatch this way in C++ because otherwise gcc complains about crossing an
// initialization (which is the initialization of the table pointer...)
#define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        DISPATCH(opcode);                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define CONTINUE {                              \
        opcode = *pc;                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#else
#define CONTINUE {                              \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);    \
        DEBUGGER_SINGLE_STEP_NOTIFY();          \
        continue;                               \
    }
#endif
#endif
#define UPDATE_PC(opsize) {pc += opsize; }
/*
 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
 */
#undef UPDATE_PC_AND_TOS
#define UPDATE_PC_AND_TOS(opsize, stack) \
    {pc += opsize; MORE_STACK(stack); }

/*
 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
 * and executing the next opcode. It's somewhat similar to the combination
 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
 */
#undef UPDATE_PC_AND_TOS_AND_CONTINUE
#ifdef USELABELS
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        DISPATCH(opcode);                                       \
    }
#else
#ifdef PREFETCH_OPCCODE
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; opcode = *pc; MORE_STACK(stack);          \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize; opcode = *pc;                             \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#else
#define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) {         \
        pc += opsize; MORE_STACK(stack);                        \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }

#define UPDATE_PC_AND_CONTINUE(opsize) {                        \
        pc += opsize;                                           \
        DO_UPDATE_INSTRUCTION_COUNT(opcode);                    \
        DEBUGGER_SINGLE_STEP_NOTIFY();                          \
        goto do_continue;                                       \
    }
#endif /* PREFETCH_OPCCODE */
#endif /* USELABELS */
// About to call a new method: save the adjusted pc and return to the frame manager
#define UPDATE_PC_AND_RETURN(opsize)  \
   DECACHE_TOS();                     \
   istate->set_bcp(pc+opsize);        \
   return;
#define METHOD istate->method()
#define GET_METHOD_COUNTERS(res)                                                       \
  res = METHOD->method_counters();                                                     \
  if (res == NULL) {                                                                   \
    CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
  }

#define OSR_REQUEST(res, branch_pc) \
            CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
/*
 * For those opcodes that need to have a GC point on a backwards branch
 */

// Backedge counting is kind of strange. The asm interpreter will increment
// the backedge counter as a separate counter but it does its comparisons
// to the (scaled) sum of the invocation counter and backedge count to make
// a decision. Seems kind of odd to sum them together like that.

// skip is the delta from the current bcp/bci to the target, branch_pc is the pre-branch bcp
#define DO_BACKEDGE_CHECKS(skip, branch_pc)                                         \
    if ((skip) <= 0) {                                                              \
      MethodCounters* mcs;                                                          \
      GET_METHOD_COUNTERS(mcs);                                                     \
      if (UseLoopCounter) {                                                         \
        bool do_OSR = UseOnStackReplacement;                                        \
        mcs->backedge_counter()->increment();                                       \
        if (ProfileInterpreter) {                                                   \
          BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);                   \
          /* Check for overflow against MDO count. */                               \
          do_OSR = do_OSR                                                           \
            && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
            /* When ProfileInterpreter is on, the backedge_count comes     */       \
            /* from the methodDataOop, which value does not get reset on   */       \
            /* the call to frequency_counter_overflow(). To avoid          */       \
            /* excessive calls to the overflow routine while the method is */       \
            /* being compiled, add a second test to make sure the overflow */       \
            /* function is called only once every overflow_frequency.      */       \
            && (!(mdo_last_branch_taken_count & 1023));                             \
        } else {                                                                    \
          /* check for overflow of backedge counter */                              \
          do_OSR = do_OSR                                                           \
            && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
        }                                                                           \
        if (do_OSR) {                                                               \
          nmethod* osr_nmethod;                                                     \
          OSR_REQUEST(osr_nmethod, branch_pc);                                      \
          if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
            intptr_t* buf;                                                          \
            /* Call OSR migration with last java frame only, no checks. */          \
            CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD));      \
            istate->set_msg(do_osr);                                                \
            istate->set_osr_buf((address)buf);                                      \
            istate->set_osr_entry(osr_nmethod->osr_entry());                        \
            return;                                                                 \
          }                                                                         \
        }                                                                           \
      }  /* UseCompiler ... */                                                      \
      SAFEPOINT;                                                                    \
    }
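
// Illustrative note: a backward branch comes from Java source such as
//   for (int i = 0; i < n; i++) { /* ... */ }
// whose compiled goto targets a bci before the branch itself, so skip <= 0
// above; each such iteration bumps the backedge counter and may trigger an
// OSR compilation request once the (scaled) counters cross the limit.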
/*
 * Macros for caching and flushing the interpreter state. Some local
 * variables need to be flushed out to the frame before we do certain
 * things (like pushing frames or becoming gc safe) and some need to
 * be recached later (like after popping a frame). We could use one
 * macro to cache or decache everything, but this would be less than
 * optimal because we don't always need to cache or decache everything
 * because some things we know are already cached or decached.
 */
#undef DECACHE_TOS
#undef CACHE_TOS
#undef CACHE_PREV_TOS
#define DECACHE_TOS()    istate->set_stack(topOfStack);

#define CACHE_TOS()      topOfStack = (intptr_t *)istate->stack();

#undef DECACHE_PC
#undef CACHE_PC
#define DECACHE_PC()    istate->set_bcp(pc);
#define CACHE_PC()      pc = istate->bcp();
#define CACHE_CP()      cp = istate->constants();
#define CACHE_LOCALS()  locals = istate->locals();
#undef CACHE_FRAME
#define CACHE_FRAME()

// BCI() returns the current bytecode-index.
#undef  BCI
#define BCI()           ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
/*
 * CHECK_NULL - Macro for throwing a NullPointerException if the object
 * passed is a null ref.
 * On some architectures/platforms it should be possible to do this implicitly
 */
#undef CHECK_NULL
#define CHECK_NULL(obj_)                                                                       \
        if ((obj_) == NULL) {                                                                  \
          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); \
        }                                                                                      \
        VERIFY_OOP(obj_)

#define VMdoubleConstZero() 0.0
#define VMdoubleConstOne() 1.0
#define VMlongConstZero() (max_jlong-max_jlong)
#define VMlongConstOne() ((max_jlong-max_jlong)+1)

/*
 * Alignment
 */
#define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
// Decache the interpreter state that the interpreter modifies directly (GC modifies it indirectly)
#define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();

// Reload interpreter state after calling the VM or a possible GC
#define CACHE_STATE()   \
        CACHE_TOS();    \
        CACHE_PC();     \
        CACHE_CP();     \
        CACHE_LOCALS();

// Call the VM with last java frame only.
#define CALL_VM_NAKED_LJF(func)                                    \
        DECACHE_STATE();                                           \
        SET_LAST_JAVA_FRAME();                                     \
        func;                                                      \
        RESET_LAST_JAVA_FRAME();                                   \
        CACHE_STATE();
// Call the VM. Don't check for pending exceptions.
#define CALL_VM_NOCHECK(func)                                      \
        CALL_VM_NAKED_LJF(func)                                    \
        if (THREAD->pop_frame_pending() &&                         \
            !THREAD->pop_frame_in_process()) {                     \
          goto handle_Pop_Frame;                                   \
        }                                                          \
        if (THREAD->jvmti_thread_state() &&                        \
            THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
          goto handle_Early_Return;                                \
        }

// Call the VM and check for pending exceptions
#define CALL_VM(func, label) {                                     \
          CALL_VM_NOCHECK(func);                                   \
          if (THREAD->has_pending_exception()) goto label;         \
        }
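
// Illustrative expansion: a use such as
//   CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
// decaches pc/tos into istate, publishes the last Java frame, runs the
// runtime call, restores the cached state, honors any pending PopFrame or
// early-return request, and finally branches to handle_exception if the
// thread now has a pending exception.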
/*
 * BytecodeInterpreter::run(interpreterState istate)
 * BytecodeInterpreter::runWithChecks(interpreterState istate)
 *
 * The real deal. This is where byte codes actually get interpreted.
 * Basically it's a big while loop that iterates until we return from
 * the method passed in.
 *
 * runWithChecks is used when JVMTI is enabled.
 *
 */
#if defined(VM_JVMTI)
void
BytecodeInterpreter::runWithChecks(interpreterState istate) {
#else
void
BytecodeInterpreter::run(interpreterState istate) {
#endif
  // In order to simplify some tests based on switches set at runtime
  // we invoke the interpreter a single time after switches are enabled
  // and set simpler-to-test variables rather than method calls or complex
  // boolean expressions.

  static int initialized = 0;
  static int checkit = 0;
  static intptr_t* c_addr = NULL;
  static intptr_t  c_value;

  if (checkit && *c_addr != c_value) {
    os::breakpoint();
  }
#ifdef VM_JVMTI
  static bool _jvmti_interp_events = 0;
#endif

  static int _compiling;  // (UseCompiler || CountCompiledCalls)
#ifdef ASSERT
  if (istate->_msg != initialize) {
    // We have a problem here if we are running with a pre-hsx24 JDK (for example during bootstrap)
    // because in that case, EnableInvokeDynamic is true by default but will be later switched off
    // if java_lang_invoke_MethodHandle::compute_offsets() detects that the JDK only has the classes
    // for the old JSR292 implementation.
    // This leads to a situation where 'istate->_stack_limit' always accounts for
    // methodOopDesc::extra_stack_entries() because it is computed in
    // CppInterpreterGenerator::generate_compute_interpreter_state() which was generated while
    // EnableInvokeDynamic was still true. On the other hand, istate->_method->max_stack() doesn't
    // account for extra_stack_entries() anymore because at the time when it is called
    // EnableInvokeDynamic was already set to false.
    // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was
    // switched off because of the wrong classes.
    if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) {
      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
    } else {
      const int extra_stack_entries = Method::extra_stack_entries_for_jsr292;
      assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries
                                                                  + 1), "bad stack limit");
    }
#ifndef SHARK
    IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
#endif // !SHARK
  }
  // Verify linkages.
  interpreterState l = istate;
  do {
    assert(l == l->_self_link, "bad link");
    l = l->_prev_link;
  } while (l != NULL);
  // Screwups with stack management usually cause us to overwrite istate
  // save a copy so we can verify it.
  interpreterState orig = istate;
#endif
  register intptr_t*        topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
  register address          pc = istate->bcp();
  register jubyte opcode;
  register intptr_t*        locals = istate->locals();
  register ConstantPoolCache*    cp = istate->constants(); // method()->constants()->cache()
#ifdef LOTS_OF_REGS
  register JavaThread*      THREAD = istate->thread();
#else
#undef THREAD
#define THREAD istate->thread()
#endif
#ifdef USELABELS
  const static void* const opclabels_data[256] = {
/* 0x00 */ &&opc_nop,     &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
/* 0x04 */ &&opc_iconst_1,&&opc_iconst_2,   &&opc_iconst_3, &&opc_iconst_4,
/* 0x08 */ &&opc_iconst_5,&&opc_lconst_0,   &&opc_lconst_1, &&opc_fconst_0,
/* 0x0C */ &&opc_fconst_1,&&opc_fconst_2,   &&opc_dconst_0, &&opc_dconst_1,

/* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc,    &&opc_ldc_w,
/* 0x14 */ &&opc_ldc2_w, &&opc_iload,  &&opc_lload,  &&opc_fload,
/* 0x18 */ &&opc_dload,  &&opc_aload,  &&opc_iload_0,&&opc_iload_1,
/* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,

/* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
/* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
/* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
/* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,

/* 0x30 */ &&opc_faload,  &&opc_daload,  &&opc_aaload,  &&opc_baload,
/* 0x34 */ &&opc_caload,  &&opc_saload,  &&opc_istore,  &&opc_lstore,
/* 0x38 */ &&opc_fstore,  &&opc_dstore,  &&opc_astore,  &&opc_istore_0,
/* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,

/* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
/* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
/* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
/* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,

/* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
/* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
/* 0x58 */ &&opc_pop2,   &&opc_dup,    &&opc_dup_x1, &&opc_dup_x2,
/* 0x5C */ &&opc_dup2,   &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,

/* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
/* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
/* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
/* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,

/* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
/* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
/* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
/* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,

/* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
/* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
/* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
/* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,

/* 0x90 */ &&opc_d2f,  &&opc_i2b,  &&opc_i2c,  &&opc_i2s,
/* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
/* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
/* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,

/* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge,  &&opc_if_icmpgt,
/* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne,  &&opc_goto,
/* 0xA8 */ &&opc_jsr,      &&opc_ret,      &&opc_tableswitch,&&opc_lookupswitch,
/* 0xAC */ &&opc_ireturn,  &&opc_lreturn,  &&opc_freturn,    &&opc_dreturn,

/* 0xB0 */ &&opc_areturn,     &&opc_return,         &&opc_getstatic,    &&opc_putstatic,
/* 0xB4 */ &&opc_getfield,    &&opc_putfield,       &&opc_invokevirtual,&&opc_invokespecial,
/* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
/* 0xBC */ &&opc_newarray,    &&opc_anewarray,      &&opc_arraylength,  &&opc_athrow,

/* 0xC0 */ &&opc_checkcast,   &&opc_instanceof,     &&opc_monitorenter, &&opc_monitorexit,
/* 0xC4 */ &&opc_wide,        &&opc_multianewarray, &&opc_ifnull,       &&opc_ifnonnull,
/* 0xC8 */ &&opc_goto_w,      &&opc_jsr_w,          &&opc_breakpoint,   &&opc_default,
/* 0xCC */ &&opc_default,     &&opc_default,        &&opc_default,      &&opc_default,

/* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,

/* 0xE0 */ &&opc_default,     &&opc_default,   &&opc_default,     &&opc_default,
/* 0xE4 */ &&opc_default,     &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
/* 0xE8 */ &&opc_invokehandle,&&opc_default,   &&opc_default,     &&opc_default,
/* 0xEC */ &&opc_default,     &&opc_default,   &&opc_default,     &&opc_default,

/* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
  };
  register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
#endif /* USELABELS */
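
  // Illustrative note: with USELABELS, DISPATCH(opcode) above becomes
  //   goto *(void*)dispatch_table[opcode];
  // i.e. each fetched bytecode indexes opclabels_data directly, so opcode
  // 0x00 jumps straight to the opc_nop label with no switch comparison.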
#ifdef ASSERT
  // this will trigger a VERIFY_OOP on entry
  if (istate->msg() != initialize && ! METHOD->is_static()) {
    oop rcvr = LOCALS_OBJECT(0);
    VERIFY_OOP(rcvr);
  }
#endif
// #define HACK
#ifdef HACK
  bool interesting = false;
#endif // HACK

  /* QQQ this should be a stack method so we don't know actual direction */
  guarantee(istate->msg() == initialize ||
            topOfStack >= istate->stack_limit() &&
            topOfStack < istate->stack_base(),
            "Stack top out of range");

#ifdef CC_INTERP_PROFILE
  // MethodData's last branch taken count.
  uint mdo_last_branch_taken_count = 0;
#else
  const uint mdo_last_branch_taken_count = 0;
#endif
  switch (istate->msg()) {
    case initialize: {
      if (initialized++) ShouldNotReachHere(); // Only one initialize call.
      _compiling = (UseCompiler || CountCompiledCalls);
#ifdef VM_JVMTI
      _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
#endif
      return;
    }
    break;
    case method_entry: {
      THREAD->set_do_not_unlock();
      // count invocations
      assert(initialized, "Interpreter not initialized");
      if (_compiling) {
        MethodCounters* mcs;
        GET_METHOD_COUNTERS(mcs);
        if (ProfileInterpreter) {
          METHOD->increment_interpreter_invocation_count(THREAD);
        }
        mcs->invocation_counter()->increment();
        if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
          CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
          // We no longer retry on a counter overflow.
        }
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
        SAFEPOINT;
      }

      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // initialize
        os::breakpoint();
      }

#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("entering: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack),
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // Lock method if synchronized.
      if (METHOD->is_synchronized()) {
        // oop rcvr = locals[0].j.r;
        oop rcvr;
        if (METHOD->is_static()) {
          rcvr = METHOD->constants()->pool_holder()->java_mirror();
        } else {
          rcvr = LOCALS_OBJECT(0);
          VERIFY_OOP(rcvr);
        }
        // The initial monitor is ours for the taking.
        // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
        BasicObjectLock* mon = &istate->monitor_base()[-1];
        mon->set_obj(rcvr);
        bool success = false;
        uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
        markOop mark = rcvr->mark();
        intptr_t hash = (intptr_t) markOopDesc::no_hash;
        // Implies UseBiasedLocking.
        if (mark->has_bias_pattern()) {
          uintptr_t thread_ident;
          uintptr_t anticipated_bias_locking_value;
          thread_ident = (uintptr_t)istate->thread();
          anticipated_bias_locking_value =
            (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
            ~((uintptr_t) markOopDesc::age_mask_in_place);
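
          // Illustrative: the XOR above compares the object's mark word
          // against (prototype header | current thread), with the age bits
          // masked out; a zero result therefore means "already biased to this
          // thread in the current epoch", and the non-zero patterns tested
          // below distinguish a revoked bias, a stale epoch, and an
          // anonymous bias.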
          if (anticipated_bias_locking_value == 0) {
            // Already biased towards this thread, nothing to do.
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::biased_lock_entry_count_addr())++;
            }
            success = true;
          } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
            // Try to revoke bias.
            markOop header = rcvr->klass()->prototype_header();
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics)
                (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
            // Try to rebias.
            markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
            if (hash != markOopDesc::no_hash) {
              new_header = new_header->copy_set_hash(hash);
            }
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::rebiased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          } else {
            // Try to bias towards thread in case object is anonymously biased.
            markOop header = (markOop) ((uintptr_t) mark &
                                        ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                         (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
            if (hash != markOopDesc::no_hash) {
              header = header->copy_set_hash(hash);
            }
            markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
            // Debugging hint.
            DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
            if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
              if (PrintBiasedLockingStatistics) {
                (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
              }
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
            success = true;
          }
        }

        // Traditional lightweight locking.
        if (!success) {
          markOop displaced = rcvr->mark()->set_unlocked();
          mon->lock()->set_displaced_header(displaced);
          bool call_vm = UseHeavyMonitors;
          if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
            // Is it simple recursive case?
            if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
              mon->lock()->set_displaced_header(NULL);
            } else {
              CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
            }
          }
        }
      }
      THREAD->clr_do_not_unlock();

      // Notify jvmti
#ifdef VM_JVMTI
      if (_jvmti_interp_events) {
        // Whenever JVMTI puts a thread in interp_only_mode, method
        // entry/exit events are sent for that thread to track stack depth.
        if (THREAD->is_interp_only_mode()) {
          CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
                  handle_exception);
        }
      }
#endif /* VM_JVMTI */

      goto run;
    }
    case popping_frame: {
      // returned from a java call to pop the frame, restart the call
      // clear the message so we don't confuse ourselves later
      assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
      istate->set_msg(no_request);
      if (_compiling) {
        // Set MDX back to the ProfileData of the invoke bytecode that will be
        // restarted.
        SET_MDX(NULL);
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      THREAD->clr_pop_frame_in_process();
      goto run;
    }
    case method_resume: {
      if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
        // resume
        os::breakpoint();
      }
#ifdef HACK
      {
        ResourceMark rm;
        char *method_name = istate->method()->name_and_sig_as_C_string();
        if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
          tty->print_cr("resume: depth %d bci: %d",
                        (istate->_stack_base - istate->_stack) ,
                        istate->_bcp - istate->_method->code_base());
          interesting = true;
        }
      }
#endif // HACK
      // returned from a java call, continue executing.
      if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
        goto handle_Pop_Frame;
      }
      if (THREAD->jvmti_thread_state() &&
          THREAD->jvmti_thread_state()->is_earlyret_pending()) {
        goto handle_Early_Return;
      }

      if (THREAD->has_pending_exception()) goto handle_exception;
      // Update the pc by the saved amount of the invoke bytecode size
      UPDATE_PC(istate->bcp_advance());

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
    case deopt_resume2: {
      // Returned from an opcode that will reexecute. Deopt was
      // a result of a PopFrame request.
      //
      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }

    case deopt_resume: {
      // Returned from an opcode that has completed. The stack has
      // the result; all we need to do is skip across the bytecode
      // and continue (assuming there is no exception pending)
      //
      // compute continuation length
      //
      // Note: it is possible to deopt at a return_register_finalizer opcode
      // because this requires entering the vm to do the registering. While the
      // opcode is complete, we can't advance because there are no more opcodes,
      // much like trying to deopt at a poll return. In that case we simply
      // get out of here.
      //
      if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
        // this will do the right thing even if an exception is pending.
        goto handle_return;
      }
      UPDATE_PC(Bytecodes::length_at(METHOD, pc));
      if (THREAD->has_pending_exception()) goto handle_exception;

      if (_compiling) {
        // Get or create profile data. Check for pending (async) exceptions.
        BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
      }
      goto run;
    }
    case got_monitors: {
      // continue locking now that we have a monitor to use
      // we expect to find newly allocated monitor at the "top" of the monitor stack.
      oop lockee = STACK_OBJECT(-1);
      VERIFY_OOP(lockee);
      // dereferencing lockee ought to provoke an implicit null check
      // find a free monitor
      BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
      assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
      entry->set_obj(lockee);
      bool success = false;
      uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
      markOop mark = lockee->mark();
      intptr_t hash = (intptr_t) markOopDesc::no_hash;
      // implies UseBiasedLocking
      if (mark->has_bias_pattern()) {
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
            }
          }
        } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        } else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics) {
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
            }
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS(1, -1);
      goto run;
    }
    default: {
      fatal("Unexpected message from frame manager");
    }
  }

run:

  DO_UPDATE_INSTRUCTION_COUNT(*pc)
  DEBUGGER_SINGLE_STEP_NOTIFY();
#ifdef PREFETCH_OPCCODE
  opcode = *pc;  /* prefetch first opcode */
#endif
#ifndef USELABELS
  while (1)
#endif
  {
#ifndef PREFETCH_OPCCODE
      opcode = *pc;
#endif
      // Seems like this happens twice per opcode. At worst this is only
      // needed at entry to the loop.
      // DEBUGGER_SINGLE_STEP_NOTIFY();
      /* Using this label avoids double breakpoints when quickening and
       * when returning from transition frames.
       */
  opcode_switch:
      assert(istate == orig, "Corrupted istate");
      /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
      assert(topOfStack >= istate->stack_limit(), "Stack overrun");
      assert(topOfStack < istate->stack_base(), "Stack underrun");

#ifdef USELABELS
      DISPATCH(opcode);
#else
      switch (opcode)
#endif
      {
      CASE(_nop):
          UPDATE_PC_AND_CONTINUE(1);

          /* Push miscellaneous constants onto the stack. */

      CASE(_aconst_null):
          SET_STACK_OBJECT(NULL, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

#undef  OPC_CONST_n
#define OPC_CONST_n(opcode, const_type, value)                          \
      CASE(opcode):                                                     \
          SET_STACK_ ## const_type(value, 0);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

          OPC_CONST_n(_iconst_m1,   INT,       -1);
          OPC_CONST_n(_iconst_0,    INT,        0);
          OPC_CONST_n(_iconst_1,    INT,        1);
          OPC_CONST_n(_iconst_2,    INT,        2);
          OPC_CONST_n(_iconst_3,    INT,        3);
          OPC_CONST_n(_iconst_4,    INT,        4);
          OPC_CONST_n(_iconst_5,    INT,        5);
          OPC_CONST_n(_fconst_0,    FLOAT,      0.0);
          OPC_CONST_n(_fconst_1,    FLOAT,      1.0);
          OPC_CONST_n(_fconst_2,    FLOAT,      2.0);

#undef  OPC_CONST2_n
#define OPC_CONST2_n(opcname, value, key, kind)                         \
      CASE(_##opcname):                                                 \
      {                                                                 \
          SET_STACK_ ## kind(VM##key##Const##value(), 1);               \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      }
         OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
         OPC_CONST2_n(dconst_1, One,  double, DOUBLE);
         OPC_CONST2_n(lconst_0, Zero, long, LONG);
         OPC_CONST2_n(lconst_1, One,  long, LONG);
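
         // Illustrative expansion (with USELABELS off):
         // OPC_CONST_n(_iconst_0, INT, 0) becomes
         //   case Bytecodes::_iconst_0:
         //       SET_STACK_INT(0, 0);                   // push the constant at tos
         //       UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);  // 1-byte opcode, tos grows by 1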
         /* Load constant from constant pool: */

          /* Push a 1-byte signed integer value onto the stack. */
      CASE(_bipush):
          SET_STACK_INT((jbyte)(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

          /* Push a 2-byte signed integer constant onto the stack. */
      CASE(_sipush):
          SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);

          /* load from local variable */

      CASE(_aload):
          VERIFY_OOP(LOCALS_OBJECT(pc[1]));
          SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_iload):
      CASE(_fload):
          SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);

      CASE(_lload):
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

      CASE(_dload):
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);

#undef  OPC_LOAD_n
#define OPC_LOAD_n(num)                                                 \
      CASE(_aload_##num):                                               \
          VERIFY_OOP(LOCALS_OBJECT(num));                               \
          SET_STACK_OBJECT(LOCALS_OBJECT(num), 0);                      \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_iload_##num):                                               \
      CASE(_fload_##num):                                               \
          SET_STACK_SLOT(LOCALS_SLOT(num), 0);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);                         \
                                                                        \
      CASE(_lload_##num):                                               \
          SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1);             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);                         \
      CASE(_dload_##num):                                               \
          SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1);         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

          OPC_LOAD_n(0);
          OPC_LOAD_n(1);
          OPC_LOAD_n(2);
          OPC_LOAD_n(3);

          /* store to a local variable */

      CASE(_astore):
          astore(topOfStack, -1, locals, pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_istore):
      CASE(_fstore):
          SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);

      CASE(_lstore):
          SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);

      CASE(_dstore):
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
          UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
      CASE(_wide): {
          uint16_t reg = Bytes::get_Java_u2(pc + 2);

          opcode = pc[1];

          // The wide instruction and its sub-bytecode are counted as separate instructions. If we
          // don't account for this here, the bytecode trace skips the next bytecode.
          DO_UPDATE_INSTRUCTION_COUNT(opcode);

          switch(opcode) {
              case Bytecodes::_aload:
                  VERIFY_OOP(LOCALS_OBJECT(reg));
                  SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_iload:
              case Bytecodes::_fload:
                  SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);

              case Bytecodes::_lload:
                  SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_dload:
                  SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(reg), 1);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);

              case Bytecodes::_astore:
                  astore(topOfStack, -1, locals, reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_istore:
              case Bytecodes::_fstore:
                  SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);

              case Bytecodes::_lstore:
                  SET_LOCALS_LONG(STACK_LONG(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_dstore:
                  SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
                  UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);

              case Bytecodes::_iinc: {
                  int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
                  // Be nice to see what this generates.... QQQ
                  SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
                  UPDATE_PC_AND_CONTINUE(6);
              }
              case Bytecodes::_ret:
                  // Profile ret.
                  BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
                  // Now, update the pc.
                  pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
                  UPDATE_PC_AND_CONTINUE(0);
              default:
                  VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
          }
      }
#undef  OPC_STORE_n
#define OPC_STORE_n(num)                                                \
      CASE(_astore_##num):                                              \
          astore(topOfStack, -1, locals, num);                          \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_istore_##num):                                              \
      CASE(_fstore_##num):                                              \
          SET_LOCALS_SLOT(STACK_SLOT(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

          OPC_STORE_n(0);
          OPC_STORE_n(1);
          OPC_STORE_n(2);
          OPC_STORE_n(3);

#undef  OPC_DSTORE_n
#define OPC_DSTORE_n(num)                                               \
      CASE(_dstore_##num):                                              \
          SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num);                     \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      CASE(_lstore_##num):                                              \
          SET_LOCALS_LONG(STACK_LONG(-1), num);                         \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

          OPC_DSTORE_n(0);
          OPC_DSTORE_n(1);
          OPC_DSTORE_n(2);
          OPC_DSTORE_n(3);
          /* stack pop, dup, and insert opcodes */

      CASE(_pop):                /* Discard the top item on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

      CASE(_pop2):               /* Discard the top 2 items on the stack */
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);

      CASE(_dup):                /* Duplicate the top item on the stack */
          dup(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2):               /* Duplicate the top 2 items on the stack */
          dup2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup_x1):             /* insert top word two down */
          dup_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup_x2):             /* insert top word three down */
          dup_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);

      CASE(_dup2_x1):            /* insert top 2 slots three down */
          dup2_x1(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_dup2_x2):            /* insert top 2 slots four down */
          dup2_x2(topOfStack);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);

      CASE(_swap): {             /* swap top two elements on the stack */
          swap(topOfStack);
          UPDATE_PC_AND_CONTINUE(1);
      }
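
      // Illustrative stack effects (per the JVM spec), top of stack on the
      // right:  dup     [.. v]      -> [.. v v]
      //         dup_x1  [.. v2 v1]  -> [.. v1 v2 v1]
      //         swap    [.. v2 v1]  -> [.. v1 v2]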
          /* Perform various binary integer operations */

#undef  OPC_INT_BINARY
#define OPC_INT_BINARY(opcname, opname, test)                           \
      CASE(_i##opcname):                                                \
          if (test && (STACK_INT(-1) == 0)) {                           \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by zero", note_div0Check_trap);          \
          }                                                             \
          SET_STACK_INT(VMint##opname(STACK_INT(-2),                    \
                                      STACK_INT(-1)),                   \
                                      -2);                              \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                        \
      CASE(_l##opcname):                                                \
      {                                                                 \
          if (test) {                                                   \
            jlong l1 = STACK_LONG(-1);                                  \
            if (VMlongEqz(l1)) {                                        \
              VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
                            "/ by long zero", note_div0Check_trap);     \
            }                                                           \
          }                                                             \
          /* First long at (-1,-2) next long at (-3,-4) */              \
          SET_STACK_LONG(VMlong##opname(STACK_LONG(-3),                 \
                                        STACK_LONG(-1)),                \
                                        -3);                            \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                        \
      }

      OPC_INT_BINARY(add, Add, 0);
      OPC_INT_BINARY(sub, Sub, 0);
      OPC_INT_BINARY(mul, Mul, 0);
      OPC_INT_BINARY(and, And, 0);
      OPC_INT_BINARY(or,  Or,  0);
      OPC_INT_BINARY(xor, Xor, 0);
      OPC_INT_BINARY(div, Div, 1);
      OPC_INT_BINARY(rem, Rem, 1);
      /* Perform various binary floating number operations */
      /* On some machine/platforms/compilers div zero check can be implicit */

#undef  OPC_FLOAT_BINARY
#define OPC_FLOAT_BINARY(opcname, opname)                                  \
      CASE(_d##opcname): {                                                 \
          SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3),              \
                                            STACK_DOUBLE(-1)),             \
                                            -3);                           \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);                           \
      }                                                                    \
      CASE(_f##opcname):                                                   \
          SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2),                 \
                                          STACK_FLOAT(-1)),                \
                                          -2);                             \
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);

     OPC_FLOAT_BINARY(add, Add);
     OPC_FLOAT_BINARY(sub, Sub);
     OPC_FLOAT_BINARY(mul, Mul);
     OPC_FLOAT_BINARY(div, Div);
     OPC_FLOAT_BINARY(rem, Rem);

      /* Shift operations
       * Shift left int and long: ishl, lshl
       * Logical shift right int and long w/zero extension: iushr, lushr
       * Arithmetic shift right int and long w/sign extension: ishr, lshr
       */

#undef  OPC_SHIFT_BINARY
#define OPC_SHIFT_BINARY(opcname, opname)                               \
      CASE(_i##opcname):                                                \
         SET_STACK_INT(VMint##opname(STACK_INT(-2),                     \
                                     STACK_INT(-1)),                    \
                                     -2);                               \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      CASE(_l##opcname):                                                \
      {                                                                 \
         SET_STACK_LONG(VMlong##opname(STACK_LONG(-2),                  \
                                       STACK_INT(-1)),                  \
                                       -2);                             \
         UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);                         \
      }

      OPC_SHIFT_BINARY(shl, Shl);
      OPC_SHIFT_BINARY(shr, Shr);
      OPC_SHIFT_BINARY(ushr, Ushr);
     /* Increment local variable by constant */
      CASE(_iinc):
      {
          // locals[pc[1]].j.i += (jbyte)(pc[2]);
          SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
          UPDATE_PC_AND_CONTINUE(3);
      }

     /* negate the value on the top of the stack */

      CASE(_ineg):
         SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_fneg):
         SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_lneg):
      {
         SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }

      CASE(_dneg):
      {
         SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);
      }
      /* Conversion operations */

      CASE(_i2f):       /* convert top of stack int to float */
         SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
         UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2l):       /* convert top of stack int to long */
      {
          // this is ugly QQQ
          jlong r = VMint2Long(STACK_INT(-1));
          MORE_STACK(-1); // Pop
          SET_STACK_LONG(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2d):       /* convert top of stack int to double */
      {
          // this is ugly QQQ (why cast to jlong?? )
          jdouble r = (jlong)STACK_INT(-1);
          MORE_STACK(-1); // Pop
          SET_STACK_DOUBLE(r, 1);

          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_l2i):       /* convert top of stack long to int */
      {
          jint r = VMlong2Int(STACK_LONG(-1));
          MORE_STACK(-2); // Pop
          SET_STACK_INT(r, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2f):       /* convert top of stack long to float */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_FLOAT(VMlong2Float(r), 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_l2d):       /* convert top of stack long to double */
      {
          jlong r = STACK_LONG(-1);
          MORE_STACK(-2); // Pop
          SET_STACK_DOUBLE(VMlong2Double(r), 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2i):  /* Convert top of stack float to int */
          SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_f2l):  /* convert top of stack float to long */
      {
          jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
          MORE_STACK(-1); // POP
          SET_STACK_LONG(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_f2d):  /* convert top of stack float to double */
      {
          jfloat f;
          jdouble r;
          f = STACK_FLOAT(-1);
          r = (jdouble) f;
          MORE_STACK(-1); // POP
          SET_STACK_DOUBLE(r, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_d2i): /* convert top of stack double to int */
      {
          jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_INT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2f): /* convert top of stack double to float */
      {
          jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_FLOAT(r1, 0);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
      }

      CASE(_d2l): /* convert top of stack double to long */
      {
          jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
          MORE_STACK(-2);
          SET_STACK_LONG(r1, 1);
          UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
      }

      CASE(_i2b):
          SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2c):
          SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);

      CASE(_i2s):
          SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
          UPDATE_PC_AND_CONTINUE(1);
      /* comparison operators */


#define COMPARISON_OP(name, comparison)                                      \
      CASE(_if_icmp##name): {                                                \
          const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1));         \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }                                                                      \
      CASE(_if##name): {                                                     \
          const bool cmp = (STACK_INT(-1) comparison 0);                     \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define COMPARISON_OP2(name, comparison)                                     \
      COMPARISON_OP(name, comparison)                                        \
      CASE(_if_acmp##name): {                                                \
          const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1));   \
          int skip = cmp                                                     \
                       ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;            \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -2);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_NOT_OP(name)                                         \
      CASE(_if##name): {                                                     \
          const bool cmp = (!(STACK_OBJECT(-1) == NULL));                    \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }

#define NULL_COMPARISON_OP(name)                                             \
      CASE(_if##name): {                                                     \
          const bool cmp = ((STACK_OBJECT(-1) == NULL));                     \
          int skip = cmp                                                     \
                      ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3;             \
          address branch_pc = pc;                                            \
          /* Profile branch. */                                              \
          BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp);                        \
          UPDATE_PC_AND_TOS(skip, -1);                                       \
          DO_BACKEDGE_CHECKS(skip, branch_pc);                               \
          CONTINUE;                                                          \
      }
      COMPARISON_OP(lt, <);
      COMPARISON_OP(gt, >);
      COMPARISON_OP(le, <=);
      COMPARISON_OP(ge, >=);
      COMPARISON_OP2(eq, ==);  /* include ref comparison */
      COMPARISON_OP2(ne, !=);  /* include ref comparison */
      NULL_COMPARISON_OP(null);
      NULL_COMPARISON_NOT_OP(nonnull);
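
      // Illustrative: COMPARISON_OP(lt, <) above instantiates both _if_icmplt
      // and _iflt. For _iflt, skip is the signed 16-bit branch offset when
      // STACK_INT(-1) < 0 and otherwise 3 (the instruction length); a
      // non-positive skip is a backward branch and runs DO_BACKEDGE_CHECKS.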
      /* Goto pc at specified offset in switch table. */

      CASE(_tableswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  low  = Bytes::get_Java_u4((address)&lpc[1]);
          int32_t  high = Bytes::get_Java_u4((address)&lpc[2]);
          int32_t  skip;
          key -= low;
          if (((uint32_t) key > (uint32_t)(high - low))) {
            key = -1;
            skip = Bytes::get_Java_u4((address)&lpc[0]);
          } else {
            skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
          // Does this really need a full backedge check (osr)?
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }

      /* Goto pc whose table entry matches specified key. */

      CASE(_lookupswitch): {
          jint* lpc  = (jint*)VMalignWordUp(pc+1);
          int32_t  key  = STACK_INT(-1);
          int32_t  skip = Bytes::get_Java_u4((address) lpc); /* default amount */
          // Remember index.
          int      index = -1;
          int      newindex = 0;
          int32_t  npairs = Bytes::get_Java_u4((address) &lpc[1]);
          while (--npairs >= 0) {
            lpc += 2;
            if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
              skip = Bytes::get_Java_u4((address)&lpc[1]);
              index = newindex;
              break;
            }
            newindex += 1;
          }
          // Profile switch.
          BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
          address branch_pc = pc;
          UPDATE_PC_AND_TOS(skip, -1);
          DO_BACKEDGE_CHECKS(skip, branch_pc);
          CONTINUE;
      }
1644 CASE(_fcmpl):
1645 CASE(_fcmpg):
1646 {
1647 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
1648 STACK_FLOAT(-1),
1649 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
1650 -2);
1651 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1652 }
1654 CASE(_dcmpl):
1655 CASE(_dcmpg):
1656 {
1657 int r = VMdoubleCompare(STACK_DOUBLE(-3),
1658 STACK_DOUBLE(-1),
1659 (opcode == Bytecodes::_dcmpl ? -1 : 1));
1660 MORE_STACK(-4); // Pop
1661 SET_STACK_INT(r, 0);
1662 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1663 }
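// NaN note (illustrative): the only difference between the l and g flavors
// is the third argument to VMfloatCompare/VMdoubleCompare, which selects the
// result when either operand is NaN:
//   fcmpl/dcmpl push -1 on NaN, fcmpg/dcmpg push +1.
// This lets the compiler pick the flavor that makes NaN fail the comparison.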
1665 CASE(_lcmp):
1666 {
1667 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
1668 MORE_STACK(-4);
1669 SET_STACK_INT(r, 0);
1670 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1671 }
1674 /* Return from a method */
1676 CASE(_areturn):
1677 CASE(_ireturn):
1678 CASE(_freturn):
1679 {
1680 // Allow a safepoint before returning to frame manager.
1681 SAFEPOINT;
1683 goto handle_return;
1684 }
1686 CASE(_lreturn):
1687 CASE(_dreturn):
1688 {
1689 // Allow a safepoint before returning to frame manager.
1690 SAFEPOINT;
1691 goto handle_return;
1692 }
1694 CASE(_return_register_finalizer): {
1696 oop rcvr = LOCALS_OBJECT(0);
1697 VERIFY_OOP(rcvr);
1698 if (rcvr->klass()->has_finalizer()) {
1699 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
1700 }
1701 goto handle_return;
1702 }
1703 CASE(_return): {
1705 // Allow a safepoint before returning to frame manager.
1706 SAFEPOINT;
1707 goto handle_return;
1708 }
1710 /* Array access byte-codes */
1712 /* Every array access byte-code starts out like this */
1713 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
1714 #define ARRAY_INTRO(arrayOff) \
1715 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
1716 jint index = STACK_INT(arrayOff + 1); \
1717 char message[jintAsStringSize]; \
1718 CHECK_NULL(arrObj); \
1719 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
1720 sprintf(message, "%d", index); \
1721 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
1722 message, note_rangeCheck_trap); \
1723 }
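// Bounds-check sketch (illustrative): the unsigned compare above folds the
// two range checks into one, because a negative jint index casts to a large
// uint32_t:
//   (uint32_t)index >= (uint32_t)length  <==>  index < 0 || index >= length
// for any non-negative 32-bit length.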
1725 /* 32-bit loads. These handle conversion from < 32-bit types */
1726 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
1727 { \
1728 ARRAY_INTRO(-2); \
1729 (void)extra; \
1730 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
1731 -2); \
1732 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1733 }
1735 /* 64-bit loads */
1736 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
1737 { \
1738 ARRAY_INTRO(-2); \
1739 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
1740 (void)extra; \
1741 UPDATE_PC_AND_CONTINUE(1); \
1742 }
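// Element addressing sketch (illustrative): both load macros compute the
// element address as the type-specific array base plus a scaled index:
//   T2* elem = (T2*)((address)arrObj->base(T) + index * sizeof(T2));
// The 32-bit flavor replaces the arrayref/index pair with a one-slot result
// (net -1); the 64-bit flavor reuses both slots for the two-slot value, so
// the stack height is unchanged.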
1744 CASE(_iaload):
1745 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
1746 CASE(_faload):
1747 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1748 CASE(_aaload): {
1749 ARRAY_INTRO(-2);
1750 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
1751 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1752 }
1753 CASE(_baload):
1754 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1755 CASE(_caload):
1756 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
1757 CASE(_saload):
1758 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
1759 CASE(_laload):
1760 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
1761 CASE(_daload):
1762 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1764 /* 32-bit stores. These handle conversion to < 32-bit types */
1765 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
1766 { \
1767 ARRAY_INTRO(-3); \
1768 (void)extra; \
1769 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1770 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
1771 }
1773 /* 64-bit stores */
1774 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
1775 { \
1776 ARRAY_INTRO(-4); \
1777 (void)extra; \
1778 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1779 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
1780 }
1782 CASE(_iastore):
1783 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
1784 CASE(_fastore):
1785 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1786 /*
1787 * This one looks different because of the assignability check
1788 */
1789 CASE(_aastore): {
1790 oop rhsObject = STACK_OBJECT(-1);
1791 VERIFY_OOP(rhsObject);
1792 ARRAY_INTRO( -3);
1793 // arrObj, index are set
1794 if (rhsObject != NULL) {
1795 /* Check assignability of rhsObject into arrObj */
1796 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
1797 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
1798 //
1799 // Check for compatibility. This check must not GC!!
1800 // Seems way more expensive now that we must dispatch
1801 //
1802 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
1803 // Decrement counter if subtype check failed.
1804 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
1805 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
1806 }
1807 // Profile checkcast with null_seen and receiver.
1808 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
1809 } else {
1810 // Profile checkcast with null_seen and receiver.
1811 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
1812 }
1813 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
1814 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
1815 }
1816 CASE(_bastore):
1817 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1818 CASE(_castore):
1819 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
1820 CASE(_sastore):
1821 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
1822 CASE(_lastore):
1823 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
1824 CASE(_dastore):
1825 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1827 CASE(_arraylength):
1828 {
1829 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
1830 CHECK_NULL(ary);
1831 SET_STACK_INT(ary->length(), -1);
1832 UPDATE_PC_AND_CONTINUE(1);
1833 }
1835 /* monitorenter and monitorexit for locking/unlocking an object */
1837 CASE(_monitorenter): {
1838 oop lockee = STACK_OBJECT(-1);
1839 // dereferencing lockee ought to provoke an implicit null check
1840 CHECK_NULL(lockee);
1841 // find a free monitor or one already allocated for this object
1842 // if we find a matching object then we need a new monitor
1843 // since this is a recursive enter
1844 BasicObjectLock* limit = istate->monitor_base();
1845 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1846 BasicObjectLock* entry = NULL;
1847 while (most_recent != limit ) {
1848 if (most_recent->obj() == NULL) entry = most_recent;
1849 else if (most_recent->obj() == lockee) break;
1850 most_recent++;
1851 }
1852 if (entry != NULL) {
1853 entry->set_obj(lockee);
1854 bool success = false;
1855 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
1857 markOop mark = lockee->mark();
1858 intptr_t hash = (intptr_t) markOopDesc::no_hash;
1859 // implies UseBiasedLocking
1860 if (mark->has_bias_pattern()) {
1861 uintptr_t thread_ident;
1862 uintptr_t anticipated_bias_locking_value;
1863 thread_ident = (uintptr_t)istate->thread();
1864 anticipated_bias_locking_value =
1865 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
1866 ~((uintptr_t) markOopDesc::age_mask_in_place);
1868 if (anticipated_bias_locking_value == 0) {
1869 // already biased towards this thread, nothing to do
1870 if (PrintBiasedLockingStatistics) {
1871 (* BiasedLocking::biased_lock_entry_count_addr())++;
1872 }
1873 success = true;
1874 }
1875 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
1876 // try to revoke the bias
1877 markOop header = lockee->klass()->prototype_header();
1878 if (hash != markOopDesc::no_hash) {
1879 header = header->copy_set_hash(hash);
1880 }
1881 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
1882 if (PrintBiasedLockingStatistics)
1883 (*BiasedLocking::revoked_lock_entry_count_addr())++;
1884 }
1885 }
1886 else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
1887 // try to rebias
1888 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
1889 if (hash != markOopDesc::no_hash) {
1890 new_header = new_header->copy_set_hash(hash);
1891 }
1892 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
1893 if (PrintBiasedLockingStatistics)
1894 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
1895 }
1896 else {
1897 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1898 }
1899 success = true;
1900 }
1901 else {
1902 // try to bias towards thread in case object is anonymously biased
1903 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
1904 (uintptr_t)markOopDesc::age_mask_in_place |
1905 epoch_mask_in_place));
1906 if (hash != markOopDesc::no_hash) {
1907 header = header->copy_set_hash(hash);
1908 }
1909 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
1910 // debugging hint
1911 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
1912 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
1913 if (PrintBiasedLockingStatistics)
1914 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
1915 }
1916 else {
1917 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1918 }
1919 success = true;
1920 }
1921 }
1923 // traditional lightweight locking
1924 if (!success) {
1925 markOop displaced = lockee->mark()->set_unlocked();
1926 entry->lock()->set_displaced_header(displaced);
1927 bool call_vm = UseHeavyMonitors;
1928 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
1929 // Is it the simple recursive case?
1930 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
1931 entry->lock()->set_displaced_header(NULL);
1932 } else {
1933 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1934 }
1935 }
1936 }
1937 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1938 } else {
1939 istate->set_msg(more_monitors);
1940 UPDATE_PC_AND_RETURN(0); // Re-execute
1941 }
1942 }
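// Summary (illustrative) of the monitorenter fast paths above, assuming
// UseBiasedLocking: (1) mark already biased to this thread -> done;
// (2) bias bits set for another state -> try to revoke or rebias with a CAS
// on the mark word; (3) anonymously biased -> try to CAS our thread into the
// bias bits; if none of that succeeds, fall back to stack locking with a
// displaced header, and finally to InterpreterRuntime::monitorenter.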
1944 CASE(_monitorexit): {
1945 oop lockee = STACK_OBJECT(-1);
1946 CHECK_NULL(lockee);
1947 // dereferencing lockee ought to provoke an implicit null check
1948 // find our monitor slot
1949 BasicObjectLock* limit = istate->monitor_base();
1950 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1951 while (most_recent != limit ) {
1952 if ((most_recent)->obj() == lockee) {
1953 BasicLock* lock = most_recent->lock();
1954 markOop header = lock->displaced_header();
1955 most_recent->set_obj(NULL);
1956 if (!lockee->mark()->has_bias_pattern()) {
1957 bool call_vm = UseHeavyMonitors;
1958 // If it isn't recursive we must either swap the old header or call the runtime
1959 if (header != NULL || call_vm) {
1960 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
1961 // restore object for the slow case
1962 most_recent->set_obj(lockee);
1963 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
1964 }
1965 }
1966 }
1967 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1968 }
1969 most_recent++;
1970 }
1971 // Need to throw illegal monitor state exception
1972 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1973 ShouldNotReachHere();
1974 }
1976 /* All of the non-quick opcodes. */
1978 /* Set clobbersCpIndex true if the quickened opcode clobbers the
1979 * constant pool index in the instruction.
1980 */
1981 CASE(_getfield):
1982 CASE(_getstatic):
1983 {
1984 u2 index;
1985 ConstantPoolCacheEntry* cache;
1986 index = Bytes::get_native_u2(pc+1);
1988 // QQQ Need to make this as inlined as possible. Probably need to
1989 // split all the bytecode cases out so c++ compiler has a chance
1990 // for constant prop to fold everything possible away.
1992 cache = cp->entry_at(index);
1993 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
1994 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
1995 handle_exception);
1996 cache = cp->entry_at(index);
1997 }
1999 #ifdef VM_JVMTI
2000 if (_jvmti_interp_events) {
2001 int *count_addr;
2002 oop obj;
2003 // Check to see if a field access watch has been set
2004 // before we take the time to call into the VM.
2005 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
2006 if ( *count_addr > 0 ) {
2007 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
2008 obj = (oop)NULL;
2009 } else {
2010 obj = (oop) STACK_OBJECT(-1);
2011 VERIFY_OOP(obj);
2012 }
2013 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
2014 obj,
2015 cache),
2016 handle_exception);
2017 }
2018 }
2019 #endif /* VM_JVMTI */
2021 oop obj;
2022 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
2023 Klass* k = cache->f1_as_klass();
2024 obj = k->java_mirror();
2025 MORE_STACK(1); // Assume single slot push
2026 } else {
2027 obj = (oop) STACK_OBJECT(-1);
2028 CHECK_NULL(obj);
2029 }
2031 //
2032 // Now store the result on the stack
2033 //
2034 TosState tos_type = cache->flag_state();
2035 int field_offset = cache->f2_as_index();
2036 if (cache->is_volatile()) {
2037 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2038 OrderAccess::fence();
2039 }
2040 if (tos_type == atos) {
2041 VERIFY_OOP(obj->obj_field_acquire(field_offset));
2042 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
2043 } else if (tos_type == itos) {
2044 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
2045 } else if (tos_type == ltos) {
2046 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
2047 MORE_STACK(1);
2048 } else if (tos_type == btos) {
2049 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
2050 } else if (tos_type == ctos) {
2051 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
2052 } else if (tos_type == stos) {
2053 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
2054 } else if (tos_type == ftos) {
2055 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
2056 } else {
2057 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
2058 MORE_STACK(1);
2059 }
2060 } else {
2061 if (tos_type == atos) {
2062 VERIFY_OOP(obj->obj_field(field_offset));
2063 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
2064 } else if (tos_type == itos) {
2065 SET_STACK_INT(obj->int_field(field_offset), -1);
2066 } else if (tos_type == ltos) {
2067 SET_STACK_LONG(obj->long_field(field_offset), 0);
2068 MORE_STACK(1);
2069 } else if (tos_type == btos) {
2070 SET_STACK_INT(obj->byte_field(field_offset), -1);
2071 } else if (tos_type == ctos) {
2072 SET_STACK_INT(obj->char_field(field_offset), -1);
2073 } else if (tos_type == stos) {
2074 SET_STACK_INT(obj->short_field(field_offset), -1);
2075 } else if (tos_type == ftos) {
2076 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
2077 } else {
2078 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
2079 MORE_STACK(1);
2080 }
2081 }
2083 UPDATE_PC_AND_CONTINUE(3);
2084 }
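// Memory-ordering note (illustrative): volatile reads use the _acquire field
// loads, and on targets without multiple-copy-atomic memory
// (support_IRIW_for_not_multiple_copy_atomic_cpu) a leading
// OrderAccess::fence() restores the IRIW ordering the Java memory model
// requires of volatile accesses.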
2086 CASE(_putfield):
2087 CASE(_putstatic):
2088 {
2089 u2 index = Bytes::get_native_u2(pc+1);
2090 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2091 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2092 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
2093 handle_exception);
2094 cache = cp->entry_at(index);
2095 }
2097 #ifdef VM_JVMTI
2098 if (_jvmti_interp_events) {
2099 int *count_addr;
2100 oop obj;
2101 // Check to see if a field modification watch has been set
2102 // before we take the time to call into the VM.
2103 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
2104 if ( *count_addr > 0 ) {
2105 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
2106 obj = (oop)NULL;
2107 }
2108 else {
2109 if (cache->is_long() || cache->is_double()) {
2110 obj = (oop) STACK_OBJECT(-3);
2111 } else {
2112 obj = (oop) STACK_OBJECT(-2);
2113 }
2114 VERIFY_OOP(obj);
2115 }
2117 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
2118 obj,
2119 cache,
2120 (jvalue *)STACK_SLOT(-1)),
2121 handle_exception);
2122 }
2123 }
2124 #endif /* VM_JVMTI */
2126 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2127 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2129 oop obj;
2130 int count;
2131 TosState tos_type = cache->flag_state();
2133 count = -1;
2134 if (tos_type == ltos || tos_type == dtos) {
2135 --count;
2136 }
2137 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
2138 Klass* k = cache->f1_as_klass();
2139 obj = k->java_mirror();
2140 } else {
2141 --count;
2142 obj = (oop) STACK_OBJECT(count);
2143 CHECK_NULL(obj);
2144 }
2146 //
2147 // Now store the result
2148 //
2149 int field_offset = cache->f2_as_index();
2150 if (cache->is_volatile()) {
2151 if (tos_type == itos) {
2152 obj->release_int_field_put(field_offset, STACK_INT(-1));
2153 } else if (tos_type == atos) {
2154 VERIFY_OOP(STACK_OBJECT(-1));
2155 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
2156 } else if (tos_type == btos) {
2157 obj->release_byte_field_put(field_offset, STACK_INT(-1));
2158 } else if (tos_type == ltos) {
2159 obj->release_long_field_put(field_offset, STACK_LONG(-1));
2160 } else if (tos_type == ctos) {
2161 obj->release_char_field_put(field_offset, STACK_INT(-1));
2162 } else if (tos_type == stos) {
2163 obj->release_short_field_put(field_offset, STACK_INT(-1));
2164 } else if (tos_type == ftos) {
2165 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
2166 } else {
2167 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
2168 }
2169 OrderAccess::storeload();
2170 } else {
2171 if (tos_type == itos) {
2172 obj->int_field_put(field_offset, STACK_INT(-1));
2173 } else if (tos_type == atos) {
2174 VERIFY_OOP(STACK_OBJECT(-1));
2175 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
2176 } else if (tos_type == btos) {
2177 obj->byte_field_put(field_offset, STACK_INT(-1));
2178 } else if (tos_type == ltos) {
2179 obj->long_field_put(field_offset, STACK_LONG(-1));
2180 } else if (tos_type == ctos) {
2181 obj->char_field_put(field_offset, STACK_INT(-1));
2182 } else if (tos_type == stos) {
2183 obj->short_field_put(field_offset, STACK_INT(-1));
2184 } else if (tos_type == ftos) {
2185 obj->float_field_put(field_offset, STACK_FLOAT(-1));
2186 } else {
2187 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
2188 }
2189 }
2191 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
2192 }
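// Memory-ordering note (illustrative): volatile writes use the release_*_put
// variants followed by OrderAccess::storeload(), so the store cannot be
// reordered with a later volatile read on the same thread; plain field
// writes need no barrier here.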
2194 CASE(_new): {
2195 u2 index = Bytes::get_Java_u2(pc+1);
2196 ConstantPool* constants = istate->method()->constants();
2197 if (!constants->tag_at(index).is_unresolved_klass()) {
2198 // Make sure klass is initialized and doesn't have a finalizer
2199 Klass* entry = constants->slot_at(index).get_klass();
2200 assert(entry->is_klass(), "Should be resolved klass");
2201 Klass* k_entry = (Klass*) entry;
2202 assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
2203 InstanceKlass* ik = (InstanceKlass*) k_entry;
2204 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
2205 size_t obj_size = ik->size_helper();
2206 oop result = NULL;
2207 // If the TLAB isn't pre-zeroed then we'll have to do it
2208 bool need_zero = !ZeroTLAB;
2209 if (UseTLAB) {
2210 result = (oop) THREAD->tlab().allocate(obj_size);
2211 }
2212 // Disable non-TLAB-based fast-path, because profiling requires that all
2213 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
2214 // returns NULL.
2215 #ifndef CC_INTERP_PROFILE
2216 if (result == NULL) {
2217 need_zero = true;
2218 // Try to allocate in the shared eden
2219 retry:
2220 HeapWord* compare_to = *Universe::heap()->top_addr();
2221 HeapWord* new_top = compare_to + obj_size;
2222 if (new_top <= *Universe::heap()->end_addr()) {
2223 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
2224 goto retry;
2225 }
2226 result = (oop) compare_to;
2227 }
2228 }
2229 #endif
2230 if (result != NULL) {
2231 // Initialize the object body (if it has nonzero size and zeroing is needed) and then the header
2232 if (need_zero ) {
2233 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
2234 obj_size -= sizeof(oopDesc) / oopSize;
2235 if (obj_size > 0 ) {
2236 memset(to_zero, 0, obj_size * HeapWordSize);
2237 }
2238 }
2239 if (UseBiasedLocking) {
2240 result->set_mark(ik->prototype_header());
2241 } else {
2242 result->set_mark(markOopDesc::prototype());
2243 }
2244 result->set_klass_gap(0);
2245 result->set_klass(k_entry);
2246 // Must prevent reordering of stores for object initialization
2247 // with stores that publish the new object.
2248 OrderAccess::storestore();
2249 SET_STACK_OBJECT(result, 0);
2250 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2251 }
2252 }
2253 }
2254 // Slow case allocation
2255 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2256 handle_exception);
2257 // Must prevent reordering of stores for object initialization
2258 // with stores that publish the new object.
2259 OrderAccess::storestore();
2260 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2261 THREAD->set_vm_result(NULL);
2262 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2263 }
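// Fast-path sketch (illustrative, mirrors the code above): both the TLAB and
// the shared-eden paths are bump-the-pointer allocation, the latter made
// atomic with a CAS retry loop, roughly:
//   HeapWord* top = *heap->top_addr();
//   if (top + obj_size <= *heap->end_addr() &&
//       Atomic::cmpxchg_ptr(top + obj_size, heap->top_addr(), top) == top)
//     result = (oop)top;  // otherwise retry or take the slow path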
2264 CASE(_anewarray): {
2265 u2 index = Bytes::get_Java_u2(pc+1);
2266 jint size = STACK_INT(-1);
2267 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
2268 handle_exception);
2269 // Must prevent reordering of stores for object initialization
2270 // with stores that publish the new object.
2271 OrderAccess::storestore();
2272 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2273 THREAD->set_vm_result(NULL);
2274 UPDATE_PC_AND_CONTINUE(3);
2275 }
2276 CASE(_multianewarray): {
2277 jint dims = *(pc+3);
2278 jint size = STACK_INT(-1);
2279 // stack grows down, dimensions are up!
2280 jint *dimarray =
2281 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
2282 Interpreter::stackElementWords-1];
2283 // adjust pointer to start of stack element
2284 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
2285 handle_exception);
2286 // Must prevent reordering of stores for object initialization
2287 // with stores that publish the new object.
2288 OrderAccess::storestore();
2289 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
2290 THREAD->set_vm_result(NULL);
2291 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
2292 }
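// Operand layout note (illustrative): the `dims` dimension sizes are the top
// `dims` stack slots; since the stack grows down, `dimarray` is adjusted to
// point at the first-pushed (outermost) dimension so
// InterpreterRuntime::multianewarray can read the sizes in declaration
// order. The result replaces all of them, a net change of -(dims-1) slots.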
2293 CASE(_checkcast):
2294 if (STACK_OBJECT(-1) != NULL) {
2295 VERIFY_OOP(STACK_OBJECT(-1));
2296 u2 index = Bytes::get_Java_u2(pc+1);
2297 // Constant pool may have actual klass or unresolved klass. If it is
2298 // unresolved we must resolve it.
2299 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2300 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2301 }
2302 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2303 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
2304 //
2305 // Check for compatibility. This check must not GC!!
2306 // Seems way more expensive now that we must dispatch.
2307 //
2308 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
2309 // Decrement counter at checkcast.
2310 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
2311 ResourceMark rm(THREAD);
2312 const char* objName = objKlass->external_name();
2313 const char* klassName = klassOf->external_name();
2314 char* message = SharedRuntime::generate_class_cast_message(
2315 objName, klassName);
2316 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
2317 }
2318 // Profile checkcast with null_seen and receiver.
2319 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
2320 } else {
2321 // Profile checkcast with null_seen and receiver.
2322 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
2323 }
2324 UPDATE_PC_AND_CONTINUE(3);
2326 CASE(_instanceof):
2327 if (STACK_OBJECT(-1) == NULL) {
2328 SET_STACK_INT(0, -1);
2329 // Profile instanceof with null_seen and receiver.
2330 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
2331 } else {
2332 VERIFY_OOP(STACK_OBJECT(-1));
2333 u2 index = Bytes::get_Java_u2(pc+1);
2334 // Constant pool may have actual klass or unresolved klass. If it is
2335 // unresolved we must resolve it.
2336 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2337 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2338 }
2339 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2340 Klass* objKlass = STACK_OBJECT(-1)->klass();
2341 //
2342 // Check for compatibility. This check must not GC!!
2343 // Seems way more expensive now that we must dispatch.
2344 //
2345 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
2346 SET_STACK_INT(1, -1);
2347 } else {
2348 SET_STACK_INT(0, -1);
2349 // Decrement counter at instanceof.
2350 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
2351 }
2352 // Profile instanceof with null_seen and receiver.
2353 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
2354 }
2355 UPDATE_PC_AND_CONTINUE(3);
2357 CASE(_ldc_w):
2358 CASE(_ldc):
2359 {
2360 u2 index;
2361 bool wide = false;
2362 int incr = 2; // frequent case
2363 if (opcode == Bytecodes::_ldc) {
2364 index = pc[1];
2365 } else {
2366 index = Bytes::get_Java_u2(pc+1);
2367 incr = 3;
2368 wide = true;
2369 }
2371 ConstantPool* constants = METHOD->constants();
2372 switch (constants->tag_at(index).value()) {
2373 case JVM_CONSTANT_Integer:
2374 SET_STACK_INT(constants->int_at(index), 0);
2375 break;
2377 case JVM_CONSTANT_Float:
2378 SET_STACK_FLOAT(constants->float_at(index), 0);
2379 break;
2381 case JVM_CONSTANT_String:
2382 {
2383 oop result = constants->resolved_references()->obj_at(index);
2384 if (result == NULL) {
2385 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2386 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2387 THREAD->set_vm_result(NULL);
2388 } else {
2389 VERIFY_OOP(result);
2390 SET_STACK_OBJECT(result, 0);
2391 }
2392 break;
2393 }
2395 case JVM_CONSTANT_Class:
2396 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
2397 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
2398 break;
2400 case JVM_CONSTANT_UnresolvedClass:
2401 case JVM_CONSTANT_UnresolvedClassInError:
2402 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
2403 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2404 THREAD->set_vm_result(NULL);
2405 break;
2407 default: ShouldNotReachHere();
2408 }
2409 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2410 }
2412 CASE(_ldc2_w):
2413 {
2414 u2 index = Bytes::get_Java_u2(pc+1);
2416 ConstantPool* constants = METHOD->constants();
2417 switch (constants->tag_at(index).value()) {
2419 case JVM_CONSTANT_Long:
2420 SET_STACK_LONG(constants->long_at(index), 1);
2421 break;
2423 case JVM_CONSTANT_Double:
2424 SET_STACK_DOUBLE(constants->double_at(index), 1);
2425 break;
2426 default: ShouldNotReachHere();
2427 }
2428 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
2429 }
2431 CASE(_fast_aldc_w):
2432 CASE(_fast_aldc): {
2433 u2 index;
2434 int incr;
2435 if (opcode == Bytecodes::_fast_aldc) {
2436 index = pc[1];
2437 incr = 2;
2438 } else {
2439 index = Bytes::get_native_u2(pc+1);
2440 incr = 3;
2441 }
2443 // We are resolved if the resolved-references entry contains a non-null object (String, MethodType, etc.)
2444 // This kind of CP cache entry does not need to match the flags byte, because
2445 // there is a 1-1 relation between bytecode type and CP entry type.
2446 ConstantPool* constants = METHOD->constants();
2447 oop result = constants->resolved_references()->obj_at(index);
2448 if (result == NULL) {
2449 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
2450 handle_exception);
2451 result = THREAD->vm_result();
2452 }
2454 VERIFY_OOP(result);
2455 SET_STACK_OBJECT(result, 0);
2456 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2457 }
2459 CASE(_invokedynamic): {
2461 if (!EnableInvokeDynamic) {
2462 // We should not encounter this bytecode if !EnableInvokeDynamic.
2463 // The verifier will stop it. However, if we get past the verifier,
2464 // this will stop the thread in a reasonable way, without crashing the JVM.
2465 CALL_VM(InterpreterRuntime::throw_IncompatibleClassChangeError(THREAD),
2466 handle_exception);
2467 ShouldNotReachHere();
2468 }
2470 u4 index = Bytes::get_native_u4(pc+1);
2471 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2473 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
2474 // This kind of CP cache entry does not need to match the flags byte, because
2475 // there is a 1-1 relation between bytecode type and CP entry type.
2476 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2477 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
2478 handle_exception);
2479 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2480 }
2482 Method* method = cache->f1_as_method();
2483 if (VerifyOops) method->verify();
2485 if (cache->has_appendix()) {
2486 ConstantPool* constants = METHOD->constants();
2487 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2488 MORE_STACK(1);
2489 }
2491 istate->set_msg(call_method);
2492 istate->set_callee(method);
2493 istate->set_callee_entry_point(method->from_interpreted_entry());
2494 istate->set_bcp_advance(5);
2496 // Invokedynamic has got a call counter, just like an invokestatic -> increment!
2497 BI_PROFILE_UPDATE_CALL();
2499 UPDATE_PC_AND_RETURN(0); // I'll be back...
2500 }
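// Appendix note (illustrative): a resolved invokedynamic (and invokehandle
// below) may carry an appendix -- an extra argument such as a bound CallSite
// or MethodType -- which is pushed here as a trailing parameter; the callee's
// signature expects it, so no bytecode-visible argument changes.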
2502 CASE(_invokehandle): {
2504 if (!EnableInvokeDynamic) {
2505 ShouldNotReachHere();
2506 }
2508 u2 index = Bytes::get_native_u2(pc+1);
2509 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2511 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2512 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
2513 handle_exception);
2514 cache = cp->entry_at(index);
2515 }
2517 Method* method = cache->f1_as_method();
2518 if (VerifyOops) method->verify();
2520 if (cache->has_appendix()) {
2521 ConstantPool* constants = METHOD->constants();
2522 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2523 MORE_STACK(1);
2524 }
2526 istate->set_msg(call_method);
2527 istate->set_callee(method);
2528 istate->set_callee_entry_point(method->from_interpreted_entry());
2529 istate->set_bcp_advance(3);
2531 // Invokehandle has got a call counter, just like a final call -> increment!
2532 BI_PROFILE_UPDATE_FINALCALL();
2534 UPDATE_PC_AND_RETURN(0); // I'll be back...
2535 }
2537 CASE(_invokeinterface): {
2538 u2 index = Bytes::get_native_u2(pc+1);
2540 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2541 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2543 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2544 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2545 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
2546 handle_exception);
2547 cache = cp->entry_at(index);
2548 }
2550 istate->set_msg(call_method);
2552 // Special case of invokeinterface called for virtual method of
2553 // java.lang.Object. See cpCacheOop.cpp for details.
2554 // This code isn't produced by javac, but could be produced by
2555 // another compliant java compiler.
2556 if (cache->is_forced_virtual()) {
2557 Method* callee;
2558 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2559 if (cache->is_vfinal()) {
2560 callee = cache->f2_as_vfinal_method();
2561 // Profile 'special case of invokeinterface' final call.
2562 BI_PROFILE_UPDATE_FINALCALL();
2563 } else {
2564 // Get receiver.
2565 int parms = cache->parameter_size();
2566 // Same comments as invokevirtual apply here.
2567 oop rcvr = STACK_OBJECT(-parms);
2568 VERIFY_OOP(rcvr);
2569 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2570 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2571 // Profile 'special case of invokeinterface' virtual call.
2572 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2573 }
2574 istate->set_callee(callee);
2575 istate->set_callee_entry_point(callee->from_interpreted_entry());
2576 #ifdef VM_JVMTI
2577 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2578 istate->set_callee_entry_point(callee->interpreter_entry());
2579 }
2580 #endif /* VM_JVMTI */
2581 istate->set_bcp_advance(5);
2582 UPDATE_PC_AND_RETURN(0); // I'll be back...
2583 }
2585 // this could definitely be cleaned up QQQ
2586 Method* callee;
2587 Klass* iclass = cache->f1_as_klass();
2588 // InstanceKlass* interface = (InstanceKlass*) iclass;
2589 // get receiver
2590 int parms = cache->parameter_size();
2591 oop rcvr = STACK_OBJECT(-parms);
2592 CHECK_NULL(rcvr);
2593 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
2594 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
2595 int i;
2596 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
2597 if (ki->interface_klass() == iclass) break;
2598 }
2599 // If the interface isn't found, this class doesn't implement this
2600 // interface. The link resolver checks this but only for the first
2601 // time this interface is called.
2602 if (i == int2->itable_length()) {
2603 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
2604 }
2605 int mindex = cache->f2_as_index();
2606 itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
2607 callee = im[mindex].method();
2608 if (callee == NULL) {
2609 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
2610 }
2612 // Profile virtual call.
2613 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2615 istate->set_callee(callee);
2616 istate->set_callee_entry_point(callee->from_interpreted_entry());
2617 #ifdef VM_JVMTI
2618 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2619 istate->set_callee_entry_point(callee->interpreter_entry());
2620 }
2621 #endif /* VM_JVMTI */
2622 istate->set_bcp_advance(5);
2623 UPDATE_PC_AND_RETURN(0); // I'll be back...
2624 }
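// Dispatch sketch (illustrative): the itable lookup above is two-level --
// scan the receiver klass's itableOffsetEntry list for the declaring
// interface, then index into that interface's method block:
//   callee = ki->first_method_entry(rcvr->klass())[mindex].method();
// No matching interface -> IncompatibleClassChangeError; a NULL method slot
// (unimplemented abstract method) -> AbstractMethodError.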
2626 CASE(_invokevirtual):
2627 CASE(_invokespecial):
2628 CASE(_invokestatic): {
2629 u2 index = Bytes::get_native_u2(pc+1);
2631 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2632 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2633 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2635 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2636 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
2637 handle_exception);
2638 cache = cp->entry_at(index);
2639 }
2641 istate->set_msg(call_method);
2642 {
2643 Method* callee;
2644 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
2645 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2646 if (cache->is_vfinal()) {
2647 callee = cache->f2_as_vfinal_method();
2648 // Profile final call.
2649 BI_PROFILE_UPDATE_FINALCALL();
2650 } else {
2651 // get receiver
2652 int parms = cache->parameter_size();
2653 // this works but needs a resourcemark and seems to create a vtable on every call:
2654 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
2655 //
2656 // this fails with an assert
2657 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
2658 // but this works
2659 oop rcvr = STACK_OBJECT(-parms);
2660 VERIFY_OOP(rcvr);
2661 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2662 /*
2663 Executing this code in java.lang.String:
2664 public String(char value[]) {
2665 this.count = value.length;
2666 this.value = (char[])value.clone();
2667 }
2669 a find on rcvr->klass() reports:
2670 {type array char}{type array class}
2671 - klass: {other class}
2673 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes an assertion failure
2674 because rcvr->klass()->oop_is_instance() == 0
2675 However it seems to have a vtable in the right location. Huh?
2677 */
2678 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2679 // Profile virtual call.
2680 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2681 }
2682 } else {
2683 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
2684 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2685 }
2686 callee = cache->f1_as_method();
2688 // Profile call.
2689 BI_PROFILE_UPDATE_CALL();
2690 }
2692 istate->set_callee(callee);
2693 istate->set_callee_entry_point(callee->from_interpreted_entry());
2694 #ifdef VM_JVMTI
2695 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2696 istate->set_callee_entry_point(callee->interpreter_entry());
2697 }
2698 #endif /* VM_JVMTI */
2699 istate->set_bcp_advance(3);
2700 UPDATE_PC_AND_RETURN(0); // I'll be back...
2701 }
2702 }
2704 /* Allocate memory for a new java array. */
2706 CASE(_newarray): {
2707 BasicType atype = (BasicType) *(pc+1);
2708 jint size = STACK_INT(-1);
2709 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
2710 handle_exception);
2711 // Must prevent reordering of stores for object initialization
2712 // with stores that publish the new object.
2713 OrderAccess::storestore();
2714 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2715 THREAD->set_vm_result(NULL);
2717 UPDATE_PC_AND_CONTINUE(2);
2718 }
2720 /* Throw an exception. */
2722 CASE(_athrow): {
2723 oop except_oop = STACK_OBJECT(-1);
2724 CHECK_NULL(except_oop);
2725 // set pending_exception so we use common code
2726 THREAD->set_pending_exception(except_oop, NULL, 0);
2727 goto handle_exception;
2728 }
2730 /* goto and jsr. They are exactly the same except jsr pushes
2731 * the address of the next instruction first.
2732 */
2734 CASE(_jsr): {
2735 /* push bytecode index on stack */
2736 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
2737 MORE_STACK(1);
2738 /* FALL THROUGH */
2739 }
2741 CASE(_goto):
2742 {
2743 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
2744 // Profile jump.
2745 BI_PROFILE_UPDATE_JUMP();
2746 address branch_pc = pc;
2747 UPDATE_PC(offset);
2748 DO_BACKEDGE_CHECKS(offset, branch_pc);
2749 CONTINUE;
2750 }
2752 CASE(_jsr_w): {
2753 /* push return address on the stack */
2754 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
2755 MORE_STACK(1);
2756 /* FALL THROUGH */
2757 }
2759 CASE(_goto_w):
2760 {
2761 int32_t offset = Bytes::get_Java_u4(pc + 1);
2762 // Profile jump.
2763 BI_PROFILE_UPDATE_JUMP();
2764 address branch_pc = pc;
2765 UPDATE_PC(offset);
2766 DO_BACKEDGE_CHECKS(offset, branch_pc);
2767 CONTINUE;
2768 }
2770 /* return from a jsr or jsr_w */
2772 CASE(_ret): {
2773 // Profile ret.
2774 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
2775 // Now, update the pc.
2776 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
2777 UPDATE_PC_AND_CONTINUE(0);
2778 }
2780 /* debugger breakpoint */
2782 CASE(_breakpoint): {
2783 Bytecodes::Code original_bytecode;
2784 DECACHE_STATE();
2785 SET_LAST_JAVA_FRAME();
2786 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
2787 METHOD, pc);
2788 RESET_LAST_JAVA_FRAME();
2789 CACHE_STATE();
2790 if (THREAD->has_pending_exception()) goto handle_exception;
2791 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
2792 handle_exception);
2794 opcode = (jubyte)original_bytecode;
2795 goto opcode_switch;
2796 }
2798 DEFAULT:
2799 fatal(err_msg("Unimplemented opcode %d = %s", opcode,
2800 Bytecodes::name((Bytecodes::Code)opcode)));
2801 goto finish;
2803 } /* switch(opc) */
2806 #ifdef USELABELS
2807 check_for_exception:
2808 #endif
2809 {
2810 if (!THREAD->has_pending_exception()) {
2811 CONTINUE;
2812 }
2813 /* We will be gcsafe soon, so flush our state. */
2814 DECACHE_PC();
2815 goto handle_exception;
2816 }
2817 do_continue: ;
2819 } /* while (1) interpreter loop */
2822 // An exception exists in the thread state; see whether this activation can handle it
2823 handle_exception: {
2825 HandleMarkCleaner __hmc(THREAD);
2826 Handle except_oop(THREAD, THREAD->pending_exception());
2827 // Prevent any subsequent HandleMarkCleaner in the VM
2828 // from freeing the except_oop handle.
2829 HandleMark __hm(THREAD);
2831 THREAD->clear_pending_exception();
2832 assert(except_oop(), "No exception to process");
2833 intptr_t continuation_bci;
2834 // expression stack is emptied
2835 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2836 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
2837 handle_exception);
2839 except_oop = THREAD->vm_result();
2840 THREAD->set_vm_result(NULL);
2841 if (continuation_bci >= 0) {
2842 // Place exception on top of stack
2843 SET_STACK_OBJECT(except_oop(), 0);
2844 MORE_STACK(1);
2845 pc = METHOD->code_base() + continuation_bci;
2846 if (TraceExceptions) {
2847 ttyLocker ttyl;
2848 ResourceMark rm;
2849 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
2850 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2851 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
2852 istate->bcp() - (intptr_t)METHOD->code_base(),
2853 continuation_bci, THREAD);
2854 }
2855 // for AbortVMOnException flag
2856 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
2858 // Update profiling data.
2859 BI_PROFILE_ALIGN_TO_CURRENT_BCI();
2860 goto run;
2861 }
2862 if (TraceExceptions) {
2863 ttyLocker ttyl;
2864 ResourceMark rm;
2865 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), (void*)except_oop());
2866 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2867 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
2868 istate->bcp() - (intptr_t)METHOD->code_base(),
2869 THREAD);
2870 }
2871 // for AbortVMOnException flag
2872 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
2873 // No handler in this activation, unwind and try again
2874 THREAD->set_pending_exception(except_oop(), NULL, 0);
2875 goto handle_return;
2876 } // handle_exception:
2878 // Return from an interpreter invocation with the result of the interpretation
2879 // on the top of the Java Stack (or a pending exception)
2881 handle_Pop_Frame: {
2883 // We don't really do anything special here except we must be aware
2884 // that we can get here without ever locking the method (if synchronized).
2885 // Also we skip the notification of the exit.
2887 istate->set_msg(popping_frame);
2888 // Clear the pending flag so that, while the pop is in process,
2889 // we don't start another one if a call_vm is done.
2890 THREAD->clr_pop_frame_pending();
2891 // Let the interpreter (only) see that we're in the process of popping a frame
2892 THREAD->set_pop_frame_in_process();
2894 goto handle_return;
2896 } // handle_Pop_Frame
2898 // ForceEarlyReturn ends a method, and returns to the caller with a return value
2899 // given by the invoker of the early return.
2900 handle_Early_Return: {
2902 istate->set_msg(early_return);
2904 // Clear expression stack.
2905 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2907 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
2909 // Push the value to be returned.
2910 switch (istate->method()->result_type()) {
2911 case T_BOOLEAN:
2912 case T_SHORT:
2913 case T_BYTE:
2914 case T_CHAR:
2915 case T_INT:
2916 SET_STACK_INT(ts->earlyret_value().i, 0);
2917 MORE_STACK(1);
2918 break;
2919 case T_LONG:
2920 SET_STACK_LONG(ts->earlyret_value().j, 1);
2921 MORE_STACK(2);
2922 break;
2923 case T_FLOAT:
2924 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
2925 MORE_STACK(1);
2926 break;
2927 case T_DOUBLE:
2928 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
2929 MORE_STACK(2);
2930 break;
2931 case T_ARRAY:
2932 case T_OBJECT:
2933 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
2934 MORE_STACK(1);
2935 break;
2936 }
2938 ts->clr_earlyret_value();
2939 ts->set_earlyret_oop(NULL);
2940 ts->clr_earlyret_pending();
2942 // Fall through to handle_return.
2944 } // handle_Early_Return
2946 handle_return: {
2947 // A storestore barrier is required to order initialization of
2948 // final fields with publishing the reference to the object that
2949 // holds the field. Without the barrier the value of final fields
2950 // can be observed to change.
2951 OrderAccess::storestore();
2953 DECACHE_STATE();
2955 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
2956 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
2957 Handle original_exception(THREAD, THREAD->pending_exception());
2958 Handle illegal_state_oop(THREAD, NULL);
2960 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
2961 // in any following VM entries from freeing our live handles, but illegal_state_oop
2962 // isn't really allocated yet and so doesn't become live until later and
2963 // in unpredictable places. Instead we must protect the places where we enter the
2964 // VM. It would be much simpler (and safer) if we could allocate a real handle with
2965 // a NULL oop in it and then overwrite the oop later as needed. This,
2966 // unfortunately, isn't possible.
2968 THREAD->clear_pending_exception();
2970 //
2971 // As far as we are concerned we have returned. If we have a pending exception,
2972 // it will be returned as this invocation's result. However, if we get any
2973 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
2974 // will be our final result (i.e. monitor exception trumps a pending exception).
2975 //
2977 // If we never locked the method (or really passed the point where we would have),
2978 // there is no need to unlock it (or look for other monitors), since that
2979 // could not have happened.
2981 if (THREAD->do_not_unlock()) {
2983 // Never locked, reset the flag now because obviously any caller must
2984 // have passed their point of locking for us to have gotten here.
2986 THREAD->clr_do_not_unlock();
2987 } else {
2988 // At this point we consider that we have returned. We now check that the
2989 // locks were properly block structured. If we find that they were not
2990 // used properly we will return with an illegal monitor exception.
2991 // The exception is checked by the caller not the callee since this
2992 // checking is considered to be part of the invocation and therefore
2993 // in the callers scope (JVM spec 8.13).
2994 //
2995 // Another weird thing to watch for is if the method was locked
2996 // recursively and then not exited properly. This means we must
2997 // examine all the entries in reverse time (and stack) order and
2998 // unlock as we find them. If we find the method monitor before
2999 // we are at the initial entry then we should throw an exception.
3000 // It is not clear that the template-based interpreter does this
3001 // correctly.
3003 BasicObjectLock* base = istate->monitor_base();
3004 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
3005 bool method_unlock_needed = METHOD->is_synchronized();
3006 // We know the initial monitor was used for the method, so don't check
3007 // that slot in the loop.
3008 if (method_unlock_needed) base--;
3010 // Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
3011 while (end < base) {
3012 oop lockee = end->obj();
3013 if (lockee != NULL) {
3014 BasicLock* lock = end->lock();
3015 markOop header = lock->displaced_header();
3016 end->set_obj(NULL);
3018 if (!lockee->mark()->has_bias_pattern()) {
3019 // If it isn't recursive we either must swap old header or call the runtime
3020 if (header != NULL) {
3021 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
3022 // restore object for the slow case
3023 end->set_obj(lockee);
3024 {
3025 // Prevent any HandleMarkCleaner from freeing our live handles
3026 HandleMark __hm(THREAD);
3027 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
3028 }
3029 }
3030 }
3031 }
3032 // One error is plenty
3033 if (illegal_state_oop() == NULL && !suppress_error) {
3034 {
3035 // Prevent any HandleMarkCleaner from freeing our live handles
3036 HandleMark __hm(THREAD);
3037 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3038 }
3039 assert(THREAD->has_pending_exception(), "Lost our exception!");
3040 illegal_state_oop = THREAD->pending_exception();
3041 THREAD->clear_pending_exception();
3042 }
3043 }
3044 end++;
3045 }
3046 // Unlock the method if needed
3047 if (method_unlock_needed) {
3048 if (base->obj() == NULL) {
3049 // The method is already unlocked; this is not good.
3050 if (illegal_state_oop() == NULL && !suppress_error) {
3051 {
3052 // Prevent any HandleMarkCleaner from freeing our live handles
3053 HandleMark __hm(THREAD);
3054 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
3055 }
3056 assert(THREAD->has_pending_exception(), "Lost our exception!");
3057 illegal_state_oop = THREAD->pending_exception();
3058 THREAD->clear_pending_exception();
3059 }
3060 } else {
3061 //
3062 // The initial monitor is always used for the method.
3063 // However, if that slot no longer holds the oop for the method, it was
3064 // unlocked and reused by something that wasn't unlocked!
3065 //
3066 // deopt can come in with rcvr dead because c2 knows
3067 // its value is preserved in the monitor. So we can't use locals[0] at all
3068 // and must use the first monitor slot.
3069 //
3070 oop rcvr = base->obj();
3071 if (rcvr == NULL) {
3072 if (!suppress_error) {
3073 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
3074 illegal_state_oop = THREAD->pending_exception();
3075 THREAD->clear_pending_exception();
3076 }
3077 } else if (UseHeavyMonitors) {
3078 {
3079 // Prevent any HandleMarkCleaner from freeing our live handles.
3080 HandleMark __hm(THREAD);
3081 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
3082 }
3083 if (THREAD->has_pending_exception()) {
3084 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
3085 THREAD->clear_pending_exception();
3086 }
3087 } else {
3088 BasicLock* lock = base->lock();
3089 markOop header = lock->displaced_header();
3090 base->set_obj(NULL);
3092 if (!rcvr->mark()->has_bias_pattern()) {
3093 base->set_obj(NULL);
3094 // If it isn't recursive we either must swap old header or call the runtime
3095 if (header != NULL) {
3096 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
3097 // restore object for the slow case
3098 base->set_obj(rcvr);
3099 {
3100 // Prevent any HandleMarkCleaner from freeing our live handles
3101 HandleMark __hm(THREAD);
3102 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
3103 }
3104 if (THREAD->has_pending_exception()) {
3105 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
3106 THREAD->clear_pending_exception();
3107 }
3108 }
3109 }
3110 }
3111 }
3112 }
3113 }
3114 }
3115 // Clear the do_not_unlock flag now.
3116 THREAD->clr_do_not_unlock();
3118 //
3119 // Notify jvmti/jvmdi
3120 //
3121 // NOTE: we do not notify a method_exit if we have a pending exception,
3122 // including an exception we generate for unlocking checks. In the former
3123 // case, JVMDI has already been notified by our call for the exception handler
3124 // and in both cases as far as JVMDI is concerned we have already returned.
3125 // If we notify it again JVMDI will be all confused about how many frames
3126 // are still on the stack (4340444).
3127 //
3128 // NOTE Further! It turns out that the JVMTI spec in fact expects to see
3129 // method_exit events whenever we leave an activation unless it was done
3130 // for popframe. This is nothing like jvmdi. However we are passing the
3131 // tests at the moment (apparently because they are jvmdi based) so rather
3132 // than change this code and possibly fail tests we will leave it alone
3133 // (with this note) in anticipation of changing the vm and the tests
3134 // simultaneously.
3137 //
3138 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
3142 #ifdef VM_JVMTI
3143 if (_jvmti_interp_events) {
3144 // Whenever JVMTI puts a thread in interp_only_mode, method
3145 // entry/exit events are sent for that thread to track stack depth.
3146 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
3147 {
3148 // Prevent any HandleMarkCleaner from freeing our live handles
3149 HandleMark __hm(THREAD);
3150 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
3151 }
3152 }
3153 }
3154 #endif /* VM_JVMTI */
3156 //
3157 // See if we are returning any exception
3158 // A pending exception that was pending prior to a possible popping frame
3159 // overrides the popping frame.
3160 //
3161 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
3162 if (illegal_state_oop() != NULL || original_exception() != NULL) {
3163 // Inform the frame manager we have no result.
3164 istate->set_msg(throwing_exception);
3165 if (illegal_state_oop() != NULL)
3166 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
3167 else
3168 THREAD->set_pending_exception(original_exception(), NULL, 0);
3169 UPDATE_PC_AND_RETURN(0);
3170 }
3172 if (istate->msg() == popping_frame) {
3173 // Make it simpler for the assembly code and set the message for the frame pop.
3175 if (istate->prev() == NULL) {
3176 // We must be returning to a deoptimized frame (because popframe only happens between
3177 // two interpreted frames). We need to save the current arguments in C heap so that
3178 // the deoptimized frame when it restarts can copy the arguments to its expression
3179 // stack and re-execute the call. We also have to notify deoptimization that this
3180 // has occurred and to pick up the preserved args and copy them to the deoptimized
3181 // frame's java expression stack. Yuck.
3182 //
3183 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
3184 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
3185 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
3186 }
3187 } else {
3188 istate->set_msg(return_from_method);
3189 }
3191 // Normal return
3192 // Advance the pc and return to frame manager
3193 UPDATE_PC_AND_RETURN(1);
3194 } /* handle_return: */
3196 // This is really a fatal error return
3198 finish:
3199 DECACHE_TOS();
3200 DECACHE_PC();
3202 return;
3203 }
3205 /*
3206 * All the code following this point is only produced once and is not present
3207 * in the JVMTI version of the interpreter
3208 */
3210 #ifndef VM_JVMTI
3212 // This constructor should only be used to construct the object to signal
3213 // interpreter initialization. All other instances should be created by
3214 // the frame manager.
3215 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
3216 if (msg != initialize) ShouldNotReachHere();
3217 _msg = msg;
3218 _self_link = this;
3219 _prev_link = NULL;
3220 }
3222 // Inline static functions for Java Stack and Local manipulation
3224 // The implementations are platform dependent. We have to worry about alignment
3225 // issues on some machines, which can change on the same platform depending on
3226 // whether it is an LP64 machine as well.
3227 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
3228 return (address) tos[Interpreter::expr_index_at(-offset)];
3229 }
3231 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
3232 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
3233 }
3235 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
3236 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
3237 }
3239 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
3240 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
3241 }
3243 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
3244 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
3245 }
3247 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
3248 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
3249 }
3251 // only used for value types
3252 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
3253 int offset) {
3254 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3255 }
3257 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
3258 int offset) {
3259 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3260 }
3262 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
3263 int offset) {
3264 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3265 }
3267 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
3268 int offset) {
3269 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3270 }
3272 // needs to be platform dependent for the 32-bit platforms.
3273 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
3274 int offset) {
3275 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
3276 }
3278 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
3279 address addr, int offset) {
3280 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
3281 ((VMJavaVal64*)addr)->d);
3282 }
3284 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
3285 int offset) {
3286 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; // poison the unused half of the two-slot long
3287 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
3288 }
3290 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
3291 address addr, int offset) {
3292 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb; // poison the unused half of the two-slot long
3293 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
3294 ((VMJavaVal64*)addr)->l;
3295 }
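// For illustration: a jlong occupies two expression-stack slots, with only one
// half carrying the value; the other half gets the 0xdeedbeeb poison so stray
// reads stand out. A push in the dispatch loop looks roughly like (paraphrased,
// not verbatim):
//
//   SET_STACK_LONG(value, 1);               // expands to set_stack_long(topOfStack, value, 1)
//   UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);   // advance the pc, grow the stack by two slots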
3297 // Locals
3299 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
3300 return (address)locals[Interpreter::local_index_at(-offset)];
3301 }
3302 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
3303 return (jint)locals[Interpreter::local_index_at(-offset)];
3304 }
3305 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
3306 return (jfloat)locals[Interpreter::local_index_at(-offset)];
3307 }
3308 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
3309 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
3310 }
3311 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
3312 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
3313 }
3314 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
3315 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
3316 }
3318 // Returns the address of a 64-bit value in the locals array.
3319 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
3320 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
3321 }
3322 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
3323 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
3324 }
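// For illustration (hypothetical indices): a 64-bit local logically at index n
// occupies slots n and n+1, which is why the accessors above index with
// -(offset+1). A long living in local slots 2 and 3 is addressed by its first
// slot:
//
//   jlong v = locals_long(locals, 2);     // reads the pair occupying slots 2 and 3
//   set_locals_long(locals, v + 1, 2);    // writes both slots back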
3326 // Used for local value or returnAddress
3327 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
3328 address value, int offset) {
3329 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
3330 }
3331 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
3332 jint value, int offset) {
3333 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
3334 }
3335 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
3336 jfloat value, int offset) {
3337 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
3338 }
3339 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
3340 oop value, int offset) {
3341 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
3342 }
3343 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
3344 jdouble value, int offset) {
3345 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
3346 }
3347 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
3348 jlong value, int offset) {
3349 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
3350 }
3351 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
3352 address addr, int offset) {
3353 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
3354 }
3355 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
3356 address addr, int offset) {
3357 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
3358 }
3360 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
3361 intptr_t* locals, int locals_offset) {
3362 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
3363 locals[Interpreter::local_index_at(-locals_offset)] = value;
3364 }
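// For illustration, paraphrased from the astore opcode case in the dispatch
// loop (not a verbatim quote) -- moving the raw slot is what lets astore store
// either an oop or a returnAddress:
//
//   astore(topOfStack, -1, locals, pc[1]);    // pop top of stack into local pc[1]
//   UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);    // 2-byte instruction, stack shrinks by one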
3367 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
3368 int to_offset) {
3369 tos[Interpreter::expr_index_at(-to_offset)] =
3370 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
3371 }
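// For illustration: offsets are again relative to the top of stack, with -1 the
// topmost occupied slot and 0 the first free slot, so dup below copies the top
// element into the next free slot and the dispatch loop then grows the stack:
//
//   copy_stack_slot(topOfStack, -1, 0);   // duplicate top of stack into the free slot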
3373 void BytecodeInterpreter::dup(intptr_t *tos) {
3374 copy_stack_slot(tos, -1, 0);
3375 }
3376 void BytecodeInterpreter::dup2(intptr_t *tos) {
3377 copy_stack_slot(tos, -2, 0);
3378 copy_stack_slot(tos, -1, 1);
3379 }
3381 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
3382 /* insert top word two down */
3383 copy_stack_slot(tos, -1, 0);
3384 copy_stack_slot(tos, -2, -1);
3385 copy_stack_slot(tos, 0, -2);
3386 }
3388 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
3389 /* insert top word three down */
3390 copy_stack_slot(tos, -1, 0);
3391 copy_stack_slot(tos, -2, -1);
3392 copy_stack_slot(tos, -3, -2);
3393 copy_stack_slot(tos, 0, -3);
3394 }
3395 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
3396 /* insert top 2 slots three down */
3397 copy_stack_slot(tos, -1, 1);
3398 copy_stack_slot(tos, -2, 0);
3399 copy_stack_slot(tos, -3, -1);
3400 copy_stack_slot(tos, 1, -2);
3401 copy_stack_slot(tos, 0, -3);
3402 }
3403 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
3404 /* insert top 2 slots four down */
3405 copy_stack_slot(tos, -1, 1);
3406 copy_stack_slot(tos, -2, 0);
3407 copy_stack_slot(tos, -3, -1);
3408 copy_stack_slot(tos, -4, -2);
3409 copy_stack_slot(tos, 1, -3);
3410 copy_stack_slot(tos, 0, -4);
3411 }
3414 void BytecodeInterpreter::swap(intptr_t *tos) {
3415 // swap top two elements
3416 intptr_t val = tos[Interpreter::expr_index_at(1)];
3417 // Copy -2 entry to -1
3418 copy_stack_slot(tos, -2, -1);
3419 // Store saved -1 entry into -2
3420 tos[Interpreter::expr_index_at(2)] = val;
3421 }
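// For reference, the stack transitions implemented above, written with the top
// of stack on the right (so "..., b, a" means a is on top); these are the
// untyped one-slot forms from the JVM specification:
//
//   dup     : ..., a          -> ..., a, a
//   dup2    : ..., b, a       -> ..., b, a, b, a
//   dup_x1  : ..., b, a       -> ..., a, b, a
//   dup_x2  : ..., c, b, a    -> ..., a, c, b, a
//   dup2_x1 : ..., c, b, a    -> ..., b, a, c, b, a
//   dup2_x2 : ..., d, c, b, a -> ..., b, a, d, c, b, a
//   swap    : ..., b, a       -> ..., a, b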
3422 // --------------------------------------------------------------------------------
3423 // Non-product code
3424 #ifndef PRODUCT
3426 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
3427 switch (msg) {
3428 case BytecodeInterpreter::no_request: return("no_request");
3429 case BytecodeInterpreter::initialize: return("initialize");
3430 // status message to C++ interpreter
3431 case BytecodeInterpreter::method_entry: return("method_entry");
3432 case BytecodeInterpreter::method_resume: return("method_resume");
3433 case BytecodeInterpreter::got_monitors: return("got_monitors");
3434 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
3435 // requests to frame manager from C++ interpreter
3436 case BytecodeInterpreter::call_method: return("call_method");
3437 case BytecodeInterpreter::return_from_method: return("return_from_method");
3438 case BytecodeInterpreter::more_monitors: return("more_monitors");
3439 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
3440 case BytecodeInterpreter::popping_frame: return("popping_frame");
3441 case BytecodeInterpreter::do_osr: return("do_osr");
3442 // deopt
3443 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
3444 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
3445 default: return("BAD MSG");
3446 }
3447 }
3448 void
3449 BytecodeInterpreter::print() {
3450 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
3451 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
3452 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
3453 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
3454 {
3455 ResourceMark rm;
3456 char *method_name = _method->name_and_sig_as_C_string();
3457 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
3458 }
3459 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
3460 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
3461 tty->print_cr("msg: %s", C_msg(this->_msg));
3462 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
3463 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
3464 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
3465 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
3466 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
3467 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
3468 tty->print_cr("native_mirror: " INTPTR_FORMAT, (void*) this->_oop_temp);
3469 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
3470 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
3471 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
3472 #ifdef SPARC
3473 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
3474 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
3475 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
3476 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
3477 #endif
3478 #if !defined(ZERO)
3479 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
3480 #endif // !ZERO
3481 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
3482 }
3484 extern "C" {
3485 void PI(uintptr_t arg) {
3486 ((BytecodeInterpreter*)arg)->print();
3487 }
3488 }
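// PI ("print interpreter") is exported with C linkage so the interpreter state
// can be dumped from a native debugger. Assuming gdb and an interpreter state
// pointer in hand, something like:
//
//   (gdb) call PI((uintptr_t)istate)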
3489 #endif // PRODUCT
3491 #endif // VM_JVMTI
3492 #endif // CC_INTERP