Thu, 27 Sep 2012 15:49:48 -0700
7198084: NPG: distance is too big for short branches in test_invocation_counter_for_mdp()
Summary: use long branches in test_invocation_counter_for_mdp()
Reviewed-by: twisti
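
The change is confined to test_invocation_counter_for_mdp() below: in a debug VM, the code emitted by call_VM() and test_backedge_count_for_osr() grew past the reach of the short-form branches, so the compare-and-branch is now split into an explicit cmp()/br() pair (and ba()) with a nop in the delay slot. A minimal sketch of the shape of the fix, assuming the short forms were used before (not the verbatim diff):

    // short form (single compare-and-branch; limited displacement):
    //   cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);
    // long form (full branch range), as now emitted:
    cmp(invocation_count, Rtmp);
    br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
    delayed()->nop();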
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros.

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
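
// Illustrative trace (hypothetical values, LP64): args_size = 2 words and
// locals_size = 5 words gives delta = 3 words; round_to() pads it to 4 words
// so SP stays 2-word aligned, and the sll by LogBytesPerWord yields 32 bytes.
// When locals_size <= args_size, subcc sets the negative condition and the
// annulled br/mov pair above clamps delta to 0.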

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                    // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  set(tbl, G3_scratch);                                 // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
#endif
}
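
// Net effect of either path above: IdispatchAddress = dispatch_table[bytecode],
// i.e. entry = table_base + (next_bytecode << LogBytesPerWord). For example
// (hypothetical), bytecode 0x60 (iadd) with an 8-byte word loads the handler
// pointer from table_base + 0x300.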

// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}

void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);                      // load next bytecode
  dispatch_base(state, table);
}

void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed.  If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                      // fall through
  case ctos:                                      // fall through
  case stos:                                      // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }

  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}

void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}

void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */

#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                        // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);    // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
    set(tbl, G3_scratch);                               // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);         // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}

// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
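//
// Concretely (a hypothetical LP64 example): push_l below stores the long with
// a single stx into one 8-byte expression-stack slot and leaves the companion
// slot with no live data (debug builds scribble G0 there, see
// store_unaligned_long); on 32-bit, the two 4-byte halves land in the two
// slots in memory order.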

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();
#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();            break;
  case btos: push_i();              break;
  case ctos:
  case stos: push_i();              break;
  case itos: push_i();              break;
  case ltos: push_l();              break;
  case ftos: push_f();              break;
  case dtos: push_d();              break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();             break;
  case btos: pop_i();               break;
  case ctos:
  case stos: pop_i();               break;
  case itos: pop_i();               break;
  case ltos: pop_l();               break;
  case ftos: pop_f();               break;
  case dtos: pop_d();               break;
  case vtos: /* nothing to do */    break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  lduh(Lmethod, in_bytes(Method::max_stack_offset()), Gframe_size);  // Load max stack.
  add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size );  // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}

#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops:  Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes:  Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false, false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}

void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
   default: ShouldNotReachHere();

   case   Signed: ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
   case Unsigned: ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: orcc( Rdst, Rtmp, Rdst ); break;
   case dont_set_CC: or3(  Rdst, Rtmp, Rdst ); break;
  }
}
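
// Example: for bytes {0x12, 0x34} at Lbcp + bcp_offset, the Unsigned variant
// leaves Rdst == 0x1234 (high byte shifted left by BitsPerByte, low byte
// or'ed in); the Signed variant sign-extends the high byte 0x12 first.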

void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: break;
   case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}
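
// Note on the u4 case above: invokedynamic indices are encoded in the bytecode
// stream as ~index, so xor3(index, -1, index) (a bitwise NOT) recovers the
// plain index -- e.g. the encoded value ~123 decodes back to 123, which is
// exactly what the preceding assert checks.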

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}
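
// Net address computation above (a sketch): the oop loaded is at
//   resolved_references_base + arrayOopDesc::base_offset_in_bytes(T_OBJECT)
//                            + (index << LogBytesPerHeapOop)
// where the array base itself is reached via ConstantPool -> JNI handle ->
// objArray, matching the two ld_ptr steps.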

// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if could use
// a single conditional branch (i.e. if span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}

// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
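
// Typical use of the split form (see index_check_without_pop below): the
// caller fills the delay slot of throw_if_not_1_* with useful work, e.g.
//   throw_if_not_1_x( notZero, ptr_ok );
//   delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp );
//   throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);
// whereas the shorthand forms above simply burn the delay slot with a nop.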

// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res); // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res); // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}

void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}

// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized); // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state); // save tos
  ld(access_flags, G3_scratch); // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method.  However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}

// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break;    // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
#endif
  case btos:                                      // fall through
  case ctos:
  case stos:                                      // fall through
  case atos:                                      // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break;  // O0 -> I0
  case ftos:                                      // fall through
  case dtos:                                      // fall through
  case vtos: /* nothing to do */                  break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1. We can't tell whether we're returning to
    // interpreted or compiled code, so to be safe we set up both G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */

// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}
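
// Worked example of the composite andcc test above (hypothetical LP64 values):
// if the displaced header lives at SP + 0x40, temp_reg == 0x40 after the
// subtractions. 0x40 & 0xFFFFF003 == 0, so both conditions hold at once:
// the header is within the 4K "page" proximity window of SP, and the low two
// lock bits are 00 (stack-locked), so the recursive fast path is taken.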

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorStateException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}

// Test ImethodDataPtr.  If it is null, continue at the specified label

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}
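
// Note: the explicit cmp/br/delayed()->nop() and ba/delayed()->nop() sequences
// above are the fix for 7198084. The *_short branch forms may be emitted as a
// single compare-and-branch instruction whose displacement field is far
// smaller than a regular branch's (roughly +/-2KB on cbcond-capable parts),
// and the debug-VM code that follows this point can exceed that reach.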

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}
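
// The increment path above saturates rather than wraps: addcc sets the carry
// flag only when bumped_count rolls over from all-ones to zero, and subc then
// subtracts that carry again, pinning the counter at its maximum value (this
// is why the assert insists DataLayout::counter_increment == 1).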

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}
1548 // Count a taken branch in the bytecodes.
1550 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1551 if (ProfileInterpreter) {
1552 Label profile_continue;
1554 // If no method data exists, go to profile_continue.
1555 test_method_data_pointer(profile_continue);
1557 // We are taking a branch. Increment the taken count.
1558 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);
1560 // The method data pointer needs to be updated to reflect the new target.
1561 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1562 bind (profile_continue);
1563 }
1564 }
1567 // Count a not-taken branch in the bytecodes.
1569 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
1570 if (ProfileInterpreter) {
1571 Label profile_continue;
1573 // If no method data exists, go to profile_continue.
1574 test_method_data_pointer(profile_continue);
1576 // We are not taking a branch. Increment the not taken count.
1577 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);
1579 // The method data pointer needs to be updated to correspond to the
1580 // next bytecode.
1581 update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1582 bind (profile_continue);
1583 }
1584 }
1587 // Count a non-virtual call in the bytecodes.
1589 void InterpreterMacroAssembler::profile_call(Register scratch) {
1590 if (ProfileInterpreter) {
1591 Label profile_continue;
1593 // If no method data exists, go to profile_continue.
1594 test_method_data_pointer(profile_continue);
1596 // We are making a call. Increment the count.
1597 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1599 // The method data pointer needs to be updated to reflect the new target.
1600 update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1601 bind (profile_continue);
1602 }
1603 }
1606 // Count a final call in the bytecodes.
1608 void InterpreterMacroAssembler::profile_final_call(Register scratch) {
1609 if (ProfileInterpreter) {
1610 Label profile_continue;
1612 // If no method data exists, go to profile_continue.
1613 test_method_data_pointer(profile_continue);
1615 // We are making a call. Increment the count.
1616 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1618 // The method data pointer needs to be updated to reflect the new target.
1619 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1620 bind (profile_continue);
1621 }
1622 }
1625 // Count a virtual call in the bytecodes.
1627 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1628 Register scratch,
1629 bool receiver_can_be_null) {
1630 if (ProfileInterpreter) {
1631 Label profile_continue;
1633 // If no method data exists, go to profile_continue.
1634 test_method_data_pointer(profile_continue);
1637 Label skip_receiver_profile;
1638 if (receiver_can_be_null) {
1639 Label not_null;
1640 br_notnull_short(receiver, Assembler::pt, not_null);
1641 // We are making a call. Increment the count for null receiver.
1642 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1643 ba_short(skip_receiver_profile);
1644 bind(not_null);
1645 }
1647 // Record the receiver type.
1648 record_klass_in_profile(receiver, scratch, true);
1649 bind(skip_receiver_profile);
1651 // The method data pointer needs to be updated to reflect the new target.
1652 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1653 bind (profile_continue);
1654 }
1655 }
1657 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1658 Register receiver, Register scratch,
1659 int start_row, Label& done, bool is_virtual_call) {
1660 if (TypeProfileWidth == 0) {
1661 if (is_virtual_call) {
1662 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1663 }
1664 return;
1665 }
1667 int last_row = VirtualCallData::row_limit() - 1;
1668 assert(start_row <= last_row, "must be work left to do");
1669 // Test this row for both the receiver and for null.
1670 // Take any of three different outcomes:
1671 // 1. found receiver => increment count and goto done
1672 // 2. found null => keep looking for case 1, maybe allocate this cell
1673 // 3. found something else => keep looking for cases 1 and 2
1674 // Case 3 is handled by a recursive call.
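// A sketch of the code emitted for start_row == 0 (the array accessors
// here are illustrative pseudo-code, not real DataLayout accessors):
//
//   if (receiver[0] == recv)  { count[0]++; goto done; }        // case 1
//   if (receiver[0] == NULL)  { scan rows 1.. for recv;         // case 2
//                               if no match, claim row 0 below }
//   else                      { recursion emits the same        // case 3
//                               pattern for rows 1.. }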
1675 for (int row = start_row; row <= last_row; row++) {
1676 Label next_test;
1677 bool test_for_null_also = (row == start_row);
1679 // See if the receiver is receiver[n].
1680 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1681 test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1684 // The receiver is receiver[n]. Increment count[n].
1685 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1686 increment_mdp_data_at(count_offset, scratch);
1687 ba_short(done);
1688 bind(next_test);
1690 if (test_for_null_also) {
1691 Label found_null;
1692 // Failed the equality check on receiver[n]... Test for null.
1693 if (start_row == last_row) {
1694 // The only thing left to do is handle the null case.
1695 if (is_virtual_call) {
1696 brx(Assembler::zero, false, Assembler::pn, found_null);
1697 delayed()->nop();
1698 // Receiver did not match any saved receiver and there is no empty row for it.
1699 // Increment total counter to indicate polymorphic case.
1700 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1701 ba_short(done);
1702 bind(found_null);
1703 } else {
1704 brx(Assembler::notZero, false, Assembler::pt, done);
1705 delayed()->nop();
1706 }
1707 break;
1708 }
1709 // Since null is rare, make it the branch-taken case.
1710 brx(Assembler::zero, false, Assembler::pn, found_null);
1711 delayed()->nop();
1713 // Put all the "Case 3" tests here.
1714 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1716 // Found a null. Keep searching for a matching receiver,
1717 // but remember that this is an empty (unused) slot.
1718 bind(found_null);
1719 }
1720 }
1722 // In the fall-through case, we found no matching receiver, but we
1723 // observed that receiver[start_row] is NULL.
1725 // Fill in the receiver field and increment the count.
1726 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1727 set_mdp_data_at(recvr_offset, receiver);
1728 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1729 mov(DataLayout::counter_increment, scratch);
1730 set_mdp_data_at(count_offset, scratch);
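// When start_row == 0 this is the outermost expansion and the caller
// binds 'done' immediately after the emitted code, so the branch below
// can be omitted.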
1731 if (start_row > 0) {
1732 ba_short(done);
1733 }
1734 }
1736 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1737 Register scratch, bool is_virtual_call) {
1738 assert(ProfileInterpreter, "must be profiling");
1739 Label done;
1741 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1743 bind (done);
1744 }
1747 // Count a ret in the bytecodes.
1749 void InterpreterMacroAssembler::profile_ret(TosState state,
1750 Register return_bci,
1751 Register scratch) {
1752 if (ProfileInterpreter) {
1753 Label profile_continue;
1754 uint row;
1756 // If no method data exists, go to profile_continue.
1757 test_method_data_pointer(profile_continue);
1759 // Update the total ret count.
1760 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
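// Each cached row is tested in turn; the emitted code is roughly:
//
//   if (bci[row] == return_bci) {
//     bci_count[row] += DataLayout::counter_increment;
//     mdp += bci_displacement[row];
//     goto profile_continue;
//   }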
1762 for (row = 0; row < RetData::row_limit(); row++) {
1763 Label next_test;
1765 // See if return_bci is equal to bci[n]:
1766 test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1767 return_bci, next_test, scratch);
1769 // return_bci is equal to bci[n]. Increment the count.
1770 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1772 // The method data pointer needs to be updated to reflect the new target.
1773 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1774 ba_short(profile_continue);
1775 bind(next_test);
1776 }
1778 update_mdp_for_ret(state, return_bci);
1780 bind (profile_continue);
1781 }
1782 }
1784 // Profile an unexpected null in the bytecodes.
1785 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1786 if (ProfileInterpreter) {
1787 Label profile_continue;
1789 // If no method data exists, go to profile_continue.
1790 test_method_data_pointer(profile_continue);
1792 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1794 // The method data pointer needs to be updated.
1795 int mdp_delta = in_bytes(BitData::bit_data_size());
1796 if (TypeProfileCasts) {
1797 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1798 }
1799 update_mdp_by_constant(mdp_delta);
1801 bind (profile_continue);
1802 }
1803 }
1805 void InterpreterMacroAssembler::profile_typecheck(Register klass,
1806 Register scratch) {
1807 if (ProfileInterpreter) {
1808 Label profile_continue;
1810 // If no method data exists, go to profile_continue.
1811 test_method_data_pointer(profile_continue);
1813 int mdp_delta = in_bytes(BitData::bit_data_size());
1814 if (TypeProfileCasts) {
1815 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1817 // Record the object type.
1818 record_klass_in_profile(klass, scratch, false);
1819 }
1821 // The method data pointer needs to be updated.
1822 update_mdp_by_constant(mdp_delta);
1824 bind (profile_continue);
1825 }
1826 }
1828 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
1829 if (ProfileInterpreter && TypeProfileCasts) {
1830 Label profile_continue;
1832 // If no method data exists, go to profile_continue.
1833 test_method_data_pointer(profile_continue);
1835 int count_offset = in_bytes(CounterData::count_offset());
1836 // Back up the address, since we have already bumped the mdp.
1837 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1839 // *Decrement* the counter. We expect to see zero or small negatives.
1840 increment_mdp_data_at(count_offset, scratch, true);
1842 bind (profile_continue);
1843 }
1844 }
1846 // Count the default case of a switch construct.
1848 void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
1849 if (ProfileInterpreter) {
1850 Label profile_continue;
1852 // If no method data exists, go to profile_continue.
1853 test_method_data_pointer(profile_continue);
1855 // Update the default case count
1856 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1857 scratch);
1859 // The method data pointer needs to be updated.
1860 update_mdp_by_offset(
1861 in_bytes(MultiBranchData::default_displacement_offset()),
1862 scratch);
1864 bind (profile_continue);
1865 }
1866 }
1868 // Count the index'th case of a switch construct.
1870 void InterpreterMacroAssembler::profile_switch_case(Register index,
1871 Register scratch,
1872 Register scratch2,
1873 Register scratch3) {
1874 if (ProfileInterpreter) {
1875 Label profile_continue;
1877 // If no method data exists, go to profile_continue.
1878 test_method_data_pointer(profile_continue);
1880 // Compute the offset: (index * per_case_size()) + case_array_offset()
1881 set(in_bytes(MultiBranchData::per_case_size()), scratch);
1882 smul(index, scratch, scratch);
1883 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);
1885 // Update the case count
1886 increment_mdp_data_at(scratch,
1887 in_bytes(MultiBranchData::relative_count_offset()),
1888 scratch2,
1889 scratch3);
1891 // The method data pointer needs to be updated.
1892 update_mdp_by_offset(scratch,
1893 in_bytes(MultiBranchData::relative_displacement_offset()),
1894 scratch2);
1896 bind (profile_continue);
1897 }
1898 }
1900 // Add an InterpMonitorElem to the stack (see frame_sparc.hpp)
1902 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
1903 Register Rtemp,
1904 Register Rtemp2 ) {
1906 Register Rlimit = Lmonitors;
1907 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1908 assert( (delta & LongAlignmentMask) == 0,
1909 "sizeof BasicObjectLock must be even number of doublewords");
1911 sub( SP, delta, SP);
1912 sub( Lesp, delta, Lesp);
1913 sub( Lmonitors, delta, Lmonitors);
1915 if (!stack_is_empty) {
1917 // must copy stack contents down
1919 Label start_copying, next;
1921 // untested("monitor stack expansion");
1922 compute_stack_base(Rtemp);
1923 ba(start_copying);
1924 delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
1926 // note: must copy from low memory upwards
1927 // On entry to loop,
1928 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1929 // Loop mutates Rtemp
1931 bind( next);
1933 st_ptr(Rtemp2, Rtemp, 0);
1934 inc(Rtemp, wordSize);
1935 cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1937 bind( start_copying );
1939 brx( notEqual, true, pn, next );
1940 delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
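// The annul bit is set, so the ld_ptr in the delay slot executes only
// when the branch back to next is taken; on loop exit the stray load
// past the copy limit is suppressed.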
1942 // done copying stack
1943 }
1944 }
1946 // Locals
1947 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
1948 assert_not_delayed();
1949 sll(index, Interpreter::logStackElementSize, index);
1950 sub(Llocals, index, index);
1951 ld_ptr(index, 0, dst);
1952 // Note: index must hold the effective address--the iinc template uses it
1953 }
1955 // Just like access_local_ptr, but the value is a returnAddress
1956 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
1957 Register dst ) {
1958 assert_not_delayed();
1959 sll(index, Interpreter::logStackElementSize, index);
1960 sub(Llocals, index, index);
1961 ld_ptr(index, 0, dst);
1962 }
1964 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
1965 assert_not_delayed();
1966 sll(index, Interpreter::logStackElementSize, index);
1967 sub(Llocals, index, index);
1968 ld(index, 0, dst);
1969 // Note: index must hold the effective address--the iinc template uses it
1970 }
1973 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
1974 assert_not_delayed();
1975 sll(index, Interpreter::logStackElementSize, index);
1976 sub(Llocals, index, index);
1977 // First half stored at index n+1 (which grows down from Llocals[n])
1978 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
1979 }
1982 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
1983 assert_not_delayed();
1984 sll(index, Interpreter::logStackElementSize, index);
1985 sub(Llocals, index, index);
1986 ldf(FloatRegisterImpl::S, index, 0, dst);
1987 }
1990 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
1991 assert_not_delayed();
1992 sll(index, Interpreter::logStackElementSize, index);
1993 sub(Llocals, index, index);
1994 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1995 }
1998 #ifdef ASSERT
1999 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
2000 Label L;
2002 assert(Rindex != Rscratch, "Registers cannot be same");
2003 assert(Rindex != Rscratch1, "Registers cannot be same");
2004 assert(Rlimit != Rscratch, "Registers cannot be same");
2005 assert(Rlimit != Rscratch1, "Registers cannot be same");
2006 assert(Rscratch1 != Rscratch, "Registers cannot be same");
2008 // untested("reg area corruption");
2009 add(Rindex, offset, Rscratch);
2010 add(Rlimit, 64 + STACK_BIAS, Rscratch1);
2011 cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
2012 stop("regsave area is being clobbered");
2013 bind(L);
2014 }
2015 #endif // ASSERT
2018 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
2019 assert_not_delayed();
2020 sll(index, Interpreter::logStackElementSize, index);
2021 sub(Llocals, index, index);
2022 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
2023 st(src, index, 0);
2024 }
2026 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
2027 assert_not_delayed();
2028 sll(index, Interpreter::logStackElementSize, index);
2029 sub(Llocals, index, index);
2030 #ifdef ASSERT
2031 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2032 #endif
2033 st_ptr(src, index, 0);
2034 }
2038 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
2039 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
2040 }
2042 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
2043 assert_not_delayed();
2044 sll(index, Interpreter::logStackElementSize, index);
2045 sub(Llocals, index, index);
2046 #ifdef ASSERT
2047 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2048 #endif
2049 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
2050 }
2053 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
2054 assert_not_delayed();
2055 sll(index, Interpreter::logStackElementSize, index);
2056 sub(Llocals, index, index);
2057 #ifdef ASSERT
2058 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2059 #endif
2060 stf(FloatRegisterImpl::S, src, index, 0);
2061 }
2064 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
2065 assert_not_delayed();
2066 sll(index, Interpreter::logStackElementSize, index);
2067 sub(Llocals, index, index);
2068 #ifdef ASSERT
2069 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2070 #endif
2071 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
2072 }
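// Stack-biased byte offset from FP of the monitor nearest the VM-local
// area; monitors grow from there toward lower addresses (see
// add_monitor_to_stack above).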
2075 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
2076 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
2077 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
2078 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
2079 }
2082 Address InterpreterMacroAssembler::top_most_monitor() {
2083 return Address(FP, top_most_monitor_byte_offset());
2084 }
2087 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
2088 add( Lesp, wordSize, Rdest );
2089 }
2091 #endif /* CC_INTERP */
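// In effect (the sum is deliberately left in Rtmp, see the note below):
//
//   inv_counter += count_increment;                       // written back
//   Rtmp = inv_counter + (be_counter & count_mask_value);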
2093 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
2094 assert(UseCompiler, "incrementing must be useful");
2095 #ifdef CC_INTERP
2096 Address inv_counter(G5_method, Method::invocation_counter_offset() +
2097 InvocationCounter::counter_offset());
2098 Address be_counter (G5_method, Method::backedge_counter_offset() +
2099 InvocationCounter::counter_offset());
2100 #else
2101 Address inv_counter(Lmethod, Method::invocation_counter_offset() +
2102 InvocationCounter::counter_offset());
2103 Address be_counter (Lmethod, Method::backedge_counter_offset() +
2104 InvocationCounter::counter_offset());
2105 #endif /* CC_INTERP */
2106 int delta = InvocationCounter::count_increment;
2108 // Load each counter in a register
2109 ld( inv_counter, Rtmp );
2110 ld( be_counter, Rtmp2 );
2112 assert( is_simm13( delta ), "delta too large");
2114 // Add the delta to the invocation counter and store the result
2115 add( Rtmp, delta, Rtmp );
2117 // Mask the backedge counter
2118 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2120 // Store value
2121 st( Rtmp, inv_counter);
2123 // Add invocation counter + backedge counter
2124 add( Rtmp, Rtmp2, Rtmp);
2126 // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
2127 }
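// Mirror image of increment_invocation_counter; in effect:
//
//   be_counter += count_increment;                        // written back
//   Rtmp = be_counter + (inv_counter & count_mask_value);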
2129 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
2130 assert(UseCompiler, "incrementing must be useful");
2131 #ifdef CC_INTERP
2132 Address be_counter (G5_method, Method::backedge_counter_offset() +
2133 InvocationCounter::counter_offset());
2134 Address inv_counter(G5_method, Method::invocation_counter_offset() +
2135 InvocationCounter::counter_offset());
2136 #else
2137 Address be_counter (Lmethod, Method::backedge_counter_offset() +
2138 InvocationCounter::counter_offset());
2139 Address inv_counter(Lmethod, Method::invocation_counter_offset() +
2140 InvocationCounter::counter_offset());
2141 #endif /* CC_INTERP */
2142 int delta = InvocationCounter::count_increment;
2143 // Load each counter in a register
2144 ld( be_counter, Rtmp );
2145 ld( inv_counter, Rtmp2 );
2147 // Add the delta to the backedge counter
2148 add( Rtmp, delta, Rtmp );
2150 // Mask the invocation counter, add to backedge counter
2151 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2153 // and store the result to memory
2154 st( Rtmp, be_counter );
2156 // Add backedge + invocation counter
2157 add( Rtmp, Rtmp2, Rtmp );
2159 // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2160 }
2162 #ifndef CC_INTERP
2163 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2164 Register branch_bcp,
2165 Register Rtmp ) {
2166 Label did_not_overflow;
2167 Label overflow_with_error;
2168 assert_different_registers(backedge_count, Rtmp, branch_bcp);
2169 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2171 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2172 load_contents(limit, Rtmp);
2173 cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
2175 // When ProfileInterpreter is on, the backedge_count comes from the
2176 // MethodData*, whose value does not get reset on the call to
2177 // frequency_counter_overflow(). To avoid excessive calls to the overflow
2178 // routine while the method is being compiled, add a second test to make sure
2179 // the overflow function is called only once every overflow_frequency.
2180 if (ProfileInterpreter) {
2181 const int overflow_frequency = 1024;
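// i.e. take the overflow path only when
// (backedge_count & (overflow_frequency-1)) == 0.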
2182 andcc(backedge_count, overflow_frequency-1, Rtmp);
2183 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2184 delayed()->nop();
2185 }
2187 // overflow in loop, pass branch bytecode
2188 set(6,Rtmp);
2189 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2191 // Was an OSR adapter generated?
2192 // O0 = osr nmethod
2193 br_null_short(O0, Assembler::pn, overflow_with_error);
2195 // Has the nmethod been invalidated already?
2196 ld(O0, nmethod::entry_bci_offset(), O2);
2197 cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
2199 // migrate the interpreter frame off the stack
2201 mov(G2_thread, L7);
2202 // save nmethod
2203 mov(O0, L6);
2204 set_last_Java_frame(SP, noreg);
2205 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2206 reset_last_Java_frame();
2207 mov(L7, G2_thread);
2209 // move OSR nmethod to I1
2210 mov(L6, I1);
2212 // OSR buffer to I0
2213 mov(O0, I0);
2215 // remove the interpreter frame
2216 restore(I5_savedSP, 0, SP);
2218 // Jump to the osr code.
2219 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
2220 jmp(O2, G0);
2221 delayed()->nop();
2223 bind(overflow_with_error);
2225 bind(did_not_overflow);
2226 }
2230 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
2231 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
2232 }
2235 // local helper function for the verify_oop_or_return_address macro
2236 static bool verify_return_address(Method* m, int bci) {
2237 #ifndef PRODUCT
2238 address pc = (address)(m->constMethod())
2239 + in_bytes(ConstMethod::codes_offset()) + bci;
2240 // assume it is a valid return address if it is inside m and is preceded by a jsr
2241 if (!m->contains(pc)) return false;
2242 address jsr_pc;
2243 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2244 if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr) return true;
2245 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2246 if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr_w) return true;
2247 #endif // PRODUCT
2248 return false;
2249 }
2252 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2253 if (!VerifyOops) return;
2254 // the VM documentation for the astore[_wide] bytecode allows
2255 // the TOS to be not only an oop but also a return address
2256 Label test;
2257 Label skip;
2258 // See if it is an address (in the current method):
2260 mov(reg, Rtmp);
2261 const int log2_bytecode_size_limit = 16;
2262 srl(Rtmp, log2_bytecode_size_limit, Rtmp);
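// A return address is a bci, and method bytecode size is bounded by
// 2^log2_bytecode_size_limit; any value with bits set above that limit
// cannot be a return address, so it is verified as an oop instead.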
2263 br_notnull_short( Rtmp, pt, test );
2265 // %%% should use call_VM_leaf here?
2266 save_frame_and_mov(0, Lmethod, O0, reg, O1);
2267 save_thread(L7_thread_cache);
2268 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2269 delayed()->nop();
2270 restore_thread(L7_thread_cache);
2271 br_notnull( O0, false, pt, skip );
2272 delayed()->restore();
2274 // Perform a more elaborate out-of-line call
2275 // Not an address; verify it:
2276 bind(test);
2277 verify_oop(reg);
2278 bind(skip);
2279 }
2282 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2283 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2284 }
2285 #endif /* CC_INTERP */
2287 // Inline assembly for:
2288 //
2289 // if (thread is in interp_only_mode) {
2290 // InterpreterRuntime::post_method_entry();
2291 // }
2292 // if (DTraceMethodProbes) {
2293 // SharedRuntime::dtrace_method_entry(method, receiver);
2294 // }
2295 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2296 // SharedRuntime::rc_trace_method_entry(method, receiver);
2297 // }
2299 void InterpreterMacroAssembler::notify_method_entry() {
2301 // C++ interpreter only uses this for native methods.
2303 // Whenever JVMTI puts a thread in interp_only_mode, method
2304 // entry/exit events are sent for that thread to track stack
2305 // depth. If it is possible to enter interp_only_mode we add
2306 // the code to check if the event should be sent.
2307 if (JvmtiExport::can_post_interpreter_events()) {
2308 Label L;
2309 Register temp_reg = O5;
2310 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2311 ld(interp_only, temp_reg);
2312 cmp_and_br_short(temp_reg, 0, equal, pt, L);
2313 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2314 bind(L);
2315 }
2317 {
2318 Register temp_reg = O5;
2319 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2320 call_VM_leaf(noreg,
2321 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2322 G2_thread, Lmethod);
2323 }
2325 // RedefineClasses() tracing support for obsolete method entry
2326 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2327 call_VM_leaf(noreg,
2328 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2329 G2_thread, Lmethod);
2330 }
2331 }
2334 // Inline assembly for:
2335 //
2336 // if (thread is in interp_only_mode) {
2337 // // save result
2338 // InterpreterRuntime::post_method_exit();
2339 // // restore result
2340 // }
2341 // if (DTraceMethodProbes) {
2342 // SharedRuntime::dtrace_method_exit(thread, method);
2343 // }
2344 //
2345 // Native methods have their result stored in d_tmp and l_tmp
2346 // Java methods have their result stored in the expression stack
2348 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2349 TosState state,
2350 NotifyMethodExitMode mode) {
2351 // C++ interpreter only uses this for native methods.
2353 // Whenever JVMTI puts a thread in interp_only_mode, method
2354 // entry/exit events are sent for that thread to track stack
2355 // depth. If it is possible to enter interp_only_mode we add
2356 // the code to check if the event should be sent.
2357 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2358 Label L;
2359 Register temp_reg = O5;
2360 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2361 ld(interp_only, temp_reg);
2362 cmp_and_br_short(temp_reg, 0, equal, pt, L);
2364 // Note: frame::interpreter_frame_result has a dependency on how the
2365 // method result is saved across the call to post_method_exit. For
2366 // native methods it assumes the result registers are saved to
2367 // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2368 // implementation will need to be updated too.
2370 save_return_value(state, is_native_method);
2371 call_VM(noreg,
2372 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2373 restore_return_value(state, is_native_method);
2374 bind(L);
2375 }
2377 {
2378 Register temp_reg = O5;
2379 // DTrace notification
2380 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2381 save_return_value(state, is_native_method);
2382 call_VM_leaf(
2383 noreg,
2384 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2385 G2_thread, Lmethod);
2386 restore_return_value(state, is_native_method);
2387 }
2388 }
2390 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
2391 #ifdef CC_INTERP
2392 // result potentially in O0/O1: save it across calls
2393 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
2394 #ifdef _LP64
2395 stx(O0, STATE(_native_lresult));
2396 #else
2397 std(O0, STATE(_native_lresult));
2398 #endif
2399 #else // CC_INTERP
2400 if (is_native_call) {
2401 stf(FloatRegisterImpl::D, F0, d_tmp);
2402 #ifdef _LP64
2403 stx(O0, l_tmp);
2404 #else
2405 std(O0, l_tmp);
2406 #endif
2407 } else {
2408 push(state);
2409 }
2410 #endif // CC_INTERP
2411 }
2413 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
2414 #ifdef CC_INTERP
2415 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
2416 #ifdef _LP64
2417 ldx(STATE(_native_lresult), O0);
2418 #else
2419 ldd(STATE(_native_lresult), O0);
2420 #endif
2421 #else // CC_INTERP
2422 if (is_native_call) {
2423 ldf(FloatRegisterImpl::D, d_tmp, F0);
2424 #ifdef _LP64
2425 ldx(l_tmp, O0);
2426 #else
2427 ldd(l_tmp, O0);
2428 #endif
2429 } else {
2430 pop(state);
2431 }
2432 #endif // CC_INTERP
2433 }
2435 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
2436 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
2437 int increment, int mask,
2438 Register scratch1, Register scratch2,
2439 Condition cond, Label *where) {
2440 ld(counter_addr, scratch1);
2441 add(scratch1, increment, scratch1);
2442 if (is_simm13(mask)) {
2443 andcc(scratch1, mask, G0);
2444 } else {
2445 set(mask, scratch2);
2446 andcc(scratch1, scratch2, G0);
2447 }
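// The store is in a non-annulled delay slot, so the bumped counter is
// written back to counter_addr whether or not the branch is taken.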
2448 br(cond, false, Assembler::pn, *where);
2449 delayed()->st(scratch1, counter_addr);
2450 }