Tue, 27 Nov 2012 14:20:21 +0100
8003935: Simplify the needed includes for using Thread::current()
Reviewed-by: dholmes, rbackman, coleenp
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
25 #include "precompiled.hpp"
26 #include "interp_masm_sparc.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "oops/arrayOop.hpp"
30 #include "oops/markOop.hpp"
31 #include "oops/methodData.hpp"
32 #include "oops/method.hpp"
33 #include "prims/jvmtiExport.hpp"
34 #include "prims/jvmtiRedefineClassesTrace.hpp"
35 #include "prims/jvmtiThreadState.hpp"
36 #include "runtime/basicLock.hpp"
37 #include "runtime/biasedLocking.hpp"
38 #include "runtime/sharedRuntime.hpp"
39 #include "runtime/thread.inline.hpp"
41 #ifndef CC_INTERP
42 #ifndef FAST_DISPATCH
43 #define FAST_DISPATCH 1
44 #endif
45 #undef FAST_DISPATCH
47 // Implementation of InterpreterMacroAssembler
49 // This file specializes the assember with interpreter-specific macros

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
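
// Worked example (illustrative, assuming locals_size holds the locals size in
// words and args_size the argument words): with 5 locals of which 2 are
// arguments, delta = 5 - 2 = 3 words, rounded up to 4 so SP stays 2-word
// aligned, then scaled to 4 * wordSize bytes of extra space.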

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                         // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress); // get entry addr
#else
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  set(tbl, G3_scratch);                                  // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);      // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp(IdispatchAddress, 0);
  if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
  else               delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub(Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub(Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub(Lbcp, 0, Lbyte_code);  // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();        // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();     // restore bcp
  //restore_locals();  // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                   break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                        break;
  case btos:                                       // fall through
  case ctos:                                       // fall through
  case stos:                                       // fall through
  case itos: ld(val_addr, Otos_l1);                       break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
  case vtos: /* nothing to do */                          break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L);  // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// Common code for dispatch and dispatch_only:
// dispatch the value in Lbyte_code and increment Lbcp.
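// In both paths below the entry address is table[bytecode]: the bytecode value
// is scaled by wordSize and used to index the dispatch table. Under
// FAST_DISPATCH the table base lives in the dedicated IdispatchTables
// register; otherwise it is materialized into G3_scratch.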
void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp(G3_scratch, 0);
  if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
  else               delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
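// For example (illustrative): after pushing an int and then a long, the
// expression stack holds three one-word slots - one for the int and two for
// the split long - so the dup* bytecodes can copy raw slots without needing
// to know which types they belong to.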

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset + Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();
#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset + Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2 * Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2 * Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word
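// (Illustrative, for 32-bit builds: the long 0x0000000100000002 is held with
// 0x00000001 in Otos_l1/O0 and 0x00000002 in Otos_l2/O1, and is stored to the
// expression stack in that same high-word-first memory order.)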

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();          break;
  case btos: push_i();            break;
  case ctos:
  case stos: push_i();            break;
  case itos: push_i();            break;
  case ltos: push_l();            break;
  case ftos: push_f();            break;
  case dtos: push_d();            break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();           break;
  case btos: pop_i();             break;
  case ctos:
  case stos: pop_i();             break;
  case itos: pop_i();             break;
  case ltos: pop_l();             break;
  case ftos: pop_f();             break;
  case dtos: pop_d();             break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub(Lmonitors, wordSize, Lesp);

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  lduh(Lmethod, in_bytes(Method::max_stack_offset()), Gframe_size);  // Load max stack.
  add(Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size);

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll(Gframe_size, LogBytesPerWord, Gframe_size);
  sub(Lesp, Gframe_size, Gframe_size);
  and3(Gframe_size, -(2 * wordSize), Gframe_size);  // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size);
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2 * BytesPerWord - 1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false, false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
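  // The two bytecode-stream bytes are combined big-endian, e.g. (illustrative)
  // bytes 0x12, 0x34 at Lbcp + bcp_offset yield Rdst = 0x1234, with the high
  // byte sign- or zero-extended according to is_signed.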
  switch (is_signed) {
  default: ShouldNotReachHere();

  case Signed:   ldsb(Lbcp, bcp_offset, Rdst);  break;  // high byte
  case Unsigned: ldub(Lbcp, bcp_offset, Rdst);  break;  // high byte
  }
  ldub(Lbcp, bcp_offset + 1, Rtmp);                     // low byte
  sll(Rdst, BitsPerByte, Rdst);
  switch (should_set_CC) {
  default: ShouldNotReachHere();

  case set_CC:      orcc(Rdst, Rtmp, Rdst);  break;
  case dont_set_CC: or3 (Rdst, Rtmp, Rdst);  break;
  }
}


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
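  // Strategy: if Lbcp + bcp_offset happens to be word-aligned, a single word
  // load in the branch delay slot below does the job; otherwise the value is
  // assembled big-endian from four separate byte loads.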
  add(Lbcp, bcp_offset, Rtmp);
  andcc(Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC) {
  default: ShouldNotReachHere();

  case set_CC:      break;
  case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst);

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
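  // Illustrative arithmetic (assuming a 4-word cache entry and 8-byte words,
  // as on LP64): the shift amount is exact_log2(4 * 8) = 5, so field index i
  // becomes byte offset i * 32 from the cache base.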
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label   &ok_is_subtype) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle.
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if we could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.
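
// Usage sketch (illustrative): the caller fills the delay slot between the
// two halves, e.g.
//   throw_if_not_1_icc(lessUnsigned, index_ok);
//   delayed()->sll(index, index_shift, index);   // caller-chosen delay slot
//   throw_if_not_2(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry,
//                  G4_scratch, index_ok);
// as done by index_check_without_pop() below.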

void InterpreterMacroAssembler::throw_if_not_1_icc(Condition ok_condition,
                                                   Label&    ok) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc(Condition ok_condition,
                                                   Label&    ok) {
  assert_not_delayed();
  bp(ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x(Condition ok_condition,
                                                 Label&    ok) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2(address  throw_entry_point,
                                               Register Rscratch,
                                               Label&   ok) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc(Condition ok_condition,
                                                 address   throw_entry_point,
                                                 Register  Rscratch) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc(ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2(throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc(Condition ok_condition,
                                                 address   throw_entry_point,
                                                 Register  Rscratch) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc(ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2(throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x(Condition ok_condition,
                                               address   throw_entry_point,
                                               Register  Rscratch) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x(ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2(throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift,
// and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x(notZero, ptr_ok);
  delayed()->ld(array, arrayOopDesc::length_offset_in_bytes(), tmp);  // check index
  throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc(lessUnsigned, index_ok);
  if (index_shift > 0) delayed()->sll(index, index_shift, index);
  else                 delayed()->add(array, index, res);  // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if we didn't do it in the delay slot
  if (index_shift > 0) add(array, index, res);  // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized);  // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state);  // save tos
  ld(access_flags, G3_scratch);  // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add(top_most_monitor(), O1);

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked, need to throw an exception.
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert((delta & LongAlignmentMask) == 0,
           "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);  // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                            // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                         // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                        // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);  // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save()); break;    // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save());         // fall through  // O1 -> I1
#endif
  case btos:                                    // fall through
  case ctos:
  case stos:                                    // fall through
  case atos:                                    // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save()); break;  // O0 -> I0
  case ftos:                                    // fall through
  case dtos:                                    // fall through
  case vtos: /* nothing to do */ break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell whether we're returning to
    // interpreted or compiled code, so to be safe we use both G1 and O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.  F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
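    // Deriving the constant (illustrative): lock_mask_in_place == 0x3 and
    // -os::vm_page_size() == -4096 == 0x...FFFFF000, so their OR is
    // 0x...FFFFF003, which sign-extends from the 13-bit immediate value -4093.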
    andcc(temp_reg, 0xFFFFF003, G0);

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case.
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a lightweight lock; if so we just unlock
    // the object and we are done.

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case.

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label.

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rtmp,
                                                                Label   &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp(invocation_count, Rtmp);
  // Use long branches because call_VM() code and following code generated by
  // test_backedge_count_for_osr() is large in debug VM.
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba(profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);
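    // (If the addcc wrapped, the carry flag is set and subc subtracts it back
    // out, leaving the counter saturated at its all-ones maximum.)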

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}
1486 // Test the location at some offset from the method data pointer.
1487 // If it is not equal to value, branch to the not_equal_continue Label.
1488 // Set condition codes to match the nullness of the loaded value.
1490 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
1491 Register value,
1492 Label& not_equal_continue,
1493 Register scratch) {
1494 assert(ProfileInterpreter, "must be profiling interpreter");
1495 ld_ptr(ImethodDataPtr, offset, scratch);
1496 cmp(value, scratch);
1497 brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
1498 delayed()->tst(scratch);
1499 }
1501 // Update the method data pointer by the displacement located at some fixed
1502 // offset from the method data pointer.
1504 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
1505 Register scratch) {
1506 assert(ProfileInterpreter, "must be profiling interpreter");
1507 ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
1508 add(ImethodDataPtr, scratch, ImethodDataPtr);
1509 }
1511 // Update the method data pointer by the displacement located at the
1512 // offset (reg + offset_of_disp).
1514 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
1515 int offset_of_disp,
1516 Register scratch) {
1517 assert(ProfileInterpreter, "must be profiling interpreter");
1518 add(reg, offset_of_disp, scratch);
1519 ld_ptr(ImethodDataPtr, scratch, scratch);
1520 add(ImethodDataPtr, scratch, ImethodDataPtr);
1521 }
1523 // Update the method data pointer by a simple constant displacement.
1525 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
1526 assert(ProfileInterpreter, "must be profiling interpreter");
1527 add(ImethodDataPtr, constant, ImethodDataPtr);
1528 }
1530 // Update the method data pointer for a _ret bytecode whose target
1531 // was not among our cached targets.
1533 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
1534 Register return_bci) {
1535 assert(ProfileInterpreter, "must be profiling interpreter");
1536 push(state);
1537 st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile
1538 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
1539 ld_ptr(l_tmp, return_bci);
1540 pop(state);
1541 }
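// Note: the new mdp is computed and written back into the frame by the VM
// call itself; TOS state and return_bci are saved manually around the call
// because call_VM may clobber them.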
1543 // Count a taken branch in the bytecodes.
1545 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1546 if (ProfileInterpreter) {
1547 Label profile_continue;
1549 // If no method data exists, go to profile_continue.
1550 test_method_data_pointer(profile_continue);
1552 // We are taking a branch. Increment the taken count.
1553 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);
1555 // The method data pointer needs to be updated to reflect the new target.
1556 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1557 bind (profile_continue);
1558 }
1559 }
1562 // Count a not-taken branch in the bytecodes.
1564 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
1565 if (ProfileInterpreter) {
1566 Label profile_continue;
1568 // If no method data exists, go to profile_continue.
1569 test_method_data_pointer(profile_continue);
1571 // We are not taking a branch. Increment the not-taken count.
1572 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);
1574 // The method data pointer needs to be updated to correspond to the
1575 // next bytecode.
1576 update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1577 bind (profile_continue);
1578 }
1579 }
1582 // Count a non-virtual call in the bytecodes.
1584 void InterpreterMacroAssembler::profile_call(Register scratch) {
1585 if (ProfileInterpreter) {
1586 Label profile_continue;
1588 // If no method data exists, go to profile_continue.
1589 test_method_data_pointer(profile_continue);
1591 // We are making a call. Increment the count.
1592 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1594 // The method data pointer needs to be updated to reflect the new target.
1595 update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1596 bind (profile_continue);
1597 }
1598 }
1601 // Count a final call in the bytecodes.
1603 void InterpreterMacroAssembler::profile_final_call(Register scratch) {
1604 if (ProfileInterpreter) {
1605 Label profile_continue;
1607 // If no method data exists, go to profile_continue.
1608 test_method_data_pointer(profile_continue);
1610 // We are making a call. Increment the count.
1611 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1613 // The method data pointer needs to be updated to reflect the new target.
1614 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1615 bind (profile_continue);
1616 }
1617 }
1620 // Count a virtual call in the bytecodes.
1622 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1623 Register scratch,
1624 bool receiver_can_be_null) {
1625 if (ProfileInterpreter) {
1626 Label profile_continue;
1628 // If no method data exists, go to profile_continue.
1629 test_method_data_pointer(profile_continue);
1632 Label skip_receiver_profile;
1633 if (receiver_can_be_null) {
1634 Label not_null;
1635 br_notnull_short(receiver, Assembler::pt, not_null);
1636 // We are making a call. Increment the count for null receiver.
1637 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1638 ba_short(skip_receiver_profile);
1639 bind(not_null);
1640 }
1642 // Record the receiver type.
1643 record_klass_in_profile(receiver, scratch, true);
1644 bind(skip_receiver_profile);
1646 // The method data pointer needs to be updated to reflect the new target.
1647 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1648 bind (profile_continue);
1649 }
1650 }
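// Sketch of the profile update:
//   if (receiver_can_be_null && receiver == NULL) count += counter_increment;
//   else record receiver's klass in the receiver-type rows (see below);
//   mdp += virtual_call_data_size();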
1652 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1653 Register receiver, Register scratch,
1654 int start_row, Label& done, bool is_virtual_call) {
1655 if (TypeProfileWidth == 0) {
1656 if (is_virtual_call) {
1657 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1658 }
1659 return;
1660 }
1662 int last_row = VirtualCallData::row_limit() - 1;
1663 assert(start_row <= last_row, "must be work left to do");
1664 // Test this row for both the receiver and for null.
1665 // Take any of three different outcomes:
1666 // 1. found receiver => increment count and goto done
1667 // 2. found null => keep looking for case 1, maybe allocate this cell
1668 // 3. found something else => keep looking for cases 1 and 2
1669 // Case 3 is handled by a recursive call.
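// Sketch of the generated scan, per row r:
//   if (receiver == receiver[r]) { count[r] += counter_increment; goto done; }
//   if (receiver[r] == NULL)     { receiver[r] = receiver;
//                                  count[r] = counter_increment;  goto done; }
//   // otherwise fall through and try row r+1 (the recursive call below)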
1670 for (int row = start_row; row <= last_row; row++) {
1671 Label next_test;
1672 bool test_for_null_also = (row == start_row);
1674 // See if the receiver is receiver[n].
1675 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1676 test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1677 // (condition codes were set by the delayed()->tst(scratch) in test_mdp_data_at)
1679 // The receiver is receiver[n]. Increment count[n].
1680 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1681 increment_mdp_data_at(count_offset, scratch);
1682 ba_short(done);
1683 bind(next_test);
1685 if (test_for_null_also) {
1686 Label found_null;
1687 // Failed the equality check on receiver[n]... Test for null.
1688 if (start_row == last_row) {
1689 // The only thing left to do is handle the null case.
1690 if (is_virtual_call) {
1691 brx(Assembler::zero, false, Assembler::pn, found_null);
1692 delayed()->nop();
1693 // Receiver did not match any saved receiver and there is no empty row for it.
1694 // Increment total counter to indicate polymorphic case.
1695 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1696 ba_short(done);
1697 bind(found_null);
1698 } else {
1699 brx(Assembler::notZero, false, Assembler::pt, done);
1700 delayed()->nop();
1701 }
1702 break;
1703 }
1704 // Since null is rare, make it the branch-taken case.
1705 brx(Assembler::zero, false, Assembler::pn, found_null);
1706 delayed()->nop();
1708 // Put all the "Case 3" tests here.
1709 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1711 // Found a null. Keep searching for a matching receiver,
1712 // but remember that this is an empty (unused) slot.
1713 bind(found_null);
1714 }
1715 }
1717 // In the fall-through case, we found no matching receiver, but we
1718 // observed that receiver[start_row] is NULL.
1720 // Fill in the receiver field and increment the count.
1721 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1722 set_mdp_data_at(recvr_offset, receiver);
1723 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1724 mov(DataLayout::counter_increment, scratch);
1725 set_mdp_data_at(count_offset, scratch);
1726 if (start_row > 0) {
1727 ba_short(done);
1728 }
1729 }
1731 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1732 Register scratch, bool is_virtual_call) {
1733 assert(ProfileInterpreter, "must be profiling");
1734 Label done;
1736 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1738 bind (done);
1739 }
1742 // Count a ret in the bytecodes.
1744 void InterpreterMacroAssembler::profile_ret(TosState state,
1745 Register return_bci,
1746 Register scratch) {
1747 if (ProfileInterpreter) {
1748 Label profile_continue;
1749 uint row;
1751 // If no method data exists, go to profile_continue.
1752 test_method_data_pointer(profile_continue);
1754 // Update the total ret count.
1755 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1757 for (row = 0; row < RetData::row_limit(); row++) {
1758 Label next_test;
1760 // See if return_bci is equal to bci[n]:
1761 test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1762 return_bci, next_test, scratch);
1764 // return_bci is equal to bci[n]. Increment the count.
1765 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1767 // The method data pointer needs to be updated to reflect the new target.
1768 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1769 ba_short(profile_continue);
1770 bind(next_test);
1771 }
1773 update_mdp_for_ret(state, return_bci);
1775 bind (profile_continue);
1776 }
1777 }
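// Sketch: after bumping the total ret count, the cached rows are searched
// inline:
//   for (row = 0; row < RetData::row_limit(); row++)
//     if (return_bci == bci[row]) { bci_count[row] += counter_increment;
//                                   mdp += bci_displacement[row]; goto continue; }
//   update_mdp_for_ret(state, return_bci);  // uncached target, slow path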
1779 // Profile an unexpected null in the bytecodes.
1780 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1781 if (ProfileInterpreter) {
1782 Label profile_continue;
1784 // If no method data exists, go to profile_continue.
1785 test_method_data_pointer(profile_continue);
1787 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1789 // The method data pointer needs to be updated.
1790 int mdp_delta = in_bytes(BitData::bit_data_size());
1791 if (TypeProfileCasts) {
1792 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1793 }
1794 update_mdp_by_constant(mdp_delta);
1796 bind (profile_continue);
1797 }
1798 }
1800 void InterpreterMacroAssembler::profile_typecheck(Register klass,
1801 Register scratch) {
1802 if (ProfileInterpreter) {
1803 Label profile_continue;
1805 // If no method data exists, go to profile_continue.
1806 test_method_data_pointer(profile_continue);
1808 int mdp_delta = in_bytes(BitData::bit_data_size());
1809 if (TypeProfileCasts) {
1810 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1812 // Record the object type.
1813 record_klass_in_profile(klass, scratch, false);
1814 }
1816 // The method data pointer needs to be updated.
1817 update_mdp_by_constant(mdp_delta);
1819 bind (profile_continue);
1820 }
1821 }
1823 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
1824 if (ProfileInterpreter && TypeProfileCasts) {
1825 Label profile_continue;
1827 // If no method data exists, go to profile_continue.
1828 test_method_data_pointer(profile_continue);
1830 int count_offset = in_bytes(CounterData::count_offset());
1831 // Back up the address, since we have already bumped the mdp.
1832 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1834 // *Decrement* the counter. We expect to see zero or small negatives.
1835 increment_mdp_data_at(count_offset, scratch, true);
1837 bind (profile_continue);
1838 }
1839 }
1841 // Count the default case of a switch construct.
1843 void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
1844 if (ProfileInterpreter) {
1845 Label profile_continue;
1847 // If no method data exists, go to profile_continue.
1848 test_method_data_pointer(profile_continue);
1850 // Update the default case count
1851 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1852 scratch);
1854 // The method data pointer needs to be updated.
1855 update_mdp_by_offset(
1856 in_bytes(MultiBranchData::default_displacement_offset()),
1857 scratch);
1859 bind (profile_continue);
1860 }
1861 }
1863 // Count the index'th case of a switch construct.
1865 void InterpreterMacroAssembler::profile_switch_case(Register index,
1866 Register scratch,
1867 Register scratch2,
1868 Register scratch3) {
1869 if (ProfileInterpreter) {
1870 Label profile_continue;
1872 // If no method data exists, go to profile_continue.
1873 test_method_data_pointer(profile_continue);
1875 // Build the base offset: (index * per_case_size()) + case_array_offset()
1876 set(in_bytes(MultiBranchData::per_case_size()), scratch);
1877 smul(index, scratch, scratch);
1878 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);
1880 // Update the case count
1881 increment_mdp_data_at(scratch,
1882 in_bytes(MultiBranchData::relative_count_offset()),
1883 scratch2,
1884 scratch3);
1886 // The method data pointer needs to be updated.
1887 update_mdp_by_offset(scratch,
1888 in_bytes(MultiBranchData::relative_displacement_offset()),
1889 scratch2);
1891 bind (profile_continue);
1892 }
1893 }
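// Offset arithmetic, in effect:
//   scratch = case_array_offset() + index * per_case_size();
//   *(mdp + scratch + relative_count_offset()) += counter_increment;
//   mdp += *(mdp + scratch + relative_displacement_offset());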
1895 // Add an InterpMonitorElem to the stack (see frame_sparc.hpp)
1897 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
1898 Register Rtemp,
1899 Register Rtemp2 ) {
1901 Register Rlimit = Lmonitors;
1902 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1903 assert( (delta & LongAlignmentMask) == 0,
1904 "sizeof BasicObjectLock must be an even number of doublewords");
1906 sub( SP, delta, SP);
1907 sub( Lesp, delta, Lesp);
1908 sub( Lmonitors, delta, Lmonitors);
1910 if (!stack_is_empty) {
1912 // must copy stack contents down
1914 Label start_copying, next;
1916 // untested("monitor stack expansion");
1917 compute_stack_base(Rtemp);
1918 ba(start_copying);
1919 delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
1921 // note: must copy from low memory upwards
1922 // On entry to loop,
1923 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1924 // Loop mutates Rtemp
1926 bind( next);
1928 st_ptr(Rtemp2, Rtemp, 0);
1929 inc(Rtemp, wordSize);
1930 cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1932 bind( start_copying );
1934 brx( notEqual, true, pn, next );
1935 delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1937 // done copying stack
1938 }
1939 }
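// Expansion sketch: SP, Lesp and Lmonitors each move down by delta bytes;
// a non-empty expression stack is then copied word-by-word, low memory
// first, so that no word is overwritten before it is read:
//   for (p = new_stack_base; p != Lmonitors; p += wordSize) *p = *(p + delta);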
1941 // Locals
1942 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
1943 assert_not_delayed();
1944 sll(index, Interpreter::logStackElementSize, index);
1945 sub(Llocals, index, index);
1946 ld_ptr(index, 0, dst);
1947 // Note: index must hold the effective address--the iinc template uses it
1948 }
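// The access_local_* / store_local_* helpers all share this addressing
// scheme (a sketch; locals grow down from Llocals):
//   index = Llocals - index * Interpreter::stackElementSize;  // effective address
//   dst   = *(T*)index;                                       // T varies per helper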
1950 // Just like access_local_ptr but the tag is a returnAddress
1951 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
1952 Register dst ) {
1953 assert_not_delayed();
1954 sll(index, Interpreter::logStackElementSize, index);
1955 sub(Llocals, index, index);
1956 ld_ptr(index, 0, dst);
1957 }
1959 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
1960 assert_not_delayed();
1961 sll(index, Interpreter::logStackElementSize, index);
1962 sub(Llocals, index, index);
1963 ld(index, 0, dst);
1964 // Note: index must hold the effective address--the iinc template uses it
1965 }
1968 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
1969 assert_not_delayed();
1970 sll(index, Interpreter::logStackElementSize, index);
1971 sub(Llocals, index, index);
1972 // First half is stored at index n+1 (locals grow down from Llocals)
1973 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
1974 }
1977 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
1978 assert_not_delayed();
1979 sll(index, Interpreter::logStackElementSize, index);
1980 sub(Llocals, index, index);
1981 ldf(FloatRegisterImpl::S, index, 0, dst);
1982 }
1985 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
1986 assert_not_delayed();
1987 sll(index, Interpreter::logStackElementSize, index);
1988 sub(Llocals, index, index);
1989 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1990 }
1993 #ifdef ASSERT
1994 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
1995 Label L;
1997 assert(Rindex != Rscratch, "Registers cannot be same");
1998 assert(Rindex != Rscratch1, "Registers cannot be same");
1999 assert(Rlimit != Rscratch, "Registers cannot be same");
2000 assert(Rlimit != Rscratch1, "Registers cannot be same");
2001 assert(Rscratch1 != Rscratch, "Registers cannot be same");
2003 // untested("reg area corruption");
2004 add(Rindex, offset, Rscratch);
2005 add(Rlimit, 64 + STACK_BIAS, Rscratch1);
2006 cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
2007 stop("regsave area is being clobbered");
2008 bind(L);
2009 }
2010 #endif // ASSERT
2013 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
2014 assert_not_delayed();
2015 sll(index, Interpreter::logStackElementSize, index);
2016 sub(Llocals, index, index);
2017 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
2018 st(src, index, 0);
2019 }
2021 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
2022 assert_not_delayed();
2023 sll(index, Interpreter::logStackElementSize, index);
2024 sub(Llocals, index, index);
2025 #ifdef ASSERT
2026 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2027 #endif
2028 st_ptr(src, index, 0);
2029 }
2033 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
2034 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
2035 }
2037 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
2038 assert_not_delayed();
2039 sll(index, Interpreter::logStackElementSize, index);
2040 sub(Llocals, index, index);
2041 #ifdef ASSERT
2042 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2043 #endif
2044 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
2045 }
2048 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
2049 assert_not_delayed();
2050 sll(index, Interpreter::logStackElementSize, index);
2051 sub(Llocals, index, index);
2052 #ifdef ASSERT
2053 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2054 #endif
2055 stf(FloatRegisterImpl::S, src, index, 0);
2056 }
2059 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
2060 assert_not_delayed();
2061 sll(index, Interpreter::logStackElementSize, index);
2062 sub(Llocals, index, index);
2063 #ifdef ASSERT
2064 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2065 #endif
2066 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
2067 }
2070 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
2071 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
2072 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
2073 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
2074 }
2077 Address InterpreterMacroAssembler::top_most_monitor() {
2078 return Address(FP, top_most_monitor_byte_offset());
2079 }
2082 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
2083 add( Lesp, wordSize, Rdest );
2084 }
2086 #endif /* CC_INTERP */
2088 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
2089 assert(UseCompiler, "incrementing must be useful");
2090 #ifdef CC_INTERP
2091 Address inv_counter(G5_method, Method::invocation_counter_offset() +
2092 InvocationCounter::counter_offset());
2093 Address be_counter (G5_method, Method::backedge_counter_offset() +
2094 InvocationCounter::counter_offset());
2095 #else
2096 Address inv_counter(Lmethod, Method::invocation_counter_offset() +
2097 InvocationCounter::counter_offset());
2098 Address be_counter (Lmethod, Method::backedge_counter_offset() +
2099 InvocationCounter::counter_offset());
2100 #endif /* CC_INTERP */
2101 int delta = InvocationCounter::count_increment;
2103 // Load each counter in a register
2104 ld( inv_counter, Rtmp );
2105 ld( be_counter, Rtmp2 );
2107 assert( is_simm13( delta ), " delta too large.");
2109 // Add the delta to the invocation counter and store the result
2110 add( Rtmp, delta, Rtmp );
2112 // Mask the backedge counter
2113 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2115 // Store value
2116 st( Rtmp, inv_counter);
2118 // Add invocation counter + backedge counter
2119 add( Rtmp, Rtmp2, Rtmp);
2121 // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
2122 }
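// In effect:
//   inv_counter += count_increment;                       // stored back
//   Rtmp = inv_counter + (be_counter & count_mask_value); // left in Rtmp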
2124 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
2125 assert(UseCompiler, "incrementing must be useful");
2126 #ifdef CC_INTERP
2127 Address be_counter (G5_method, Method::backedge_counter_offset() +
2128 InvocationCounter::counter_offset());
2129 Address inv_counter(G5_method, Method::invocation_counter_offset() +
2130 InvocationCounter::counter_offset());
2131 #else
2132 Address be_counter (Lmethod, Method::backedge_counter_offset() +
2133 InvocationCounter::counter_offset());
2134 Address inv_counter(Lmethod, Method::invocation_counter_offset() +
2135 InvocationCounter::counter_offset());
2136 #endif /* CC_INTERP */
2137 int delta = InvocationCounter::count_increment;
2138 // Load each counter in a register
2139 ld( be_counter, Rtmp );
2140 ld( inv_counter, Rtmp2 );
2142 // Add the delta to the backedge counter
2143 add( Rtmp, delta, Rtmp );
2145 // Mask the invocation counter, add to backedge counter
2146 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2148 // and store the result to memory
2149 st( Rtmp, be_counter );
2151 // Add backedge + invocation counter
2152 add( Rtmp, Rtmp2, Rtmp );
2154 // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2155 }
2157 #ifndef CC_INTERP
2158 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2159 Register branch_bcp,
2160 Register Rtmp ) {
2161 Label did_not_overflow;
2162 Label overflow_with_error;
2163 assert_different_registers(backedge_count, Rtmp, branch_bcp);
2164 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2166 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2167 load_contents(limit, Rtmp);
2168 cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
2170 // When ProfileInterpreter is on, the backedge_count comes from the
2171 // MethodData*, whose value does not get reset on the call to
2172 // frequency_counter_overflow(). To avoid excessive calls to the overflow
2173 // routine while the method is being compiled, add a second test to make sure
2174 // the overflow function is called only once every overflow_frequency.
2175 if (ProfileInterpreter) {
2176 const int overflow_frequency = 1024;
2177 andcc(backedge_count, overflow_frequency-1, Rtmp);
2178 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2179 delayed()->nop();
2180 }
2182 // overflow in loop, pass branch bytecode
2183 set(6,Rtmp);
2184 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2186 // Was an OSR adapter generated?
2187 // O0 = osr nmethod
2188 br_null_short(O0, Assembler::pn, overflow_with_error);
2190 // Has the nmethod been invalidated already?
2191 ld(O0, nmethod::entry_bci_offset(), O2);
2192 cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
2194 // migrate the interpreter frame off the stack
2196 mov(G2_thread, L7);
2197 // save nmethod
2198 mov(O0, L6);
2199 set_last_Java_frame(SP, noreg);
2200 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2201 reset_last_Java_frame();
2202 mov(L7, G2_thread);
2204 // move OSR nmethod to I1
2205 mov(L6, I1);
2207 // OSR buffer to I0
2208 mov(O0, I0);
2210 // remove the interpreter frame
2211 restore(I5_savedSP, 0, SP);
2213 // Jump to the osr code.
2214 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
2215 jmp(O2, G0);
2216 delayed()->nop();
2218 bind(overflow_with_error);
2220 bind(did_not_overflow);
2221 }
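// Control-flow sketch (pseudocode, names abbreviated):
//   if (backedge_count >= InterpreterBackwardBranchLimit
//       && (!ProfileInterpreter || (backedge_count & 1023) == 0)) {
//     nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp); // O0
//     if (nm != NULL && nm->entry_bci() != InvalidOSREntryBci) {
//       buf = SharedRuntime::OSR_migration_begin(thread);
//       // pop this interpreter frame, then jump to nm's OSR entry with
//       // the OSR buffer in I0 and the nmethod in I1
//     }
//   }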
2225 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
2226 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
2227 }
2230 // local helper function for the verify_oop_or_return_address macro
2231 static bool verify_return_address(Method* m, int bci) {
2232 #ifndef PRODUCT
2233 address pc = (address)(m->constMethod())
2234 + in_bytes(ConstMethod::codes_offset()) + bci;
2235 // assume it is a valid return address if it is inside m and is preceded by a jsr
2236 if (!m->contains(pc)) return false;
2237 address jsr_pc;
2238 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2239 if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base()) return true;
2240 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2241 if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true;
2242 #endif // PRODUCT
2243 return false;
2244 }
2247 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2248 if (!VerifyOops) return;
2249 // the VM documentation for the astore[_wide] bytecode allows
2250 // the TOS to be not only an oop but also a return address
2251 Label test;
2252 Label skip;
2253 // See if it is an address (in the current method):
2255 mov(reg, Rtmp);
2256 const int log2_bytecode_size_limit = 16;
2257 srl(Rtmp, log2_bytecode_size_limit, Rtmp);
2258 br_notnull_short( Rtmp, pt, test );
2260 // %%% should use call_VM_leaf here?
2261 save_frame_and_mov(0, Lmethod, O0, reg, O1);
2262 save_thread(L7_thread_cache);
2263 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2264 delayed()->nop();
2265 restore_thread(L7_thread_cache);
2266 br_notnull( O0, false, pt, skip );
2267 delayed()->restore();
2269 // Perform a more elaborate out-of-line call
2270 // Not an address; verify it:
2271 bind(test);
2272 verify_oop(reg);
2273 bind(skip);
2274 }
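// Decision sketch: a value that fits in 16 bits may be a bci, so
//   if ((reg >> 16) != 0 || !verify_return_address(Lmethod, reg))
//     verify_oop(reg);   // otherwise accept reg as a return address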
2277 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2278 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2279 }
2280 #endif /* CC_INTERP */
2282 // Inline assembly for:
2283 //
2284 // if (thread is in interp_only_mode) {
2285 // InterpreterRuntime::post_method_entry();
2286 // }
2287 // if (DTraceMethodProbes) {
2288 // SharedRuntime::dtrace_method_entry(method, receiver);
2289 // }
2290 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2291 // SharedRuntime::rc_trace_method_entry(method, receiver);
2292 // }
2294 void InterpreterMacroAssembler::notify_method_entry() {
2296 // C++ interpreter only uses this for native methods.
2298 // Whenever JVMTI puts a thread in interp_only_mode, method
2299 // entry/exit events are sent for that thread to track stack
2300 // depth. If it is possible to enter interp_only_mode we add
2301 // the code to check if the event should be sent.
2302 if (JvmtiExport::can_post_interpreter_events()) {
2303 Label L;
2304 Register temp_reg = O5;
2305 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2306 ld(interp_only, temp_reg);
2307 cmp_and_br_short(temp_reg, 0, equal, pt, L);
2308 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2309 bind(L);
2310 }
2312 {
2313 Register temp_reg = O5;
2314 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2315 call_VM_leaf(noreg,
2316 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2317 G2_thread, Lmethod);
2318 }
2320 // RedefineClasses() tracing support for obsolete method entry
2321 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2322 call_VM_leaf(noreg,
2323 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2324 G2_thread, Lmethod);
2325 }
2326 }
2329 // Inline assembly for:
2330 //
2331 // if (thread is in interp_only_mode) {
2332 // // save result
2333 // InterpreterRuntime::post_method_exit();
2334 // // restore result
2335 // }
2336 // if (DTraceMethodProbes) {
2337 // SharedRuntime::dtrace_method_exit(thread, method);
2338 // }
2339 //
2340 // Native methods have their result stored in d_tmp and l_tmp
2341 // Java methods have their result stored in the expression stack
2343 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2344 TosState state,
2345 NotifyMethodExitMode mode) {
2346 // C++ interpreter only uses this for native methods.
2348 // Whenever JVMTI puts a thread in interp_only_mode, method
2349 // entry/exit events are sent for that thread to track stack
2350 // depth. If it is possible to enter interp_only_mode we add
2351 // the code to check if the event should be sent.
2352 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2353 Label L;
2354 Register temp_reg = O5;
2355 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2356 ld(interp_only, temp_reg);
2357 cmp_and_br_short(temp_reg, 0, equal, pt, L);
2359 // Note: frame::interpreter_frame_result has a dependency on how the
2360 // method result is saved across the call to post_method_exit. For
2361 // native methods it assumes the result registers are saved to
2362 // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2363 // implementation will need to be updated too.
2365 save_return_value(state, is_native_method);
2366 call_VM(noreg,
2367 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2368 restore_return_value(state, is_native_method);
2369 bind(L);
2370 }
2372 {
2373 Register temp_reg = O5;
2374 // Dtrace notification
2375 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2376 save_return_value(state, is_native_method);
2377 call_VM_leaf(
2378 noreg,
2379 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2380 G2_thread, Lmethod);
2381 restore_return_value(state, is_native_method);
2382 }
2383 }
2385 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
2386 #ifdef CC_INTERP
2387 // result potentially in O0/O1: save it across calls
2388 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
2389 #ifdef _LP64
2390 stx(O0, STATE(_native_lresult));
2391 #else
2392 std(O0, STATE(_native_lresult));
2393 #endif
2394 #else // CC_INTERP
2395 if (is_native_call) {
2396 stf(FloatRegisterImpl::D, F0, d_tmp);
2397 #ifdef _LP64
2398 stx(O0, l_tmp);
2399 #else
2400 std(O0, l_tmp);
2401 #endif
2402 } else {
2403 push(state);
2404 }
2405 #endif // CC_INTERP
2406 }
2408 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
2409 #ifdef CC_INTERP
2410 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
2411 #ifdef _LP64
2412 ldx(STATE(_native_lresult), O0);
2413 #else
2414 ldd(STATE(_native_lresult), O0);
2415 #endif
2416 #else // CC_INTERP
2417 if (is_native_call) {
2418 ldf(FloatRegisterImpl::D, d_tmp, F0);
2419 #ifdef _LP64
2420 ldx(l_tmp, O0);
2421 #else
2422 ldd(l_tmp, O0);
2423 #endif
2424 } else {
2425 pop(state);
2426 }
2427 #endif // CC_INTERP
2428 }
2430 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
2431 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
2432 int increment, int mask,
2433 Register scratch1, Register scratch2,
2434 Condition cond, Label *where) {
2435 ld(counter_addr, scratch1);
2436 add(scratch1, increment, scratch1);
2437 if (is_simm13(mask)) {
2438 andcc(scratch1, mask, G0);
2439 } else {
2440 set(mask, scratch2);
2441 andcc(scratch1, scratch2, G0);
2442 }
2443 br(cond, false, Assembler::pn, *where);
2444 delayed()->st(scratch1, counter_addr);
2445 }