Wed, 15 May 2013 11:05:09 +0200
6843347: Boundary values in some public GC options cause crashes
Summary: Setting some public integer options to specific values causes crashes or undefined GC behavior. This patchset adds the necessary argument checking for these options.
Reviewed-by: jmasa, brutisso
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interp_masm_sparc.hpp"
27 #include "interpreter/interpreter.hpp"
28 #include "interpreter/interpreterRuntime.hpp"
29 #include "oops/arrayOop.hpp"
30 #include "oops/markOop.hpp"
31 #include "oops/methodData.hpp"
32 #include "oops/method.hpp"
33 #include "oops/methodCounters.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "prims/jvmtiRedefineClassesTrace.hpp"
36 #include "prims/jvmtiThreadState.hpp"
37 #include "runtime/basicLock.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/thread.inline.hpp"
42 #ifndef CC_INTERP
43 #ifndef FAST_DISPATCH
44 #define FAST_DISPATCH 1
45 #endif
46 #undef FAST_DISPATCH
48 // Implementation of InterpreterMacroAssembler
50 // This file specializes the assembler with interpreter-specific macros
52 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
53 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
55 #else // CC_INTERP
56 #ifndef STATE
57 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
58 #endif // STATE
60 #endif // CC_INTERP
62 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
63 // Note: this algorithm is also used by C1's OSR entry sequence.
64 // Any changes should also be applied to CodeEmitter::emit_osr_entry().
65 assert_different_registers(args_size, locals_size);
66 // max_locals*2 for TAGS. Assumes that args_size has already been adjusted.
67 subcc(locals_size, args_size, delta); // extra space for non-argument locals in words
68 // Use br/mov combination because it works on both V8 and V9 and is
69 // faster.
70 Label skip_move;
71 br(Assembler::negative, true, Assembler::pt, skip_move);
72 delayed()->mov(G0, delta);
73 bind(skip_move);
74 round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned)
75 sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes
76 }
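As a reading aid: stripped of the branch/delay-slot scheduling, the sequence above computes the following, sketched in plain C++ under the assumption of a 64-bit word (LogBytesPerWord == 3) and WordsPerLong == 2; the helper name is illustrative, not part of HotSpot:

    // Minimal sketch of compute_extra_locals_size_in_bytes (assumptions above).
    static long extra_locals_size_in_bytes(long args_size, long locals_size) {
      long delta = locals_size - args_size; // subcc: non-argument locals, in words
      if (delta < 0) delta = 0;             // the br/mov pair clamps negative deltas to zero
      delta = (delta + 1) & ~1L;            // round_to(delta, WordsPerLong): SP stays 2-word aligned
      return delta << 3;                    // sll by LogBytesPerWord: words -> bytes
    }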
78 #ifndef CC_INTERP
80 // Dispatch code executed in the prolog of a bytecode which does not do its
81 // own dispatch. The dispatch address is computed and placed in IdispatchAddress
82 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
83 assert_not_delayed();
84 #ifdef FAST_DISPATCH
85 // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
86 // they both use I2.
87 assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
88 ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode
89 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
90 // add offset to correct dispatch table
91 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
92 ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
93 #else
94 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
95 // dispatch table to use
96 AddressLiteral tbl(Interpreter::dispatch_table(state));
97 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
98 set(tbl, G3_scratch); // compute addr of table
99 ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
100 #endif
101 }
104 // Dispatch code executed in the epilog of a bytecode which does not do its
105 // own dispatch. The dispatch address in IdispatchAddress is used for the
106 // dispatch.
107 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
108 assert_not_delayed();
109 verify_FPU(1, state);
110 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
111 jmp( IdispatchAddress, 0 );
112 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
113 else delayed()->nop();
114 }
117 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
118 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
119 assert_not_delayed();
120 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
121 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
122 }
125 void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
126 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
127 assert_not_delayed();
128 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
129 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
130 }
133 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
134 // load current bytecode
135 assert_not_delayed();
136 ldub( Lbcp, 0, Lbyte_code); // load current bytecode
137 dispatch_base(state, table);
138 }
141 void InterpreterMacroAssembler::call_VM_leaf_base(
142 Register java_thread,
143 address entry_point,
144 int number_of_arguments
145 ) {
146 if (!java_thread->is_valid())
147 java_thread = L7_thread_cache;
148 // super call
149 MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
150 }
153 void InterpreterMacroAssembler::call_VM_base(
154 Register oop_result,
155 Register java_thread,
156 Register last_java_sp,
157 address entry_point,
158 int number_of_arguments,
159 bool check_exception
160 ) {
161 if (!java_thread->is_valid())
162 java_thread = L7_thread_cache;
163 // See class ThreadInVMfromInterpreter, which assumes that the interpreter
164 // takes responsibility for setting its own thread-state on call-out.
165 // However, ThreadInVMfromInterpreter resets the state to "in_Java".
167 //save_bcp(); // save bcp
168 MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
169 //restore_bcp(); // restore bcp
170 //restore_locals(); // restore locals pointer
171 }
174 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
175 if (JvmtiExport::can_pop_frame()) {
176 Label L;
178 // Check the "pending popframe condition" flag in the current thread
179 ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);
181 // Initiate popframe handling only if it is not already being processed. If the flag
182 // has the popframe_processing bit set, it means that this code is called *during* popframe
183 // handling - we don't want to reenter.
184 btst(JavaThread::popframe_pending_bit, scratch_reg);
185 br(zero, false, pt, L);
186 delayed()->nop();
187 btst(JavaThread::popframe_processing_bit, scratch_reg);
188 br(notZero, false, pt, L);
189 delayed()->nop();
191 // Call Interpreter::remove_activation_preserving_args_entry() to get the
192 // address of the same-named entrypoint in the generated interpreter code.
193 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
195 // Jump to Interpreter::_remove_activation_preserving_args_entry
196 jmpl(O0, G0, G0);
197 delayed()->nop();
198 bind(L);
199 }
200 }
203 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
204 Register thr_state = G4_scratch;
205 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
206 const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
207 const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
208 const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
209 switch (state) {
210 case ltos: ld_long(val_addr, Otos_l); break;
211 case atos: ld_ptr(oop_addr, Otos_l);
212 st_ptr(G0, oop_addr); break;
213 case btos: // fall through
214 case ctos: // fall through
215 case stos: // fall through
216 case itos: ld(val_addr, Otos_l1); break;
217 case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
218 case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
219 case vtos: /* nothing to do */ break;
220 default : ShouldNotReachHere();
221 }
222 // Clean up tos value in the jvmti thread state
223 or3(G0, ilgl, G3_scratch);
224 stw(G3_scratch, tos_addr);
225 st_long(G0, val_addr);
226 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
227 }
230 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
231 if (JvmtiExport::can_force_early_return()) {
232 Label L;
233 Register thr_state = G3_scratch;
234 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
235 br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
237 // Initiate earlyret handling only if it is not already being processed.
238 // If the flag has the earlyret_processing bit set, it means that this code
239 // is called *during* earlyret handling - we don't want to reenter.
240 ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
241 cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);
243 // Call Interpreter::remove_activation_early_entry() to get the address of the
244 // same-named entrypoint in the generated interpreter code
245 ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
246 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
248 // Jump to Interpreter::_remove_activation_early_entry
249 jmpl(O0, G0, G0);
250 delayed()->nop();
251 bind(L);
252 }
253 }
256 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
257 mov(arg_1, O0);
258 mov(arg_2, O1);
259 MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
260 }
261 #endif /* CC_INTERP */
264 #ifndef CC_INTERP
266 void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
267 assert_not_delayed();
268 dispatch_Lbyte_code(state, table);
269 }
272 void InterpreterMacroAssembler::dispatch_normal(TosState state) {
273 dispatch_base(state, Interpreter::normal_table(state));
274 }
277 void InterpreterMacroAssembler::dispatch_only(TosState state) {
278 dispatch_base(state, Interpreter::dispatch_table(state));
279 }
282 // common code to dispatch and dispatch_only
283 // dispatch value in Lbyte_code and increment Lbcp
285 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
286 verify_FPU(1, state);
287 // %%%%% maybe implement +VerifyActivationFrameSize here
288 //verify_thread(); //too slow; we will just verify on method entry & exit
289 if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
290 #ifdef FAST_DISPATCH
291 if (table == Interpreter::dispatch_table(state)) {
292 // use IdispatchTables
293 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
294 // add offset to correct dispatch table
295 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
296 ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr
297 } else {
298 #endif
299 // dispatch table to use
300 AddressLiteral tbl(table);
301 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
302 set(tbl, G3_scratch); // compute addr of table
303 ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
304 #ifdef FAST_DISPATCH
305 }
306 #endif
307 jmp( G3_scratch, 0 );
308 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
309 else delayed()->nop();
310 }
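Both the FAST_DISPATCH and the AddressLiteral paths above reduce to one indexed load from a per-TosState table of entry points; a hedged C++ sketch (the table type and names are illustrative):

    typedef void (*bytecode_entry_t)();

    // dispatch_Lbyte_code minus delay-slot scheduling: sll by LogBytesPerWord
    // followed by ld_ptr is plain array indexing.
    static bytecode_entry_t dispatch_entry(bytecode_entry_t* table, unsigned char bytecode) {
      return table[bytecode];
    }

The jmp then transfers to the returned address while the delay slot increments Lbcp.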
313 // Helpers for expression stack
315 // Longs and doubles are Category 2 computational types in the
316 // JVM specification (section 3.11.1) and take 2 expression stack or
317 // local slots.
318 // Aligning them on 32 bit with tagged stacks is hard because the code generated
319 // for the dup* bytecodes depends on what types are already on the stack.
320 // If the types are split into the two stack/local slots, that is much easier
321 // (and we can use 0 for non-reference tags).
323 // Known good alignment in _LP64 but unknown otherwise
324 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
325 assert_not_delayed();
327 #ifdef _LP64
328 ldf(FloatRegisterImpl::D, r1, offset, d);
329 #else
330 ldf(FloatRegisterImpl::S, r1, offset, d);
331 ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
332 #endif
333 }
335 // Known good alignment in _LP64 but unknown otherwise
336 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
337 assert_not_delayed();
339 #ifdef _LP64
340 stf(FloatRegisterImpl::D, d, r1, offset);
341 // store something more useful here
342 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
343 #else
344 stf(FloatRegisterImpl::S, d, r1, offset);
345 stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
346 #endif
347 }
350 // Known good alignment in _LP64 but unknown otherwise
351 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
352 assert_not_delayed();
353 #ifdef _LP64
354 ldx(r1, offset, rd);
355 #else
356 ld(r1, offset, rd);
357 ld(r1, offset + Interpreter::stackElementSize, rd->successor());
358 #endif
359 }
361 // Known good alignment in _LP64 but unknown otherwise
362 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
363 assert_not_delayed();
365 #ifdef _LP64
366 stx(l, r1, offset);
367 // store something more useful here
368 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
369 #else
370 st(l, r1, offset);
371 st(l->successor(), r1, offset + Interpreter::stackElementSize);
372 #endif
373 }
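On the 32-bit (!_LP64) paths above, the 64-bit value travels as two 32-bit halves in adjacent stack slots, high word first, matching the register-pair convention noted later in this file; a sketch, with illustrative names and an assumed 4-byte slot:

    static void store_long_as_two_words(unsigned int* slot, unsigned long long value) {
      slot[0] = (unsigned int)(value >> 32); // st(l, ...): high-order word first
      slot[1] = (unsigned int)value;         // st(l->successor(), ...): low word next
    }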
375 void InterpreterMacroAssembler::pop_i(Register r) {
376 assert_not_delayed();
377 ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
378 inc(Lesp, Interpreter::stackElementSize);
379 debug_only(verify_esp(Lesp));
380 }
382 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
383 assert_not_delayed();
384 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
385 inc(Lesp, Interpreter::stackElementSize);
386 debug_only(verify_esp(Lesp));
387 }
389 void InterpreterMacroAssembler::pop_l(Register r) {
390 assert_not_delayed();
391 load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
392 inc(Lesp, 2*Interpreter::stackElementSize);
393 debug_only(verify_esp(Lesp));
394 }
397 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
398 assert_not_delayed();
399 ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
400 inc(Lesp, Interpreter::stackElementSize);
401 debug_only(verify_esp(Lesp));
402 }
405 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
406 assert_not_delayed();
407 load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
408 inc(Lesp, 2*Interpreter::stackElementSize);
409 debug_only(verify_esp(Lesp));
410 }
413 void InterpreterMacroAssembler::push_i(Register r) {
414 assert_not_delayed();
415 debug_only(verify_esp(Lesp));
416 st(r, Lesp, 0);
417 dec(Lesp, Interpreter::stackElementSize);
418 }
420 void InterpreterMacroAssembler::push_ptr(Register r) {
421 assert_not_delayed();
422 st_ptr(r, Lesp, 0);
423 dec(Lesp, Interpreter::stackElementSize);
424 }
426 // remember: our convention for longs in SPARC is:
427 // O0 (Otos_l1) has high-order part in first word,
428 // O1 (Otos_l2) has low-order part in second word
430 void InterpreterMacroAssembler::push_l(Register r) {
431 assert_not_delayed();
432 debug_only(verify_esp(Lesp));
433 // Longs are stored in memory-correct order, even if unaligned.
434 int offset = -Interpreter::stackElementSize;
435 store_unaligned_long(r, Lesp, offset);
436 dec(Lesp, 2 * Interpreter::stackElementSize);
437 }
440 void InterpreterMacroAssembler::push_f(FloatRegister f) {
441 assert_not_delayed();
442 debug_only(verify_esp(Lesp));
443 stf(FloatRegisterImpl::S, f, Lesp, 0);
444 dec(Lesp, Interpreter::stackElementSize);
445 }
448 void InterpreterMacroAssembler::push_d(FloatRegister d) {
449 assert_not_delayed();
450 debug_only(verify_esp(Lesp));
451 // Longs are stored in memory-correct order, even if unaligned.
452 int offset = -Interpreter::stackElementSize;
453 store_unaligned_double(d, Lesp, offset);
454 dec(Lesp, 2 * Interpreter::stackElementSize);
455 }
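Lesp points at the first free slot, so every push stores at Lesp[0] and then decrements, while every pop reads one slot above Lesp and then increments. A minimal sketch; the 8-byte slot and the expr_offset_in_bytes(0) == stackElementSize equivalence are assumptions for illustration:

    static const int kSlot = 8; // assumed Interpreter::stackElementSize on _LP64

    static void push_slot(char*& esp, long value) {
      *(long*)esp = value; // st at Lesp[0]
      esp -= kSlot;        // dec(Lesp, stackElementSize): stack grows downward
    }

    static long pop_slot(char*& esp) {
      long v = *(long*)(esp + kSlot); // ld at expr_offset_in_bytes(0)
      esp += kSlot;                   // inc(Lesp, stackElementSize)
      return v;
    }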
458 void InterpreterMacroAssembler::push(TosState state) {
459 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
460 switch (state) {
461 case atos: push_ptr(); break;
462 case btos: push_i(); break;
463 case ctos:
464 case stos: push_i(); break;
465 case itos: push_i(); break;
466 case ltos: push_l(); break;
467 case ftos: push_f(); break;
468 case dtos: push_d(); break;
469 case vtos: /* nothing to do */ break;
470 default : ShouldNotReachHere();
471 }
472 }
475 void InterpreterMacroAssembler::pop(TosState state) {
476 switch (state) {
477 case atos: pop_ptr(); break;
478 case btos: pop_i(); break;
479 case ctos:
480 case stos: pop_i(); break;
481 case itos: pop_i(); break;
482 case ltos: pop_l(); break;
483 case ftos: pop_f(); break;
484 case dtos: pop_d(); break;
485 case vtos: /* nothing to do */ break;
486 default : ShouldNotReachHere();
487 }
488 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
489 }
492 // Helpers for swap and dup
493 void InterpreterMacroAssembler::load_ptr(int n, Register val) {
494 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
495 }
496 void InterpreterMacroAssembler::store_ptr(int n, Register val) {
497 st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
498 }
501 void InterpreterMacroAssembler::load_receiver(Register param_count,
502 Register recv) {
503 sll(param_count, Interpreter::logStackElementSize, param_count);
504 ld_ptr(Lesp, param_count, recv); // gets receiver oop
505 }
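Equivalently, the receiver is just an array access param_count slots above Lesp; the shift by logStackElementSize is the scaling a C++ compiler would emit for the indexing below (sketch only):

    static void* receiver_at(void** esp, long param_count) {
      return esp[param_count]; // sll + ld_ptr == scaled indexing
    }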
507 void InterpreterMacroAssembler::empty_expression_stack() {
508 // Reset Lesp.
509 sub( Lmonitors, wordSize, Lesp );
511 // Reset SP by subtracting more space from Lesp.
512 Label done;
513 assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
515 // A native does not need to do this, since its callee does not change SP.
516 ld(Lmethod, Method::access_flags_offset(), Gframe_size); // Load access flags.
517 btst(JVM_ACC_NATIVE, Gframe_size);
518 br(Assembler::notZero, false, Assembler::pt, done);
519 delayed()->nop();
521 // Compute max expression stack+register save area
522 ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size);
523 lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack.
524 add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
526 //
527 // now set up a stack frame with the size computed above
528 //
529 //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
530 sll( Gframe_size, LogBytesPerWord, Gframe_size );
531 sub( Lesp, Gframe_size, Gframe_size );
532 and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
533 debug_only(verify_sp(Gframe_size, G4_scratch));
534 #ifdef _LP64
535 sub(Gframe_size, STACK_BIAS, Gframe_size );
536 #endif
537 mov(Gframe_size, SP);
539 bind(done);
540 }
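The and3 with -(2 * wordSize) is the standard align-down idiom: negating a power-of-two alignment gives a mask that clears the low bits, rounding toward lower addresses onto the 8/16-byte boundary. Self-contained sketch:

    static unsigned long align_down(unsigned long sp, unsigned long alignment) {
      // alignment must be a power of two; -alignment == ~(alignment - 1)
      return sp & -alignment; // e.g. align_down(0x7ffc1237, 16) == 0x7ffc1230
    }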
543 #ifdef ASSERT
544 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
545 Label Bad, OK;
547 // Saved SP must be aligned.
548 #ifdef _LP64
549 btst(2*BytesPerWord-1, Rsp);
550 #else
551 btst(LongAlignmentMask, Rsp);
552 #endif
553 br(Assembler::notZero, false, Assembler::pn, Bad);
554 delayed()->nop();
556 // Saved SP, plus register window size, must not be above FP.
557 add(Rsp, frame::register_save_words * wordSize, Rtemp);
558 #ifdef _LP64
559 sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
560 #endif
561 cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
563 // Saved SP must not be ridiculously below current SP.
564 size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
565 set(maxstack, Rtemp);
566 sub(SP, Rtemp, Rtemp);
567 #ifdef _LP64
568 add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
569 #endif
570 cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
572 ba_short(OK);
574 bind(Bad);
575 stop("on return to interpreted call, restored SP is corrupted");
577 bind(OK);
578 }
581 void InterpreterMacroAssembler::verify_esp(Register Resp) {
582 // about to read or write Resp[0]
583 // make sure it is not in the monitors or the register save area
584 Label OK1, OK2;
586 cmp(Resp, Lmonitors);
587 brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
588 delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
589 stop("too many pops: Lesp points into monitor area");
590 bind(OK1);
591 #ifdef _LP64
592 sub(Resp, STACK_BIAS, Resp);
593 #endif
594 cmp(Resp, SP);
595 brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
596 delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
597 stop("too many pushes: Lesp points into register window");
598 bind(OK2);
599 }
600 #endif // ASSERT
602 // Load compiled (i2c) or interpreter entry when calling from interpreted and
603 // do the call. Centralized so that all interpreter calls will do the same actions.
604 // If jvmti single stepping is on for a thread we must not call compiled code.
605 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
607 // Assume we want to go compiled if available
609 ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);
611 if (JvmtiExport::can_post_interpreter_events()) {
612 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
613 // compiled code in threads for which the event is enabled. Check here for
614 // interp_only_mode if these events CAN be enabled.
615 verify_thread();
616 Label skip_compiled_code;
618 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
619 ld(interp_only, scratch);
620 cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
621 delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
622 bind(skip_compiled_code);
623 }
625 // the i2c_adapters need Method* in G5_method (right? %%%)
626 // do the call
627 #ifdef ASSERT
628 {
629 Label ok;
630 br_notnull_short(target, Assembler::pt, ok);
631 stop("null entry point");
632 bind(ok);
633 }
634 #endif // ASSERT
636 // Adjust Rret first so Llast_SP can be same as Rret
637 add(Rret, -frame::pc_return_offset, O7);
638 add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
639 // Record SP so we can remove any stack space allocated by adapter transition
640 jmp(target, 0);
641 delayed()->mov(SP, Llast_SP);
642 }
644 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
645 assert_not_delayed();
647 Label not_taken;
648 if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
649 else br (cc, false, Assembler::pn, not_taken);
650 delayed()->nop();
652 TemplateTable::branch(false,false);
654 bind(not_taken);
656 profile_not_taken_branch(G3_scratch);
657 }
660 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
661 int bcp_offset,
662 Register Rtmp,
663 Register Rdst,
664 signedOrNot is_signed,
665 setCCOrNot should_set_CC ) {
666 assert(Rtmp != Rdst, "need separate temp register");
667 assert_not_delayed();
668 switch (is_signed) {
669 default: ShouldNotReachHere();
671 case Signed: ldsb( Lbcp, bcp_offset, Rdst ); break; // high byte
672 case Unsigned: ldub( Lbcp, bcp_offset, Rdst ); break; // high byte
673 }
674 ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte
675 sll( Rdst, BitsPerByte, Rdst);
676 switch (should_set_CC ) {
677 default: ShouldNotReachHere();
679 case set_CC: orcc( Rdst, Rtmp, Rdst ); break;
680 case dont_set_CC: or3( Rdst, Rtmp, Rdst ); break;
681 }
682 }
685 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
686 int bcp_offset,
687 Register Rtmp,
688 Register Rdst,
689 setCCOrNot should_set_CC ) {
690 assert(Rtmp != Rdst, "need separate temp register");
691 assert_not_delayed();
692 add( Lbcp, bcp_offset, Rtmp);
693 andcc( Rtmp, 3, G0);
694 Label aligned;
695 switch (should_set_CC ) {
696 default: ShouldNotReachHere();
698 case set_CC: break;
699 case dont_set_CC: break;
700 }
702 br(Assembler::zero, true, Assembler::pn, aligned);
703 #ifdef _LP64
704 delayed()->ldsw(Rtmp, 0, Rdst);
705 #else
706 delayed()->ld(Rtmp, 0, Rdst);
707 #endif
709 ldub(Lbcp, bcp_offset + 3, Rdst);
710 ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
711 ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
712 #ifdef _LP64
713 ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
714 #else
715 // Unsigned load is faster than signed on some implementations
716 ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
717 #endif
718 or3(Rtmp, Rdst, Rdst );
720 bind(aligned);
721 if (should_set_CC == set_CC) tst(Rdst);
722 }
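The unaligned path assembles the big-endian operand byte by byte, sign-extending only the most significant byte (the _LP64 branch; the 32-bit branch uses an unsigned load for speed). An equivalent C++ sketch:

    static long get_4_byte_integer_be(const unsigned char* bcp) {
      long result = bcp[3];                       // ldub at offset 3: least significant byte
      result |= (long)bcp[2] << 8;                // ldub + sll(8) + or3
      result |= (long)bcp[1] << 16;               // ldub + sll(16) + or3
      result |= (long)(signed char)bcp[0] << 24;  // ldsb: sign-extend the top byte
      return result;
    }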
724 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
725 int bcp_offset, size_t index_size) {
726 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
727 if (index_size == sizeof(u2)) {
728 get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
729 } else if (index_size == sizeof(u4)) {
730 assert(EnableInvokeDynamic, "giant index used only for JSR 292");
731 get_4_byte_integer_at_bcp(bcp_offset, temp, index);
732 assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
733 xor3(index, -1, index); // convert to plain index
734 } else if (index_size == sizeof(u1)) {
735 ldub(Lbcp, bcp_offset, index);
736 } else {
737 ShouldNotReachHere();
738 }
739 }
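As the assert's self-check hints, decoding an invokedynamic index is bitwise complement, and xor3 with -1 is how the complement is formed here; sketch:

    static int decode_invokedynamic_index_sketch(int encoded) {
      return encoded ^ -1; // == ~encoded, so ~123 decodes back to 123
    }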
742 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
743 int bcp_offset, size_t index_size) {
744 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
745 assert_different_registers(cache, tmp);
746 assert_not_delayed();
747 get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
748 // convert from field index to ConstantPoolCacheEntry index and from
749 // word index to byte offset
750 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
751 add(LcpoolCache, tmp, cache);
752 }
755 void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
756 Register temp,
757 Register bytecode,
758 int byte_no,
759 int bcp_offset,
760 size_t index_size) {
761 get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
762 ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
763 const int shift_count = (1 + byte_no) * BitsPerByte;
764 assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
765 (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
766 "correct shift count");
767 srl(bytecode, shift_count, bytecode);
768 assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
769 and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
770 }
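Each cache entry packs its resolved bytecodes into one indices word, so byte_no selects a byte via shift count (1 + byte_no) * BitsPerByte and the shared mask isolates it. Sketch, assuming the common mask is a full byte:

    static unsigned extract_resolved_bytecode(unsigned long indices, int byte_no) {
      const int shift = (1 + byte_no) * 8;          // BitsPerByte
      return (unsigned)((indices >> shift) & 0xFF); // srl, then and3 with the mask
    }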
773 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
774 int bcp_offset, size_t index_size) {
775 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
776 assert_different_registers(cache, tmp);
777 assert_not_delayed();
778 if (index_size == sizeof(u2)) {
779 get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
780 } else {
781 ShouldNotReachHere(); // other sizes not supported here
782 }
783 // convert from field index to ConstantPoolCacheEntry index
784 // and from word index to byte offset
785 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
786 // skip past the header
787 add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
788 // construct pointer to cache entry
789 add(LcpoolCache, tmp, cache);
790 }
793 // Load object from cpool->resolved_references(index)
794 void InterpreterMacroAssembler::load_resolved_reference_at_index(
795 Register result, Register index) {
796 assert_different_registers(result, index);
797 assert_not_delayed();
798 // convert from field index to resolved_references() index and from
799 // word index to byte offset. Since this is a java object, it can be compressed
800 Register tmp = index; // reuse
801 sll(index, LogBytesPerHeapOop, tmp);
802 get_constant_pool(result);
803 // load pointer for resolved_references[] objArray
804 ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
805 // JNIHandles::resolve(result)
806 ld_ptr(result, 0, result);
807 // Add in the index
808 add(result, tmp, result);
809 load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
810 }
813 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
814 // a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
815 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
816 Register Rsuper_klass,
817 Register Rtmp1,
818 Register Rtmp2,
819 Register Rtmp3,
820 Label &ok_is_subtype ) {
821 Label not_subtype;
823 // Profile the not-null value's klass.
824 profile_typecheck(Rsub_klass, Rtmp1);
826 check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
827 Rtmp1, Rtmp2,
828 &ok_is_subtype, ¬_subtype, NULL);
830 check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
831 Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
832 &ok_is_subtype, NULL);
834 bind(not_subtype);
835 profile_typecheck_failed(Rtmp1);
836 }
838 // Separate these two to allow for delay slot in middle
839 // These are used to do a test and full jump to exception-throwing code.
841 // %%%%% Could possibly reoptimize this by testing to see if we could use
842 // a single conditional branch (i.e., if the span is small enough).
843 // If you go that route, then get rid of the split and give up
844 // on the delay-slot hack.
846 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
847 Label& ok ) {
848 assert_not_delayed();
849 br(ok_condition, true, pt, ok);
850 // DELAY SLOT
851 }
853 void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
854 Label& ok ) {
855 assert_not_delayed();
856 bp( ok_condition, true, Assembler::xcc, pt, ok);
857 // DELAY SLOT
858 }
860 void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
861 Label& ok ) {
862 assert_not_delayed();
863 brx(ok_condition, true, pt, ok);
864 // DELAY SLOT
865 }
867 void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point,
868 Register Rscratch,
869 Label& ok ) {
870 assert(throw_entry_point != NULL, "entry point must be generated by now");
871 AddressLiteral dest(throw_entry_point);
872 jump_to(dest, Rscratch);
873 delayed()->nop();
874 bind(ok);
875 }
878 // And if you cannot use the delay slot, here is a shorthand:
880 void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
881 address throw_entry_point,
882 Register Rscratch ) {
883 Label ok;
884 if (ok_condition != never) {
885 throw_if_not_1_icc( ok_condition, ok);
886 delayed()->nop();
887 }
888 throw_if_not_2( throw_entry_point, Rscratch, ok);
889 }
890 void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
891 address throw_entry_point,
892 Register Rscratch ) {
893 Label ok;
894 if (ok_condition != never) {
895 throw_if_not_1_xcc( ok_condition, ok);
896 delayed()->nop();
897 }
898 throw_if_not_2( throw_entry_point, Rscratch, ok);
899 }
900 void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
901 address throw_entry_point,
902 Register Rscratch ) {
903 Label ok;
904 if (ok_condition != never) {
905 throw_if_not_1_x( ok_condition, ok);
906 delayed()->nop();
907 }
908 throw_if_not_2( throw_entry_point, Rscratch, ok);
909 }
911 // Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
912 // Note: res is still shy of address by array offset into object.
914 void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
915 assert_not_delayed();
917 verify_oop(array);
918 #ifdef _LP64
919 // sign extend since tos (index) can be a 32bit value
920 sra(index, G0, index);
921 #endif // _LP64
923 // check array
924 Label ptr_ok;
925 tst(array);
926 throw_if_not_1_x( notZero, ptr_ok );
927 delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
928 throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);
930 Label index_ok;
931 cmp(index, tmp);
932 throw_if_not_1_icc( lessUnsigned, index_ok );
933 if (index_shift > 0) delayed()->sll(index, index_shift, index);
934 else delayed()->add(array, index, res); // addr - const offset in index
935 // convention: move aberrant index into G3_scratch for exception message
936 mov(index, G3_scratch);
937 throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);
939 // add offset if didn't do it in delay slot
940 if (index_shift > 0) add(array, index, res); // addr - const offset in index
941 }
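The throw_if_not_1_icc(lessUnsigned, ...) test is the classic single-compare bounds check: once the index is sign-extended, reinterpreting it as unsigned makes any negative value compare above every legal array length. Sketch:

    static bool index_in_range(int index, int length) {
      // one unsigned compare catches both index < 0 and index >= length
      return (unsigned)index < (unsigned)length;
    }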
944 void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
945 assert_not_delayed();
947 // pop array
948 pop_ptr(array);
950 // check array
951 index_check_without_pop(array, index, index_shift, tmp, res);
952 }
955 void InterpreterMacroAssembler::get_const(Register Rdst) {
956 ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
957 }
960 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
961 get_const(Rdst);
962 ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
963 }
966 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
967 get_constant_pool(Rdst);
968 ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
969 }
972 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
973 get_constant_pool(Rcpool);
974 ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
975 }
978 // unlock if synchronized method
979 //
980 // Unlock the receiver if this is a synchronized method.
981 // Unlock any Java monitors from synchronized blocks.
982 //
983 // If there are locked Java monitors
984 // If throw_monitor_exception
985 // throws IllegalMonitorStateException
986 // Else if install_monitor_exception
987 // installs IllegalMonitorStateException
988 // Else
989 // no error processing
990 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
991 bool throw_monitor_exception,
992 bool install_monitor_exception) {
993 Label unlocked, unlock, no_unlock;
995 // get the value of _do_not_unlock_if_synchronized into G1_scratch
996 const Address do_not_unlock_if_synchronized(G2_thread,
997 JavaThread::do_not_unlock_if_synchronized_offset());
998 ldbool(do_not_unlock_if_synchronized, G1_scratch);
999 stbool(G0, do_not_unlock_if_synchronized); // reset the flag
1001 // check if synchronized method
1002 const Address access_flags(Lmethod, Method::access_flags_offset());
1003 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1004 push(state); // save tos
1005 ld(access_flags, G3_scratch); // Load access flags.
1006 btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
1007 br(zero, false, pt, unlocked);
1008 delayed()->nop();
1010 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1011 // is set.
1012 cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
1013 delayed()->nop();
1015 // BasicObjectLock will be first in list, since this is a synchronized method. However, we need
1016 // to check that the object has not been unlocked by an explicit monitorexit bytecode.
1018 //Intel: if (throw_monitor_exception) ... else ...
1019 // Entry already unlocked, need to throw exception
1020 //...
1022 // pass top-most monitor elem
1023 add( top_most_monitor(), O1 );
1025 ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
1026 br_notnull_short(G3_scratch, pt, unlock);
1028 if (throw_monitor_exception) {
1029 // Entry already unlocked, need to throw an exception
1030 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1031 should_not_reach_here();
1032 } else {
1033 // Monitor already unlocked during a stack unroll.
1034 // If requested, install an illegal_monitor_state_exception.
1035 // Continue with stack unrolling.
1036 if (install_monitor_exception) {
1037 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1038 }
1039 ba_short(unlocked);
1040 }
1042 bind(unlock);
1044 unlock_object(O1);
1046 bind(unlocked);
1048 // I0, I1: Might contain return value
1050 // Check that all monitors are unlocked
1051 { Label loop, exception, entry, restart;
1053 Register Rmptr = O0;
1054 Register Rtemp = O1;
1055 Register Rlimit = Lmonitors;
1056 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1057 assert( (delta & LongAlignmentMask) == 0,
1058 "sizeof BasicObjectLock must be even number of doublewords");
1060 #ifdef ASSERT
1061 add(top_most_monitor(), Rmptr, delta);
1062 { Label L;
1063 // ensure that Rmptr starts out above (or at) Rlimit
1064 cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
1065 stop("monitor stack has negative size");
1066 bind(L);
1067 }
1068 #endif
1069 bind(restart);
1070 ba(entry);
1071 delayed()->
1072 add(top_most_monitor(), Rmptr, delta); // points to current entry, starting with bottom-most entry
1074 // Entry is still locked, need to throw exception
1075 bind(exception);
1076 if (throw_monitor_exception) {
1077 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1078 should_not_reach_here();
1079 } else {
1080 // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
1081 // Unlock does not block, so don't have to worry about the frame
1082 unlock_object(Rmptr);
1083 if (install_monitor_exception) {
1084 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1085 }
1086 ba_short(restart);
1087 }
1089 bind(loop);
1090 cmp(Rtemp, G0); // check if current entry is used
1091 brx(Assembler::notEqual, false, pn, exception);
1092 delayed()->
1093 dec(Rmptr, delta); // otherwise advance to next entry
1094 #ifdef ASSERT
1095 { Label L;
1096 // ensure that Rmptr has not somehow stepped below Rlimit
1097 cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
1098 stop("ran off the end of the monitor stack");
1099 bind(L);
1100 }
1101 #endif
1102 bind(entry);
1103 cmp(Rmptr, Rlimit); // check if bottom reached
1104 brx(Assembler::notEqual, true, pn, loop); // if not at bottom then check this entry
1105 delayed()->
1106 ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
1107 }
1109 bind(no_unlock);
1110 pop(state);
1111 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1112 }
1115 // remove activation
1116 //
1117 // Unlock the receiver if this is a synchronized method.
1118 // Unlock any Java monitors from synchronized blocks.
1119 // Remove the activation from the stack.
1120 //
1121 // If there are locked Java monitors
1122 // If throw_monitor_exception
1123 // throws IllegalMonitorStateException
1124 // Else if install_monitor_exception
1125 // installs IllegalMonitorStateException
1126 // Else
1127 // no error processing
1128 void InterpreterMacroAssembler::remove_activation(TosState state,
1129 bool throw_monitor_exception,
1130 bool install_monitor_exception) {
1132 unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
1134 // save result (push state before jvmti call and pop it afterwards) and notify jvmti
1135 notify_method_exit(false, state, NotifyJVMTI);
1137 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1138 verify_thread();
1140 // return tos
1141 assert(Otos_l1 == Otos_i, "adjust code below");
1142 switch (state) {
1143 #ifdef _LP64
1144 case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
1145 #else
1146 case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
1147 #endif
1148 case btos: // fall through
1149 case ctos:
1150 case stos: // fall through
1151 case atos: // fall through
1152 case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
1153 case ftos: // fall through
1154 case dtos: // fall through
1155 case vtos: /* nothing to do */ break;
1156 default : ShouldNotReachHere();
1157 }
1159 #if defined(COMPILER2) && !defined(_LP64)
1160 if (state == ltos) {
1161 // C2 expects long results in G1. We can't tell if we're returning to interpreted
1162 // or compiled code, so just be safe and use both G1 and O0/O1.
1164 // Shift bits into high (msb) of G1
1165 sllx(Otos_l1->after_save(), 32, G1);
1166 // Zero extend low bits
1167 srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
1168 or3 (Otos_l2->after_save(), G1, G1);
1169 }
1170 #endif /* COMPILER2 */
1172 }
1173 #endif /* CC_INTERP */
1176 // Lock object
1177 //
1178 // Argument - lock_reg points to the BasicObjectLock to be used for locking,
1179 // it must be initialized with the object to lock
1180 void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
1181 if (UseHeavyMonitors) {
1182 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
1183 }
1184 else {
1185 Register obj_reg = Object;
1186 Register mark_reg = G4_scratch;
1187 Register temp_reg = G1_scratch;
1188 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
1189 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
1190 Label done;
1192 Label slow_case;
1194 assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);
1196 // load markOop from object into mark_reg
1197 ld_ptr(mark_addr, mark_reg);
1199 if (UseBiasedLocking) {
1200 biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
1201 }
1203 // get the address of basicLock on stack that will be stored in the object
1204 // we need a temporary register here as we do not want to clobber lock_reg
1205 // (cas clobbers the destination register)
1206 mov(lock_reg, temp_reg);
1207 // set mark reg to be (markOop of object | UNLOCK_VALUE)
1208 or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
1209 // initialize the box (Must happen before we update the object mark!)
1210 st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1211 // compare and exchange object_addr, markOop | 1, stack address of basicLock
1212 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1213 casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
1214 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1216 // if the compare and exchange succeeded we are done (we saw an unlocked object)
1217 cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
1219 // We did not see an unlocked object so try the fast recursive case
1221 // Check if owner is self by comparing the value in the markOop of object
1222 // with the stack pointer
1223 sub(temp_reg, SP, temp_reg);
1224 #ifdef _LP64
1225 sub(temp_reg, STACK_BIAS, temp_reg);
1226 #endif
1227 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1229 // Composite "andcc" test:
1230 // (a) %sp -vs- markword proximity check, and,
1231 // (b) verify mark word LSBs == 0 (Stack-locked).
1232 //
1233 // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
1234 // Note that the page size used for %sp proximity testing is arbitrary and is
1235 // unrelated to the actual MMU page size. We use a 'logical' page size of
1236 // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
1237 // field of the andcc instruction.
1238 andcc (temp_reg, 0xFFFFF003, G0) ;
1240 // if condition is true we are done and hence we can store 0 in the displaced
1241 // header indicating it is a recursive lock and be done
1242 brx(Assembler::zero, true, Assembler::pt, done);
1243 delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1245 // none of the above fast optimizations worked so we have to get into the
1246 // slow case of monitor enter
1247 bind(slow_case);
1248 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
1250 bind(done);
1251 }
1252 }
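The magic constant is simply the OR of the mark-word lock bits and the negated 'logical' page size, so a single andcc tests both stack proximity and the stack-locked low bits at once. A worked sketch of the 32-bit encoding (the value 3 for the lock mask is an assumption here):

    static bool is_fast_recursive_lock(long lock_addr_minus_sp) {
      const long lock_mask_in_place = 3;            // assumed markOopDesc::lock_mask_in_place
      const long page = 4096;                       // the 'logical' page size from the comment
      const long mask = lock_mask_in_place | -page; // == 0xFFFFF003 in 32 bits
      return (lock_addr_minus_sp & mask) == 0;      // andcc(temp_reg, 0xFFFFF003, G0)
    }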
1254 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
1255 //
1256 // Argument - lock_reg points to the BasicObjectLock for lock
1257 // Throw IllegalMonitorException if object is not locked by current thread
1258 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1259 if (UseHeavyMonitors) {
1260 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1261 } else {
1262 Register obj_reg = G3_scratch;
1263 Register mark_reg = G4_scratch;
1264 Register displaced_header_reg = G1_scratch;
1265 Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
1266 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
1267 Label done;
1269 if (UseBiasedLocking) {
1270 // load the object out of the BasicObjectLock
1271 ld_ptr(lockobj_addr, obj_reg);
1272 biased_locking_exit(mark_addr, mark_reg, done, true);
1273 st_ptr(G0, lockobj_addr); // free entry
1274 }
1276 // Test first if we are in the fast recursive case
1277 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
1278 ld_ptr(lock_addr, displaced_header_reg);
1279 br_null(displaced_header_reg, true, Assembler::pn, done);
1280 delayed()->st_ptr(G0, lockobj_addr); // free entry
1282 // See if it is still a light weight lock, if so we just unlock
1283 // the object and we are done
1285 if (!UseBiasedLocking) {
1286 // load the object out of the BasicObjectLock
1287 ld_ptr(lockobj_addr, obj_reg);
1288 }
1290 // we have the displaced header in displaced_header_reg
1291 // we expect to see the stack address of the basicLock in case the
1292 // lock is still a light weight lock (lock_reg)
1293 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1294 casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
1295 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1296 cmp(lock_reg, displaced_header_reg);
1297 brx(Assembler::equal, true, Assembler::pn, done);
1298 delayed()->st_ptr(G0, lockobj_addr); // free entry
1300 // The lock has been converted into a heavy lock and hence
1301 // we need to get into the slow case
1303 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1305 bind(done);
1306 }
1307 }
1309 #ifndef CC_INTERP
1311 // Get the method data pointer from the Method* and set the
1312 // specified register to its value.
1314 void InterpreterMacroAssembler::set_method_data_pointer() {
1315 assert(ProfileInterpreter, "must be profiling interpreter");
1316 Label get_continue;
1318 ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
1319 test_method_data_pointer(get_continue);
1320 add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
1321 bind(get_continue);
1322 }
1324 // Set the method data pointer for the current bcp.
1326 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1327 assert(ProfileInterpreter, "must be profiling interpreter");
1328 Label zero_continue;
1330 // Test MDO to avoid the call if it is NULL.
1331 ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
1332 test_method_data_pointer(zero_continue);
1333 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
1334 add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
1335 add(ImethodDataPtr, O0, ImethodDataPtr);
1336 bind(zero_continue);
1337 }
1339 // Test ImethodDataPtr. If it is null, continue at the specified label
1341 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1342 assert(ProfileInterpreter, "must be profiling interpreter");
1343 br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
1344 }
1346 void InterpreterMacroAssembler::verify_method_data_pointer() {
1347 assert(ProfileInterpreter, "must be profiling interpreter");
1348 #ifdef ASSERT
1349 Label verify_continue;
1350 test_method_data_pointer(verify_continue);
1352 // If the mdp is valid, it will point to a DataLayout header which is
1353 // consistent with the bcp. The converse is highly probable also.
1354 lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
1355 ld_ptr(Lmethod, Method::const_offset(), O5);
1356 add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
1357 add(G3_scratch, O5, G3_scratch);
1358 cmp(Lbcp, G3_scratch);
1359 brx(Assembler::equal, false, Assembler::pt, verify_continue);
1361 Register temp_reg = O5;
1362 delayed()->mov(ImethodDataPtr, temp_reg);
1363 // %%% should use call_VM_leaf here?
1364 //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
1365 save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
1366 Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
1367 stf(FloatRegisterImpl::D, Ftos_d, d_save);
1368 mov(temp_reg->after_save(), O2);
1369 save_thread(L7_thread_cache);
1370 call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
1371 delayed()->nop();
1372 restore_thread(L7_thread_cache);
1373 ldf(FloatRegisterImpl::D, d_save, Ftos_d);
1374 restore();
1375 bind(verify_continue);
1376 #endif // ASSERT
1377 }
1379 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1380 Register Rtmp,
1381 Label &profile_continue) {
1382 assert(ProfileInterpreter, "must be profiling interpreter");
1383 // Control will flow to "profile_continue" if the counter is less than the
1384 // limit or if we call profile_method()
1386 Label done;
1388 // if no method data exists, and the counter is high enough, make one
1389 br_notnull_short(ImethodDataPtr, Assembler::pn, done);
1391 // Test to see if we should create a method data oop
1392 AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
1393 sethi(profile_limit, Rtmp);
1394 ld(Rtmp, profile_limit.low10(), Rtmp);
1395 cmp(invocation_count, Rtmp);
1396 // Use long branches because call_VM() code and following code generated by
1397 // test_backedge_count_for_osr() is large in debug VM.
1398 br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
1399 delayed()->nop();
1401 // Build it now.
1402 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1403 set_method_data_pointer_for_bcp();
1404 ba(profile_continue);
1405 delayed()->nop();
1406 bind(done);
1407 }
1409 // Store a value at some constant offset from the method data pointer.
1411 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1412 assert(ProfileInterpreter, "must be profiling interpreter");
1413 st_ptr(value, ImethodDataPtr, constant);
1414 }
1416 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
1417 Register bumped_count,
1418 bool decrement) {
1419 assert(ProfileInterpreter, "must be profiling interpreter");
1421 // Load the counter.
1422 ld_ptr(counter, bumped_count);
1424 if (decrement) {
1425 // Decrement the register. Set condition codes.
1426 subcc(bumped_count, DataLayout::counter_increment, bumped_count);
1428 // If the decrement causes the counter to overflow, stay negative
1429 Label L;
1430 brx(Assembler::negative, true, Assembler::pn, L);
1432 // Store the decremented counter, if it is still negative.
1433 delayed()->st_ptr(bumped_count, counter);
1434 bind(L);
1435 } else {
1436 // Increment the register. Set carry flag.
1437 addcc(bumped_count, DataLayout::counter_increment, bumped_count);
1439 // If the increment causes the counter to overflow, pull back by 1.
1440 assert(DataLayout::counter_increment == 1, "subc works");
1441 subc(bumped_count, G0, bumped_count);
1443 // Store the incremented counter.
1444 st_ptr(bumped_count, counter);
1445 }
1446 }
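The addcc/subc pair is a branch-free saturating increment: if the add carries out (the counter wrapped to zero), subtracting the carry pulls it back to the all-ones maximum. Sketch for a word-sized unsigned counter:

    static unsigned long saturating_increment(unsigned long count) {
      unsigned long bumped = count + 1;             // addcc: carry set iff bumped wrapped to 0
      unsigned long carry = (bumped == 0) ? 1 : 0;  // SPARC keeps this in the condition codes
      return bumped - carry;                        // subc: pull back by 1 on overflow
    }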
1448 // Increment the value at some constant offset from the method data pointer.
1450 void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
1451 Register bumped_count,
1452 bool decrement) {
1453 // Locate the counter at a fixed offset from the mdp:
1454 Address counter(ImethodDataPtr, constant);
1455 increment_mdp_data_at(counter, bumped_count, decrement);
1456 }
1458 // Increment the value at some non-fixed (reg + constant) offset from
1459 // the method data pointer.
1461 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
1462 int constant,
1463 Register bumped_count,
1464 Register scratch2,
1465 bool decrement) {
1466 // Add the constant to reg to get the offset.
1467 add(ImethodDataPtr, reg, scratch2);
1468 Address counter(scratch2, constant);
1469 increment_mdp_data_at(counter, bumped_count, decrement);
1470 }
1472 // Set a flag value at the current method data pointer position.
1473 // Updates a single byte of the header, to avoid races with other header bits.
1475 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
1476 Register scratch) {
1477 assert(ProfileInterpreter, "must be profiling interpreter");
1478 // Load the data header
1479 ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);
1481 // Set the flag
1482 or3(scratch, flag_constant, scratch);
1484 // Store the modified header.
1485 stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
1486 }
1488 // Test the location at some offset from the method data pointer.
1489 // If it is not equal to value, branch to the not_equal_continue Label.
1490 // Set condition codes to match the nullness of the loaded value.
1492 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
1493 Register value,
1494 Label& not_equal_continue,
1495 Register scratch) {
1496 assert(ProfileInterpreter, "must be profiling interpreter");
1497 ld_ptr(ImethodDataPtr, offset, scratch);
1498 cmp(value, scratch);
1499 brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
1500 delayed()->tst(scratch);
1501 }
1503 // Update the method data pointer by the displacement located at some fixed
1504 // offset from the method data pointer.
1506 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
1507 Register scratch) {
1508 assert(ProfileInterpreter, "must be profiling interpreter");
1509 ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
1510 add(ImethodDataPtr, scratch, ImethodDataPtr);
1511 }
1513 // Update the method data pointer by the displacement located at the
1514 // offset (reg + offset_of_disp).
1516 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
1517 int offset_of_disp,
1518 Register scratch) {
1519 assert(ProfileInterpreter, "must be profiling interpreter");
1520 add(reg, offset_of_disp, scratch);
1521 ld_ptr(ImethodDataPtr, scratch, scratch);
1522 add(ImethodDataPtr, scratch, ImethodDataPtr);
1523 }
1525 // Update the method data pointer by a simple constant displacement.
1527 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
1528 assert(ProfileInterpreter, "must be profiling interpreter");
1529 add(ImethodDataPtr, constant, ImethodDataPtr);
1530 }
1532 // Update the method data pointer for a _ret bytecode whose target
1533 // was not among our cached targets.
1535 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
1536 Register return_bci) {
1537 assert(ProfileInterpreter, "must be profiling interpreter");
1538 push(state);
1539 st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile
1540 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
1541 ld_ptr(l_tmp, return_bci);
1542 pop(state);
1543 }
1545 // Count a taken branch in the bytecodes.
1547 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1548 if (ProfileInterpreter) {
1549 Label profile_continue;
1551 // If no method data exists, go to profile_continue.
1552 test_method_data_pointer(profile_continue);
1554 // We are taking a branch. Increment the taken count.
1555 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);
1557 // The method data pointer needs to be updated to reflect the new target.
1558 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1559 bind (profile_continue);
1560 }
1561 }
1564 // Count a not-taken branch in the bytecodes.
1566 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
1567 if (ProfileInterpreter) {
1568 Label profile_continue;
1570 // If no method data exists, go to profile_continue.
1571 test_method_data_pointer(profile_continue);
1573 // We are not taking the branch. Increment the not-taken count.
1574 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);
1576 // The method data pointer needs to be updated to correspond to the
1577 // next bytecode.
1578 update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1579 bind (profile_continue);
1580 }
1581 }
1584 // Count a non-virtual call in the bytecodes.
1586 void InterpreterMacroAssembler::profile_call(Register scratch) {
1587 if (ProfileInterpreter) {
1588 Label profile_continue;
1590 // If no method data exists, go to profile_continue.
1591 test_method_data_pointer(profile_continue);
1593 // We are making a call. Increment the count.
1594 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1596 // The method data pointer needs to be updated to reflect the new target.
1597 update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1598 bind (profile_continue);
1599 }
1600 }
1603 // Count a final call in the bytecodes.
1605 void InterpreterMacroAssembler::profile_final_call(Register scratch) {
1606 if (ProfileInterpreter) {
1607 Label profile_continue;
1609 // If no method data exists, go to profile_continue.
1610 test_method_data_pointer(profile_continue);
1612 // We are making a call. Increment the count.
1613 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1615 // The method data pointer needs to be updated to reflect the new target.
1616 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1617 bind (profile_continue);
1618 }
1619 }
1622 // Count a virtual call in the bytecodes.
1624 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1625 Register scratch,
1626 bool receiver_can_be_null) {
1627 if (ProfileInterpreter) {
1628 Label profile_continue;
1630 // If no method data exists, go to profile_continue.
1631 test_method_data_pointer(profile_continue);
1634 Label skip_receiver_profile;
1635 if (receiver_can_be_null) {
1636 Label not_null;
1637 br_notnull_short(receiver, Assembler::pt, not_null);
1638 // We are making a call. Increment the count for null receiver.
1639 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1640 ba_short(skip_receiver_profile);
1641 bind(not_null);
1642 }
1644 // Record the receiver type.
1645 record_klass_in_profile(receiver, scratch, true);
1646 bind(skip_receiver_profile);
1648 // The method data pointer needs to be updated to reflect the new target.
1649 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1650 bind (profile_continue);
1651 }
1652 }
1654 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1655 Register receiver, Register scratch,
1656 int start_row, Label& done, bool is_virtual_call) {
1657 if (TypeProfileWidth == 0) {
1658 if (is_virtual_call) {
1659 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1660 }
1661 return;
1662 }
1664 int last_row = VirtualCallData::row_limit() - 1;
1665 assert(start_row <= last_row, "must be work left to do");
1666 // Test this row for both the receiver and for null.
1667 // Take any of three different outcomes:
1668 // 1. found receiver => increment count and goto done
1669 // 2. found null => keep looking for case 1, maybe allocate this cell
1670 // 3. found something else => keep looking for cases 1 and 2
1671 // Case 3 is handled by a recursive call.
1672 for (int row = start_row; row <= last_row; row++) {
1673 Label next_test;
1674 bool test_for_null_also = (row == start_row);
1676 // See if the receiver is receiver[n].
1677 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1678 test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1679 // (The condition codes still reflect tst(scratch) from test_mdp_data_at's delay slot.)
1681 // The receiver is receiver[n]. Increment count[n].
1682 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1683 increment_mdp_data_at(count_offset, scratch);
1684 ba_short(done);
1685 bind(next_test);
1687 if (test_for_null_also) {
1688 Label found_null;
1689 // Failed the equality check on receiver[n]... Test for null.
1690 if (start_row == last_row) {
1691 // The only thing left to do is handle the null case.
1692 if (is_virtual_call) {
1693 brx(Assembler::zero, false, Assembler::pn, found_null);
1694 delayed()->nop();
1695 // Receiver did not match any saved receiver and there is no empty row for it.
1696 // Increment total counter to indicate polymorphic case.
1697 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1698 ba_short(done);
1699 bind(found_null);
1700 } else {
1701 brx(Assembler::notZero, false, Assembler::pt, done);
1702 delayed()->nop();
1703 }
1704 break;
1705 }
1706 // Since null is rare, make it the branch-taken case.
1707 brx(Assembler::zero, false, Assembler::pn, found_null);
1708 delayed()->nop();
1710 // Put all the "Case 3" tests here.
1711 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1713 // Found a null. Keep searching for a matching receiver,
1714 // but remember that this is an empty (unused) slot.
1715 bind(found_null);
1716 }
1717 }
1719 // In the fall-through case, we found no matching receiver, but we
1720 // observed that receiver[start_row] is null.
1722 // Fill in the receiver field and increment the count.
1723 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1724 set_mdp_data_at(recvr_offset, receiver);
1725 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1726 mov(DataLayout::counter_increment, scratch);
1727 set_mdp_data_at(count_offset, scratch);
1728 if (start_row > 0) {
1729 ba_short(done);
1730 }
1731 }
1733 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1734 Register scratch, bool is_virtual_call) {
1735 assert(ProfileInterpreter, "must be profiling");
1736 Label done;
1738 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1740 bind (done);
1741 }
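// The recursive helper above unrolls what is conceptually this row scan over
// the receiver-type table of a VirtualCallData cell (a sketch):
//   for (row = 0; row < row_limit; row++)            // 1. match: bump its count
//     if (rows[row].receiver == receiver) { rows[row].count++; return; }
//   for (row = 0; row < row_limit; row++)            // 2. claim the first empty row
//     if (rows[row].receiver == NULL) {
//       rows[row].receiver = receiver;
//       rows[row].count    = DataLayout::counter_increment;
//       return;
//     }
//   if (is_virtual_call) total_count++;              // 3. table full: polymorphic case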
1744 // Count a ret in the bytecodes.
1746 void InterpreterMacroAssembler::profile_ret(TosState state,
1747 Register return_bci,
1748 Register scratch) {
1749 if (ProfileInterpreter) {
1750 Label profile_continue;
1753 // If no method data exists, go to profile_continue.
1754 test_method_data_pointer(profile_continue);
1756 // Update the total ret count.
1757 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1759 for (uint row = 0; row < RetData::row_limit(); row++) {
1760 Label next_test;
1762 // See if return_bci is equal to bci[n]:
1763 test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1764 return_bci, next_test, scratch);
1766 // return_bci is equal to bci[n]. Increment the count.
1767 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1769 // The method data pointer needs to be updated to reflect the new target.
1770 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1771 ba_short(profile_continue);
1772 bind(next_test);
1773 }
1775 update_mdp_for_ret(state, return_bci);
1777 bind (profile_continue);
1778 }
1779 }
1781 // Profile an unexpected null in the bytecodes.
1782 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1783 if (ProfileInterpreter) {
1784 Label profile_continue;
1786 // If no method data exists, go to profile_continue.
1787 test_method_data_pointer(profile_continue);
1789 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1791 // The method data pointer needs to be updated.
1792 int mdp_delta = in_bytes(BitData::bit_data_size());
1793 if (TypeProfileCasts) {
1794 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1795 }
1796 update_mdp_by_constant(mdp_delta);
1798 bind (profile_continue);
1799 }
1800 }
1802 void InterpreterMacroAssembler::profile_typecheck(Register klass,
1803 Register scratch) {
1804 if (ProfileInterpreter) {
1805 Label profile_continue;
1807 // If no method data exists, go to profile_continue.
1808 test_method_data_pointer(profile_continue);
1810 int mdp_delta = in_bytes(BitData::bit_data_size());
1811 if (TypeProfileCasts) {
1812 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1814 // Record the object type.
1815 record_klass_in_profile(klass, scratch, false);
1816 }
1818 // The method data pointer needs to be updated.
1819 update_mdp_by_constant(mdp_delta);
1821 bind (profile_continue);
1822 }
1823 }
1825 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
1826 if (ProfileInterpreter && TypeProfileCasts) {
1827 Label profile_continue;
1829 // If no method data exists, go to profile_continue.
1830 test_method_data_pointer(profile_continue);
1832 int count_offset = in_bytes(CounterData::count_offset());
1833 // Back up the address, since we have already bumped the mdp.
1834 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1836 // *Decrement* the counter. We expect to see zero or small negatives.
1837 increment_mdp_data_at(count_offset, scratch, true);
1839 bind (profile_continue);
1840 }
1841 }
1843 // Count the default case of a switch construct.
1845 void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
1846 if (ProfileInterpreter) {
1847 Label profile_continue;
1849 // If no method data exists, go to profile_continue.
1850 test_method_data_pointer(profile_continue);
1852 // Update the default case count
1853 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1854 scratch);
1856 // The method data pointer needs to be updated.
1857 update_mdp_by_offset(
1858 in_bytes(MultiBranchData::default_displacement_offset()),
1859 scratch);
1861 bind (profile_continue);
1862 }
1863 }
1865 // Count the index'th case of a switch construct.
1867 void InterpreterMacroAssembler::profile_switch_case(Register index,
1868 Register scratch,
1869 Register scratch2,
1870 Register scratch3) {
1871 if (ProfileInterpreter) {
1872 Label profile_continue;
1874 // If no method data exists, go to profile_continue.
1875 test_method_data_pointer(profile_continue);
1877 // Compute the base offset: (index * per_case_size()) + case_array_offset()
1878 set(in_bytes(MultiBranchData::per_case_size()), scratch);
1879 smul(index, scratch, scratch);
1880 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);
1882 // Update the case count
1883 increment_mdp_data_at(scratch,
1884 in_bytes(MultiBranchData::relative_count_offset()),
1885 scratch2,
1886 scratch3);
1888 // The method data pointer needs to be updated.
1889 update_mdp_by_offset(scratch,
1890 in_bytes(MultiBranchData::relative_displacement_offset()),
1891 scratch2);
1893 bind (profile_continue);
1894 }
1895 }
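// Case-cell addressing above, roughly (sketch):
//   base = in_bytes(case_array_offset()) + index * in_bytes(per_case_size());
//   *(mdp + base + relative_count_offset()) += DataLayout::counter_increment;
//   mdp += *(mdp + base + relative_displacement_offset());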
1897 // Add an InterpMonitorElem to the stack (see frame_sparc.hpp)
1899 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
1900 Register Rtemp,
1901 Register Rtemp2 ) {
1903 Register Rlimit = Lmonitors;
1904 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1905 assert( (delta & LongAlignmentMask) == 0,
1906 "sizeof BasicObjectLock must be even number of doublewords");
1908 sub( SP, delta, SP);
1909 sub( Lesp, delta, Lesp);
1910 sub( Lmonitors, delta, Lmonitors);
1912 if (!stack_is_empty) {
1914 // must copy stack contents down
1916 Label start_copying, next;
1919 compute_stack_base(Rtemp);
1920 ba(start_copying);
1921 delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
1923 // note: must copy from low memory upwards
1924 // On entry to loop,
1925 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1926 // Loop mutates Rtemp
1928 bind( next);
1930 st_ptr(Rtemp2, Rtemp, 0);
1931 inc(Rtemp, wordSize);
1932 cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1934 bind( start_copying );
1936 brx( notEqual, true, pn, next );
1937 delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1939 // done copying stack
1940 }
1941 }
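// The copy loop above, sketched (the registers already point at the *new*,
// lower locations because SP/Lesp/Lmonitors were just decremented by delta):
//   for (p = Lesp + wordSize; p != Lmonitors; p += wordSize)
//     *(intptr_t*)p = *(intptr_t*)(p + delta);   // slide each word down by delta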
1943 // Locals
1944 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
1945 assert_not_delayed();
1946 sll(index, Interpreter::logStackElementSize, index);
1947 sub(Llocals, index, index);
1948 ld_ptr(index, 0, dst);
1949 // Note: index must hold the effective address--the iinc template uses it
1950 }
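// Local-variable addressing on SPARC: locals grow toward lower addresses from
// Llocals, so slot i lives at Llocals - i * stackElementSize. Roughly:
//   index = Llocals - (index << Interpreter::logStackElementSize);  // effective address
//   dst   = *(intptr_t*)index;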
1952 // Just like access_local_ptr, but the value is a returnAddress
1953 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
1954 Register dst ) {
1955 assert_not_delayed();
1956 sll(index, Interpreter::logStackElementSize, index);
1957 sub(Llocals, index, index);
1958 ld_ptr(index, 0, dst);
1959 }
1961 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
1962 assert_not_delayed();
1963 sll(index, Interpreter::logStackElementSize, index);
1964 sub(Llocals, index, index);
1965 ld(index, 0, dst);
1966 // Note: index must hold the effective address--the iinc template uses it
1967 }
1970 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
1971 assert_not_delayed();
1972 sll(index, Interpreter::logStackElementSize, index);
1973 sub(Llocals, index, index);
1974 // The first half is stored at local slot n+1 (locals grow down from Llocals[n])
1975 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
1976 }
1979 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
1980 assert_not_delayed();
1981 sll(index, Interpreter::logStackElementSize, index);
1982 sub(Llocals, index, index);
1983 ldf(FloatRegisterImpl::S, index, 0, dst);
1984 }
1987 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
1988 assert_not_delayed();
1989 sll(index, Interpreter::logStackElementSize, index);
1990 sub(Llocals, index, index);
1991 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1992 }
1995 #ifdef ASSERT
1996 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
1997 Label L;
1999 assert(Rindex != Rscratch, "Registers cannot be same");
2000 assert(Rindex != Rscratch1, "Registers cannot be same");
2001 assert(Rlimit != Rscratch, "Registers cannot be same");
2002 assert(Rlimit != Rscratch1, "Registers cannot be same");
2003 assert(Rscratch1 != Rscratch, "Registers cannot be same");
2006 add(Rindex, offset, Rscratch);
2007 add(Rlimit, 64 + STACK_BIAS, Rscratch1);
2008 cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
2009 stop("regsave area is being clobbered");
2010 bind(L);
2011 }
2012 #endif // ASSERT
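// The debug-only check above guards against a mis-computed local index storing
// into the 64-byte register window save area at the bottom of the caller's
// frame. Roughly (a sketch; Rlimit is FP in all callers here):
//   if ((uintptr_t)(Rindex + offset) < (uintptr_t)(Rlimit + STACK_BIAS + 64))
//     stop("regsave area is being clobbered");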
2015 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
2016 assert_not_delayed();
2017 sll(index, Interpreter::logStackElementSize, index);
2018 sub(Llocals, index, index);
2019 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
2020 st(src, index, 0);
2021 }
2023 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
2024 assert_not_delayed();
2025 sll(index, Interpreter::logStackElementSize, index);
2026 sub(Llocals, index, index);
2027 #ifdef ASSERT
2028 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2029 #endif
2030 st_ptr(src, index, 0);
2031 }
2035 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
2036 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
2037 }
2039 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
2040 assert_not_delayed();
2041 sll(index, Interpreter::logStackElementSize, index);
2042 sub(Llocals, index, index);
2043 #ifdef ASSERT
2044 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2045 #endif
2046 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // first half lands at slot n+1
2047 }
2050 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
2051 assert_not_delayed();
2052 sll(index, Interpreter::logStackElementSize, index);
2053 sub(Llocals, index, index);
2054 #ifdef ASSERT
2055 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2056 #endif
2057 stf(FloatRegisterImpl::S, src, index, 0);
2058 }
2061 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
2062 assert_not_delayed();
2063 sll(index, Interpreter::logStackElementSize, index);
2064 sub(Llocals, index, index);
2065 #ifdef ASSERT
2066 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2067 #endif
2068 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
2069 }
2072 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
2073 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
2074 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
2075 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
2076 }
2079 Address InterpreterMacroAssembler::top_most_monitor() {
2080 return Address(FP, top_most_monitor_byte_offset());
2081 }
2084 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
2085 add( Lesp, wordSize, Rdest );
2086 }
2088 #endif /* CC_INTERP */
2090 void InterpreterMacroAssembler::get_method_counters(Register method,
2091 Register Rcounters,
2092 Label& skip) {
2093 Label has_counters;
2094 Address method_counters(method, in_bytes(Method::method_counters_offset()));
2095 ld_ptr(method_counters, Rcounters);
2096 br_notnull_short(Rcounters, Assembler::pt, has_counters);
2097 call_VM(noreg, CAST_FROM_FN_PTR(address,
2098 InterpreterRuntime::build_method_counters), method);
2099 ld_ptr(method_counters, Rcounters);
2100 br_null(Rcounters, false, Assembler::pn, skip); // No MethodCounters: allocation failed (out of memory)
2101 delayed()->nop();
2102 bind(has_counters);
2103 }
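// Lazy MethodCounters lookup, as control flow (a sketch):
//   MethodCounters* mcs = method->method_counters();
//   if (mcs == NULL) {
//     call the VM to build_method_counters(method);
//     mcs = method->method_counters();   // reload after the VM call
//     if (mcs == NULL) goto skip;        // allocation failed: OutOfMemory
//   }
//   // fall through with mcs in Rcounters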
2105 void InterpreterMacroAssembler::increment_invocation_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
2106 assert(UseCompiler, "incrementing must be useful");
2107 assert_different_registers(Rcounters, Rtmp, Rtmp2);
2109 Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
2110 InvocationCounter::counter_offset());
2111 Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
2112 InvocationCounter::counter_offset());
2113 int delta = InvocationCounter::count_increment;
2115 // Load each counter in a register
2116 ld( inv_counter, Rtmp );
2117 ld( be_counter, Rtmp2 );
2119 assert( is_simm13( delta ), "delta too large");
2121 // Add the delta to the invocation counter and store the result
2122 add( Rtmp, delta, Rtmp );
2124 // Mask the backedge counter
2125 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2127 // Store value
2128 st( Rtmp, inv_counter);
2130 // Add invocation counter + backedge counter
2131 add( Rtmp, Rtmp2, Rtmp);
2133 // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
2134 }
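// Net effect (sketch):
//   inv_counter += count_increment;                         // bumped and stored back
//   Rtmp = inv_counter + (be_counter & count_mask_value);   // status bits masked off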
2136 void InterpreterMacroAssembler::increment_backedge_counter( Register Rcounters, Register Rtmp, Register Rtmp2 ) {
2137 assert(UseCompiler, "incrementing must be useful");
2138 assert_different_registers(Rcounters, Rtmp, Rtmp2);
2140 Address be_counter (Rcounters, MethodCounters::backedge_counter_offset() +
2141 InvocationCounter::counter_offset());
2142 Address inv_counter(Rcounters, MethodCounters::invocation_counter_offset() +
2143 InvocationCounter::counter_offset());
2145 int delta = InvocationCounter::count_increment;
2146 // Load each counter in a register
2147 ld( be_counter, Rtmp );
2148 ld( inv_counter, Rtmp2 );
2150 // Add the delta to the backedge counter
2151 add( Rtmp, delta, Rtmp );
2153 // Mask the invocation counter, add to backedge counter
2154 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2156 // and store the result to memory
2157 st( Rtmp, be_counter );
2159 // Add backedge + invocation counter
2160 add( Rtmp, Rtmp2, Rtmp );
2162 // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2163 }
2165 #ifndef CC_INTERP
2166 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2167 Register branch_bcp,
2168 Register Rtmp ) {
2169 Label did_not_overflow;
2170 Label overflow_with_error;
2171 assert_different_registers(backedge_count, Rtmp, branch_bcp);
2172 assert(UseOnStackReplacement, "UseOnStackReplacement must be set to use test_backedge_count_for_osr");
2174 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2175 load_contents(limit, Rtmp);
2176 cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
2178 // When ProfileInterpreter is on, the backedge_count comes from the
2179 // MethodData*, whose value does not get reset on the call to
2180 // frequency_counter_overflow(). To avoid excessive calls to the overflow
2181 // routine while the method is being compiled, add a second test to make sure
2182 // the overflow function is called only once every overflow_frequency.
2183 if (ProfileInterpreter) {
2184 const int overflow_frequency = 1024;
2185 andcc(backedge_count, overflow_frequency-1, Rtmp);
2186 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2187 delayed()->nop();
2188 }
2190 // overflow in loop, pass branch bytecode
2191 set(6, Rtmp);
2192 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2194 // Was an OSR adapter generated?
2195 // O0 = osr nmethod
2196 br_null_short(O0, Assembler::pn, overflow_with_error);
2198 // Has the nmethod been invalidated already?
2199 ld(O0, nmethod::entry_bci_offset(), O2);
2200 cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
2202 // migrate the interpreter frame off of the stack
2204 mov(G2_thread, L7);
2205 // save nmethod
2206 mov(O0, L6);
2207 set_last_Java_frame(SP, noreg);
2208 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2209 reset_last_Java_frame();
2210 mov(L7, G2_thread);
2212 // move OSR nmethod to I1
2213 mov(L6, I1);
2215 // OSR buffer to I0
2216 mov(O0, I0);
2218 // remove the interpreter frame
2219 restore(I5_savedSP, 0, SP);
2221 // Jump to the osr code.
2222 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
2223 jmp(O2, G0);
2224 delayed()->nop();
2226 bind(overflow_with_error);
2228 bind(did_not_overflow);
2229 }
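// Overall control flow (a sketch):
//   if (backedge_count < InterpreterBackwardBranchLimit) return;     // no overflow
//   if (ProfileInterpreter && (backedge_count & 1023) != 0) return;  // throttle retries
//   nmethod* osr_nm = frequency_counter_overflow(branch_bcp);        // may trigger a compile
//   if (osr_nm == NULL || osr_nm->entry_bci == InvalidOSREntryBci) return;
//   buf = OSR_migration_begin(thread);   // copy interpreter state out of the frame
//   pop the interpreter frame;           // restore(I5_savedSP, 0, SP)
//   jump to osr_nm's OSR entry point, with buf in O0 and osr_nm in O1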
2233 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
2234 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
2235 }
2238 // local helper function for the verify_oop_or_return_address macro
2239 static bool verify_return_address(Method* m, int bci) {
2240 #ifndef PRODUCT
2241 address pc = (address)(m->constMethod())
2242 + in_bytes(ConstMethod::codes_offset()) + bci;
2243 // assume it is a valid return address if it is inside m and is preceded by a jsr
2244 if (!m->contains(pc)) return false;
2245 address jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2247 if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr) return true;
2248 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2249 if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr_w) return true;
2250 #endif // PRODUCT
2251 return false;
2252 }
2255 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2256 if (!VerifyOops) return;
2257 // The VM documentation for the astore[_wide] bytecode allows
2258 // the TOS value to be a return address as well as an oop.
2259 Label test;
2260 Label skip;
2261 // See if it is an address (in the current method):
2263 mov(reg, Rtmp);
2264 const int log2_bytecode_size_limit = 16;
2265 srl(Rtmp, log2_bytecode_size_limit, Rtmp);
2266 br_notnull_short( Rtmp, pt, test );
2268 // %%% should use call_VM_leaf here?
2269 save_frame_and_mov(0, Lmethod, O0, reg, O1);
2270 save_thread(L7_thread_cache);
2271 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2272 delayed()->nop();
2273 restore_thread(L7_thread_cache);
2274 br_notnull( O0, false, pt, skip );
2275 delayed()->restore();
2277 // Not a return address; verify it as an oop:
2279 bind(test);
2280 verify_oop(reg);
2281 bind(skip);
2282 }
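// Decision logic above, roughly (sketch):
//   if ((reg >> 16) != 0)                              // too large to be a bci
//     verify_oop(reg);
//   else if (!verify_return_address(Lmethod, reg))     // out-of-line check
//     verify_oop(reg);
//   // else: a plausible returnAddress; nothing to verify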
2285 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2286 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2287 }
2288 #endif /* CC_INTERP */
2290 // Inline assembly for:
2291 //
2292 // if (thread is in interp_only_mode) {
2293 // InterpreterRuntime::post_method_entry();
2294 // }
2295 // if (DTraceMethodProbes) {
2296 // SharedRuntime::dtrace_method_entry(method, receiver);
2297 // }
2298 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2299 // SharedRuntime::rc_trace_method_entry(method, receiver);
2300 // }
2302 void InterpreterMacroAssembler::notify_method_entry() {
2304 // C++ interpreter only uses this for native methods.
2306 // Whenever JVMTI puts a thread in interp_only_mode, method
2307 // entry/exit events are sent for that thread to track stack
2308 // depth. If it is possible to enter interp_only_mode we add
2309 // the code to check if the event should be sent.
2310 if (JvmtiExport::can_post_interpreter_events()) {
2311 Label L;
2312 Register temp_reg = O5;
2313 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2314 ld(interp_only, temp_reg);
2315 cmp_and_br_short(temp_reg, 0, equal, pt, L);
2316 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2317 bind(L);
2318 }
2320 {
2321 Register temp_reg = O5;
2322 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2323 call_VM_leaf(noreg,
2324 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2325 G2_thread, Lmethod);
2326 }
2328 // RedefineClasses() tracing support for obsolete method entry
2329 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2330 call_VM_leaf(noreg,
2331 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2332 G2_thread, Lmethod);
2333 }
2334 }
2337 // Inline assembly for:
2338 //
2339 // if (thread is in interp_only_mode) {
2340 // // save result
2341 // InterpreterRuntime::post_method_exit();
2342 // // restore result
2343 // }
2344 // if (DTraceMethodProbes) {
2345 // SharedRuntime::dtrace_method_exit(thread, method);
2346 // }
2347 //
2348 // Native methods have their result stored in d_tmp and l_tmp
2349 // Java methods have their result stored in the expression stack
2351 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2352 TosState state,
2353 NotifyMethodExitMode mode) {
2354 // C++ interpreter only uses this for native methods.
2356 // Whenever JVMTI puts a thread in interp_only_mode, method
2357 // entry/exit events are sent for that thread to track stack
2358 // depth. If it is possible to enter interp_only_mode we add
2359 // the code to check if the event should be sent.
2360 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2361 Label L;
2362 Register temp_reg = O5;
2363 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2364 ld(interp_only, temp_reg);
2365 cmp_and_br_short(temp_reg, 0, equal, pt, L);
2367 // Note: frame::interpreter_frame_result has a dependency on how the
2368 // method result is saved across the call to post_method_exit. For
2369 // native methods it assumes the result registers are saved to
2370 // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2371 // implementation will need to be updated too.
2373 save_return_value(state, is_native_method);
2374 call_VM(noreg,
2375 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2376 restore_return_value(state, is_native_method);
2377 bind(L);
2378 }
2380 {
2381 Register temp_reg = O5;
2382 // Dtrace notification
2383 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2384 save_return_value(state, is_native_method);
2385 call_VM_leaf(
2386 noreg,
2387 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2388 G2_thread, Lmethod);
2389 restore_return_value(state, is_native_method);
2390 }
2391 }
2393 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
2394 #ifdef CC_INTERP
2395 // result potentially in O0/O1: save it across calls
2396 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
2397 #ifdef _LP64
2398 stx(O0, STATE(_native_lresult));
2399 #else
2400 std(O0, STATE(_native_lresult));
2401 #endif
2402 #else // CC_INTERP
2403 if (is_native_call) {
2404 stf(FloatRegisterImpl::D, F0, d_tmp);
2405 #ifdef _LP64
2406 stx(O0, l_tmp);
2407 #else
2408 std(O0, l_tmp);
2409 #endif
2410 } else {
2411 push(state);
2412 }
2413 #endif // CC_INTERP
2414 }
2416 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
2417 #ifdef CC_INTERP
2418 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
2419 #ifdef _LP64
2420 ldx(STATE(_native_lresult), O0);
2421 #else
2422 ldd(STATE(_native_lresult), O0);
2423 #endif
2424 #else // CC_INTERP
2425 if (is_native_call) {
2426 ldf(FloatRegisterImpl::D, d_tmp, F0);
2427 #ifdef _LP64
2428 ldx(l_tmp, O0);
2429 #else
2430 ldd(l_tmp, O0);
2431 #endif
2432 } else {
2433 pop(state);
2434 }
2435 #endif // CC_INTERP
2436 }
2438 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
2439 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
2440 int increment, int mask,
2441 Register scratch1, Register scratch2,
2442 Condition cond, Label *where) {
2443 ld(counter_addr, scratch1);
2444 add(scratch1, increment, scratch1);
2445 if (is_simm13(mask)) {
2446 andcc(scratch1, mask, G0);
2447 } else {
2448 set(mask, scratch2);
2449 andcc(scratch1, scratch2, G0);
2450 }
2451 br(cond, false, Assembler::pn, *where);
2452 delayed()->st(scratch1, counter_addr);
2453 }
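// Semantics (sketch). The store sits in the delay slot with annul off, so it
// executes whether or not the branch is taken:
//   c = *counter_addr + increment;
//   cc = (c & mask);            // andcc against G0; only the flags are kept
//   *counter_addr = c;          // delay-slot store, runs on both paths
//   if (cc satisfies cond) goto *where;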