Thu, 07 Oct 2010 15:12:57 -0400
6989297: Integrate additional portability improvements
Reviewed-by: vladidan, dholmes
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_interp_masm_sparc.cpp.incl"

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
// FAST_DISPATCH is undefined again immediately below: the fast dispatch
// code paths stay compilable but are currently disabled.
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
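
// A worked example of the computation above (illustrative numbers only, not
// emitted code): on LP64 with wordSize == 8, locals_size = 5 words and
// args_size = 2 words give
//   delta = 5 - 2 = 3 words  -> round_to -> 4 words  -> sll -> 32 bytes
// while args_size >= locals_size leaves delta = 0, so no extra space is
// carved out. The rounding enforces the 2-word SP alignment noted above.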

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                         // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress); // get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);          // multiply by wordSize
  set(tbl, G3_scratch);                                  // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);      // get entry addr
#endif
}
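
// For reference, the non-FAST_DISPATCH path above emits a sequence along the
// lines of the following SPARC assembly (an illustrative sketch; the exact
// scratch registers and table address depend on the TosState and the build):
//   ldub  [Lbcp + bcp_incr], Lbyte_code       ! fetch next bytecode
//   sll   Lbyte_code, 3, Lbyte_code           ! scale by wordSize (LP64)
//   set   dispatch_table, G3_scratch          ! materialize table base
//   ldx   [G3_scratch + Lbyte_code], IdispatchAddress   ! load handler entry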

// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);  // load next bytecode
  dispatch_base(state, table);
}


void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                    break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                         break;
  case btos:                                          // fall through
  case ctos:                                          // fall through
  case stos:                                          // fall through
  case itos: ld(val_addr, Otos_l1);                        break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f);  break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d);  break;
  case vtos: /* nothing to do */                           break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    tst(thr_state);
    br(zero, false, pt, L);  // if (thread->jvmti_thread_state() == NULL) exit;
    delayed()->nop();

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
    br(Assembler::notEqual, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread(); //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
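
// A sketch of what "split into two slots" means (assuming a 32-bit build with
// 4-byte stack elements; SPARC is big-endian, so the high-order word sits at
// the lower address):
//   address a     : high-order word of a long/double ("memory-correct" order)
//   address a + 4 : low-order word
// Because each half is an ordinary one-slot value, dup/swap bytecodes can
// shuffle slots without knowing whether a slot holds half of a category-2
// value.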
313 // Known good alignment in _LP64 but unknown otherwise
314 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
315 assert_not_delayed();
317 #ifdef _LP64
318 ldf(FloatRegisterImpl::D, r1, offset, d);
319 #else
320 ldf(FloatRegisterImpl::S, r1, offset, d);
321 ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
322 #endif
323 }
325 // Known good alignment in _LP64 but unknown otherwise
326 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
327 assert_not_delayed();
329 #ifdef _LP64
330 stf(FloatRegisterImpl::D, d, r1, offset);
331 // store something more useful here
332 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
333 #else
334 stf(FloatRegisterImpl::S, d, r1, offset);
335 stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
336 #endif
337 }
340 // Known good alignment in _LP64 but unknown otherwise
341 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
342 assert_not_delayed();
343 #ifdef _LP64
344 ldx(r1, offset, rd);
345 #else
346 ld(r1, offset, rd);
347 ld(r1, offset + Interpreter::stackElementSize, rd->successor());
348 #endif
349 }
351 // Known good alignment in _LP64 but unknown otherwise
352 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
353 assert_not_delayed();
355 #ifdef _LP64
356 stx(l, r1, offset);
357 // store something more useful here
358 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
359 #else
360 st(l, r1, offset);
361 st(l->successor(), r1, offset + Interpreter::stackElementSize);
362 #endif
363 }

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}
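
// A sketch of the resulting layout on a 32-bit build (stackElementSize == 4;
// Lesp0 is the value of Lesp before the push; the stack grows downward):
//   [Lesp0 - 4] = high-order word (O0/Otos_l1)   ! lower address, big-endian order
//   [Lesp0 + 0] = low-order word  (O1/Otos_l2)
//   Lesp = Lesp0 - 8
// On LP64 a single stx covers both words at [Lesp0 - 8].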


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();          break;
  case btos: push_i();            break;
  case ctos:
  case stos: push_i();            break;
  case itos: push_i();            break;
  case ltos: push_l();            break;
  case ftos: push_f();            break;
  case dtos: push_d();            break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();           break;
  case btos: pop_i();             break;
  case ctos:
  case stos: pop_i();             break;
  case itos: pop_i();             break;
  case ltos: pop_l();             break;
  case ftos: pop_f();             break;
  case dtos: pop_d();             break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}
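
// For illustration, a swap of the two topmost stack slots could be emitted
// with these helpers roughly as follows (a sketch, assuming Otos_l1 and
// G3_scratch are free; TemplateTable has the authoritative sequences):
//   load_ptr(1, Otos_l1);      // load next-to-top slot
//   load_ptr(0, G3_scratch);   // load top slot
//   store_ptr(1, G3_scratch);
//   store_ptr(0, Otos_l1);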


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver Oop
}

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  verify_oop(Lmethod);
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
  add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size );  // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}
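
// A worked example of the SP computation above (hypothetical numbers, chosen
// only to illustrate the arithmetic on LP64 with wordSize == 8): if
// max_stack were 10 words and frame::memory_parameter_word_sp_offset were 23,
//   Gframe_size = (10 + 23) * 8 = 264 bytes
//   SP = ((Lesp - 264) & ~15) - STACK_BIAS
// i.e. SP lands below the expression stack, rounded down to a 16-byte
// boundary and biased as SPARC V9 requires.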


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp(Rtemp, FP);
  brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp(Rsp, Rtemp);
  brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
  delayed()->nop();

  br(Assembler::always, false, Assembler::pn, OK);
  delayed()->nop();

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    tst(scratch);
    br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
    delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need methodOop in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull(target, false, Assembler::pt, ok);
    delayed()->nop();
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false, false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
  default: ShouldNotReachHere();

  case Signed:   ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
  case Unsigned: ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
  default: ShouldNotReachHere();

  case set_CC:      orcc( Rdst, Rtmp, Rdst );  break;
  case dont_set_CC: or3(  Rdst, Rtmp, Rdst );  break;
  }
}
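
// For example (illustrative): for the bytecode stream bytes { 0x12, 0x34 }
// at bcp_offset, an Unsigned fetch assembles
//   Rdst = (0x12 << 8) | 0x34 = 0x1234
// i.e. the big-endian two-byte operand, regardless of the alignment of Lbcp.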


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                        int        bcp_offset,
                        Register   Rtmp,
                        Register   Rdst,
                        setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
  default: ShouldNotReachHere();

  case set_CC: break;
  case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}
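
// A worked example of the unaligned path (illustrative): for operand bytes
// { 0xDE, 0xAD, 0xBE, 0xEF } at bcp_offset, the byte loads assemble
//   Rdst = (0xDE << 24) | (0xAD << 16) | (0xBE << 8) | 0xEF = 0xDEADBEEF
// matching the big-endian word a single ld/ldsw would fetch on the aligned
// fast path (with sign extension into the upper half on LP64).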


void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
    get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
    xor3(tmp, -1, tmp);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
    ldub(Lbcp, bcp_offset, tmp);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if could use
// a single conditional branch (i.e. if span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}
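
// Typical use of the split (a sketch; index_check_without_pop below is a real
// client): the caller owns the delay slot between the two halves and can put
// useful work in it, e.g.
//   throw_if_not_1_icc(lessUnsigned, index_ok);
//   delayed()->sll(index, index_shift, index);  // runs in the delay slot
//   throw_if_not_2(throw_entry, Rscratch, index_ok);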


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp );  // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res);  // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res);  // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
}


// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized);  // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state);  // save tos
  ld(access_flags, G3_scratch);  // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  tstbool(G1_scratch);
  br(Assembler::notZero, false, pn, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull(G3_scratch, false, pt, unlock);
  delayed()->nop();

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba(false, unlocked);
    delayed()->nop();
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

#ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp(Rmptr, Rlimit);
      brx(Assembler::greaterEqualUnsigned, false, pn, L);
      delayed()->nop();
      stop("monitor stack has negative size");
      bind(L);
    }
#endif
    bind(restart);
    ba(false, entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);  // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba(false, restart);
      delayed()->nop();
    }

    bind(loop);
    cmp(Rtemp, G0);                             // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                          // otherwise advance to next entry
#ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp(Rmptr, Rlimit);
      brx(Assembler::greaterEqualUnsigned, false, pn, L);
      delayed()->nop();
      stop("ran off the end of the monitor stack");
      bind(L);
    }
#endif
    bind(entry);
    cmp(Rmptr, Rlimit);                         // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}
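
// The scan above is, in effect (a pseudo-code sketch of the emitted loop, not
// literal source):
//   for (ptr = bottom_most_monitor; ptr != Lmonitors; ptr -= delta) {
//     if (ptr->obj != NULL)  goto exception;   // entry still locked
//   }
// where delta is the byte size of one BasicObjectLock and each entry's obj
// field is loaded in the branch delay slot.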


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_oop(Lmethod);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save());     break;  // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save());   // fall through  // O1 -> I1
#endif
  case btos:                                        // fall through
  case ctos:
  case stos:                                        // fall through
  case atos:                                        // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save());   break;  // O0 -> I0
  case ftos:                                        // fall through
  case dtos:                                        // fall through
  case vtos: /* nothing to do */                    break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1; we can't tell whether we're returning to
    // interpreted or compiled code, so to be safe we use G1 as well as O0/O1.

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(),  0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp(mark_reg, temp_reg);
    brx(Assembler::equal, true, Assembler::pt, done);
    delayed()->nop();

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size. We use a 'logical' page size of
    // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
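    //
    // A worked example of the composite test (illustrative numbers): suppose
    // on a 32-bit build temp_reg = displaced_mark - SP = 0x00000040, i.e. the
    // displaced header lies 64 bytes above SP and its two lock bits are clear.
    // Then 0x00000040 & 0xFFFFF003 == 0, so the lock is treated as a recursive
    // stack lock. A mark word from some other thread's stack (distance >=
    // 0x1000) or one with lock bits set leaves the andcc result non-zero and
    // falls through toward the slow/inflated path.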
    andcc(temp_reg, 0xFFFFF003, G0);

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the methodOop and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
  if (Roff != noreg)
    // Roff contains a method data index ("mdi"). It defaults to zero.
    add(ImethodDataPtr, Roff, ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  set_method_data_pointer_offset(O0);
  bind(zero_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef _LP64
  bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue);
#else
  tst(ImethodDataPtr);
  br(Assembler::zero, false, Assembler::pn, zero_continue);
#endif
  delayed()->nop();
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
  add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register cur_bcp,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
#ifdef _LP64
  bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done);
#else
  tst(ImethodDataPtr);
  br(Assembler::notZero, false, Assembler::pn, done);
#endif

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
#ifdef _LP64
  delayed()->nop();
  sethi(profile_limit, Rtmp);
#else
  delayed()->sethi(profile_limit, Rtmp);
#endif
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp(invocation_count, Rtmp);
  br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
  delayed()->nop();

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp);
  set_method_data_pointer_offset(O0);
  ba(false, profile_continue);
  delayed()->nop();
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register. Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register. Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}
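
// A worked example of the saturating increment (illustrative, 32-bit values):
// if the counter is already 0xFFFFFFFF, addcc wraps it to 0 and sets the carry
// flag; subc then subtracts that carry, restoring 0xFFFFFFFF. For any
// non-saturated counter the carry stays clear and subc subtracts nothing, so
// the counter simply advances by DataLayout::counter_increment (== 1).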

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch. Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking a branch. Increment the not-taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}
1567 // Count a non-virtual call in the bytecodes.
1569 void InterpreterMacroAssembler::profile_call(Register scratch) {
1570 if (ProfileInterpreter) {
1571 Label profile_continue;
1573 // If no method data exists, go to profile_continue.
1574 test_method_data_pointer(profile_continue);
1576 // We are making a call. Increment the count.
1577 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1579 // The method data pointer needs to be updated to reflect the new target.
1580 update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1581 bind (profile_continue);
1582 }
1583 }
1586 // Count a final call in the bytecodes.
1588 void InterpreterMacroAssembler::profile_final_call(Register scratch) {
1589 if (ProfileInterpreter) {
1590 Label profile_continue;
1592 // If no method data exists, go to profile_continue.
1593 test_method_data_pointer(profile_continue);
1595 // We are making a call. Increment the count.
1596 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1598 // The method data pointer needs to be updated to reflect the new target.
1599 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1600 bind (profile_continue);
1601 }
1602 }
1605 // Count a virtual call in the bytecodes.
1607 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1608 Register scratch,
1609 bool receiver_can_be_null) {
1610 if (ProfileInterpreter) {
1611 Label profile_continue;
1613 // If no method data exists, go to profile_continue.
1614 test_method_data_pointer(profile_continue);
1617 Label skip_receiver_profile;
1618 if (receiver_can_be_null) {
1619 Label not_null;
1620 tst(receiver);
1621 brx(Assembler::notZero, false, Assembler::pt, not_null);
1622 delayed()->nop();
1623 // We are making a call. Increment the count for null receiver.
1624 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1625 ba(false, skip_receiver_profile);
1626 delayed()->nop();
1627 bind(not_null);
1628 }
1630 // Record the receiver type.
1631 record_klass_in_profile(receiver, scratch, true);
1632 bind(skip_receiver_profile);
1634 // The method data pointer needs to be updated to reflect the new target.
1635 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1636 bind (profile_continue);
1637 }
1638 }
1640 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1641 Register receiver, Register scratch,
1642 int start_row, Label& done, bool is_virtual_call) {
1643 if (TypeProfileWidth == 0) {
1644 if (is_virtual_call) {
1645 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1646 }
1647 return;
1648 }
1650 int last_row = VirtualCallData::row_limit() - 1;
1651 assert(start_row <= last_row, "must be work left to do");
1652 // Test this row for both the receiver and for null.
1653 // Take any of three different outcomes:
1654 // 1. found receiver => increment count and goto done
1655 // 2. found null => keep looking for case 1, maybe allocate this cell
1656 // 3. found something else => keep looking for cases 1 and 2
1657 // Case 3 is handled by a recursive call.
1658 for (int row = start_row; row <= last_row; row++) {
1659 Label next_test;
1660 bool test_for_null_also = (row == start_row);
1662 // See if the receiver is receiver[n].
1663 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1664 test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1665     // (test_mdp_data_at's delay-slot tst has set the condition codes from the loaded cell)
1667 // The receiver is receiver[n]. Increment count[n].
1668 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1669 increment_mdp_data_at(count_offset, scratch);
1670 ba(false, done);
1671 delayed()->nop();
1672 bind(next_test);
1674 if (test_for_null_also) {
1675 Label found_null;
1676 // Failed the equality check on receiver[n]... Test for null.
1677 if (start_row == last_row) {
1678 // The only thing left to do is handle the null case.
1679 if (is_virtual_call) {
1680 brx(Assembler::zero, false, Assembler::pn, found_null);
1681 delayed()->nop();
1682 // Receiver did not match any saved receiver and there is no empty row for it.
1683 // Increment total counter to indicate polymorphic case.
1684 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1685 ba(false, done);
1686 delayed()->nop();
1687 bind(found_null);
1688 } else {
1689 brx(Assembler::notZero, false, Assembler::pt, done);
1690 delayed()->nop();
1691 }
1692 break;
1693 }
1694       // Since null is rare, make it the branch-taken case.
1695 brx(Assembler::zero, false, Assembler::pn, found_null);
1696 delayed()->nop();
1698 // Put all the "Case 3" tests here.
1699 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1701 // Found a null. Keep searching for a matching receiver,
1702 // but remember that this is an empty (unused) slot.
1703 bind(found_null);
1704 }
1705 }
1707 // In the fall-through case, we found no matching receiver, but we
1708   // observed that receiver[start_row] is NULL.
1710 // Fill in the receiver field and increment the count.
1711 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1712 set_mdp_data_at(recvr_offset, receiver);
1713 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1714 mov(DataLayout::counter_increment, scratch);
1715 set_mdp_data_at(count_offset, scratch);
1716 if (start_row > 0) {
1717 ba(false, done);
1718 delayed()->nop();
1719 }
1720 }
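// The recursion above unrolls into straight-line code whose net effect
// is this C sketch (receiver_cell/count_cell are illustrative names for
// the per-row cells of the VirtualCallData area):
//
//   for (row = 0; row < row_limit; row++)                  // 1. look for a match
//     if (receiver == receiver_cell(row)) { count_cell(row) += counter_increment; goto done; }
//   for (row = 0; row < row_limit; row++)                  // 2. claim the first empty row
//     if (receiver_cell(row) == NULL) { receiver_cell(row) = receiver;
//                                       count_cell(row) = counter_increment; goto done; }
//   if (is_virtual_call) total_count += counter_increment; // 3. table full: polymorphic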
1722 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1723 Register scratch, bool is_virtual_call) {
1724 assert(ProfileInterpreter, "must be profiling");
1725 Label done;
1727 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1729 bind (done);
1730 }
1733 // Count a ret in the bytecodes.
1735 void InterpreterMacroAssembler::profile_ret(TosState state,
1736 Register return_bci,
1737 Register scratch) {
1738 if (ProfileInterpreter) {
1739 Label profile_continue;
1742 // If no method data exists, go to profile_continue.
1743 test_method_data_pointer(profile_continue);
1745 // Update the total ret count.
1746 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1748     for (uint row = 0; row < RetData::row_limit(); row++) {
1749 Label next_test;
1751 // See if return_bci is equal to bci[n]:
1752 test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1753 return_bci, next_test, scratch);
1755 // return_bci is equal to bci[n]. Increment the count.
1756 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1758 // The method data pointer needs to be updated to reflect the new target.
1759 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1760 ba(false, profile_continue);
1761 delayed()->nop();
1762 bind(next_test);
1763 }
1765 update_mdp_for_ret(state, return_bci);
1767 bind (profile_continue);
1768 }
1769 }
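// Rough C equivalent of the scan above (bci_cell etc. are illustrative
// names for the per-row RetData cells):
//
//   total_count += counter_increment;
//   for (row = 0; row < RetData::row_limit(); row++) {
//     if (return_bci == bci_cell(row)) {
//       bci_count_cell(row) += counter_increment;
//       mdp += bci_displacement_cell(row);   // step the mdp to the target
//       goto profile_continue;
//     }
//   }
//   update_mdp_for_ret(state, return_bci);   // slow path via the runtime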
1771 // Profile an unexpected null in the bytecodes.
1772 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1773 if (ProfileInterpreter) {
1774 Label profile_continue;
1776 // If no method data exists, go to profile_continue.
1777 test_method_data_pointer(profile_continue);
1779 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1781 // The method data pointer needs to be updated.
1782 int mdp_delta = in_bytes(BitData::bit_data_size());
1783 if (TypeProfileCasts) {
1784 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1785 }
1786 update_mdp_by_constant(mdp_delta);
1788 bind (profile_continue);
1789 }
1790 }
1792 void InterpreterMacroAssembler::profile_typecheck(Register klass,
1793 Register scratch) {
1794 if (ProfileInterpreter) {
1795 Label profile_continue;
1797 // If no method data exists, go to profile_continue.
1798 test_method_data_pointer(profile_continue);
1800 int mdp_delta = in_bytes(BitData::bit_data_size());
1801 if (TypeProfileCasts) {
1802 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1804 // Record the object type.
1805 record_klass_in_profile(klass, scratch, false);
1806 }
1808 // The method data pointer needs to be updated.
1809 update_mdp_by_constant(mdp_delta);
1811 bind (profile_continue);
1812 }
1813 }
1815 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
1816 if (ProfileInterpreter && TypeProfileCasts) {
1817 Label profile_continue;
1819 // If no method data exists, go to profile_continue.
1820 test_method_data_pointer(profile_continue);
1822 int count_offset = in_bytes(CounterData::count_offset());
1823 // Back up the address, since we have already bumped the mdp.
1824 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1826 // *Decrement* the counter. We expect to see zero or small negatives.
1827     increment_mdp_data_at(count_offset, scratch, true);  // true means decrement
1829 bind (profile_continue);
1830 }
1831 }
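// A sketch of the offset arithmetic above: by the time this runs,
// profile_typecheck has already advanced the mdp past the
// virtual-call-sized cast cell, so the count lives behind the current
// mdp and is reached with a negative offset:
//
//   count_offset = in_bytes(CounterData::count_offset())
//                - in_bytes(VirtualCallData::virtual_call_data_size());  // < 0
//   *(mdp + count_offset) -= DataLayout::counter_increment;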
1833 // Count the default case of a switch construct.
1835 void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
1836 if (ProfileInterpreter) {
1837 Label profile_continue;
1839 // If no method data exists, go to profile_continue.
1840 test_method_data_pointer(profile_continue);
1842 // Update the default case count
1843 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1844 scratch);
1846 // The method data pointer needs to be updated.
1847 update_mdp_by_offset(
1848 in_bytes(MultiBranchData::default_displacement_offset()),
1849 scratch);
1851 bind (profile_continue);
1852 }
1853 }
1855 // Count the index'th case of a switch construct.
1857 void InterpreterMacroAssembler::profile_switch_case(Register index,
1858 Register scratch,
1859 Register scratch2,
1860 Register scratch3) {
1861 if (ProfileInterpreter) {
1862 Label profile_continue;
1864 // If no method data exists, go to profile_continue.
1865 test_method_data_pointer(profile_continue);
1867     // Build the base: (index * in_bytes(per_case_size())) + in_bytes(case_array_offset())
1868 set(in_bytes(MultiBranchData::per_case_size()), scratch);
1869 smul(index, scratch, scratch);
1870 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);
1872 // Update the case count
1873 increment_mdp_data_at(scratch,
1874 in_bytes(MultiBranchData::relative_count_offset()),
1875 scratch2,
1876 scratch3);
1878 // The method data pointer needs to be updated.
1879 update_mdp_by_offset(scratch,
1880 in_bytes(MultiBranchData::relative_displacement_offset()),
1881 scratch2);
1883 bind (profile_continue);
1884 }
1885 }
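// Rough sketch of the address math above (all quantities in bytes):
//
//   base = index * in_bytes(MultiBranchData::per_case_size())
//        + in_bytes(MultiBranchData::case_array_offset());
//   *(mdp + base + relative_count_offset) += counter_increment;  // bump case count
//   mdp += *(mdp + base + relative_displacement_offset);         // follow the case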
1887 // Add an InterpMonitorElem to the stack (see frame_sparc.hpp)
1889 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
1890 Register Rtemp,
1891 Register Rtemp2 ) {
1893 Register Rlimit = Lmonitors;
1894 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1895 assert( (delta & LongAlignmentMask) == 0,
1896           "sizeof BasicObjectLock must be a whole number of doublewords");
1898 sub( SP, delta, SP);
1899 sub( Lesp, delta, Lesp);
1900 sub( Lmonitors, delta, Lmonitors);
1902 if (!stack_is_empty) {
1904 // must copy stack contents down
1906 Label start_copying, next;
1908 // untested("monitor stack expansion");
1909 compute_stack_base(Rtemp);
1910 ba( false, start_copying );
1911 delayed()->cmp( Rtemp, Rlimit); // done? duplicated below
1913 // note: must copy from low memory upwards
1914 // On entry to loop,
1915 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1916 // Loop mutates Rtemp
1918 bind( next);
1920 st_ptr(Rtemp2, Rtemp, 0);
1921 inc(Rtemp, wordSize);
1922 cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1924 bind( start_copying );
1926 brx( notEqual, true, pn, next );
1927 delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1929 // done copying stack
1930 }
1931 }
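// The copy-down above is, in effect (rough C sketch; the SPARC stack
// grows toward low addresses and the copy runs from low memory upward):
//
//   SP -= delta;  Lesp -= delta;  Lmonitors -= delta;
//   for (char* p = Lesp + wordSize; p != Lmonitors; p += wordSize)
//     *(intptr_t*)p = *(intptr_t*)(p + delta);   // slide the old word down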
1933 // Locals
1934 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
1935 assert_not_delayed();
1936 sll(index, Interpreter::logStackElementSize, index);
1937 sub(Llocals, index, index);
1938 ld_ptr(index, 0, dst);
1939 // Note: index must hold the effective address--the iinc template uses it
1940 }
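// All of the access_local_* / store_local_* helpers in this section
// share one address computation (locals are indexed downward from
// Llocals); roughly:
//
//   addr = Llocals - (index << Interpreter::logStackElementSize);
//   // then ld / ld_ptr / ldf / load_unaligned_* (or the st forms) at addr;
//   // 'index' is left holding addr, which the iinc template relies on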
1942 // Just like access_local_ptr but the tag is a returnAddress
1943 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
1944 Register dst ) {
1945 assert_not_delayed();
1946 sll(index, Interpreter::logStackElementSize, index);
1947 sub(Llocals, index, index);
1948 ld_ptr(index, 0, dst);
1949 }
1951 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
1952 assert_not_delayed();
1953 sll(index, Interpreter::logStackElementSize, index);
1954 sub(Llocals, index, index);
1955 ld(index, 0, dst);
1956 // Note: index must hold the effective address--the iinc template uses it
1957 }
1960 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
1961 assert_not_delayed();
1962 sll(index, Interpreter::logStackElementSize, index);
1963 sub(Llocals, index, index);
1964 // First half stored at index n+1 (which grows down from Llocals[n])
1965 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
1966 }
1969 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
1970 assert_not_delayed();
1971 sll(index, Interpreter::logStackElementSize, index);
1972 sub(Llocals, index, index);
1973 ldf(FloatRegisterImpl::S, index, 0, dst);
1974 }
1977 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
1978 assert_not_delayed();
1979 sll(index, Interpreter::logStackElementSize, index);
1980 sub(Llocals, index, index);
1981 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1982 }
1985 #ifdef ASSERT
1986 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
1987 Label L;
1989 assert(Rindex != Rscratch, "Registers cannot be same");
1990 assert(Rindex != Rscratch1, "Registers cannot be same");
1991 assert(Rlimit != Rscratch, "Registers cannot be same");
1992 assert(Rlimit != Rscratch1, "Registers cannot be same");
1993 assert(Rscratch1 != Rscratch, "Registers cannot be same");
1995 // untested("reg area corruption");
1996 add(Rindex, offset, Rscratch);
1997 add(Rlimit, 64 + STACK_BIAS, Rscratch1);
1998 cmp(Rscratch, Rscratch1);
1999 brx(Assembler::greaterEqualUnsigned, false, pn, L);
2000 delayed()->nop();
2001 stop("regsave area is being clobbered");
2002 bind(L);
2003 }
2004 #endif // ASSERT
2007 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
2008 assert_not_delayed();
2009 sll(index, Interpreter::logStackElementSize, index);
2010 sub(Llocals, index, index);
2011 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
2012 st(src, index, 0);
2013 }
2015 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
2016 assert_not_delayed();
2017 sll(index, Interpreter::logStackElementSize, index);
2018 sub(Llocals, index, index);
2019 #ifdef ASSERT
2020 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2021 #endif
2022 st_ptr(src, index, 0);
2023 }
2027 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
2028 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
2029 }
2031 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
2032 assert_not_delayed();
2033 sll(index, Interpreter::logStackElementSize, index);
2034 sub(Llocals, index, index);
2035 #ifdef ASSERT
2036 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2037 #endif
2038   store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // first half goes at slot n+1
2039 }
2042 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
2043 assert_not_delayed();
2044 sll(index, Interpreter::logStackElementSize, index);
2045 sub(Llocals, index, index);
2046 #ifdef ASSERT
2047 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
2048 #endif
2049 stf(FloatRegisterImpl::S, src, index, 0);
2050 }
2053 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
2054 assert_not_delayed();
2055 sll(index, Interpreter::logStackElementSize, index);
2056 sub(Llocals, index, index);
2057 #ifdef ASSERT
2058 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2059 #endif
2060 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
2061 }
2064 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
2065 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
2066 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
2067 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
2068 }
2071 Address InterpreterMacroAssembler::top_most_monitor() {
2072 return Address(FP, top_most_monitor_byte_offset());
2073 }
2076 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
2077 add( Lesp, wordSize, Rdest );
2078 }
2080 #endif /* CC_INTERP */
2082 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
2083 assert(UseCompiler, "incrementing must be useful");
2084 #ifdef CC_INTERP
2085 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
2086 InvocationCounter::counter_offset());
2087 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
2088 InvocationCounter::counter_offset());
2089 #else
2090 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
2091 InvocationCounter::counter_offset());
2092 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
2093 InvocationCounter::counter_offset());
2094 #endif /* CC_INTERP */
2095 int delta = InvocationCounter::count_increment;
2097 // Load each counter in a register
2098 ld( inv_counter, Rtmp );
2099 ld( be_counter, Rtmp2 );
2101   assert(is_simm13(delta), "delta too large");
2103   // Add the delta to the invocation counter
2104 add( Rtmp, delta, Rtmp );
2106 // Mask the backedge counter
2107 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2109   // Store the updated invocation counter
2110 st( Rtmp, inv_counter);
2112 // Add invocation counter + backedge counter
2113 add( Rtmp, Rtmp2, Rtmp);
2115 // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
2116 }
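// Rough C sketch of the two counter helpers: the status bits of the
// *other* counter are masked off before the sum is formed, and the sum
// is left in Rtmp for the caller's overflow test:
//
//   inv += InvocationCounter::count_increment;  *inv_counter = inv;
//   Rtmp = inv + (be & InvocationCounter::count_mask_value);
//
// increment_backedge_counter below is symmetric, with the roles swapped.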
2118 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
2119 assert(UseCompiler, "incrementing must be useful");
2120 #ifdef CC_INTERP
2121 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
2122 InvocationCounter::counter_offset());
2123 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
2124 InvocationCounter::counter_offset());
2125 #else
2126 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
2127 InvocationCounter::counter_offset());
2128 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
2129 InvocationCounter::counter_offset());
2130 #endif /* CC_INTERP */
2131 int delta = InvocationCounter::count_increment;
2132 // Load each counter in a register
2133 ld( be_counter, Rtmp );
2134 ld( inv_counter, Rtmp2 );
2136 // Add the delta to the backedge counter
2137 add( Rtmp, delta, Rtmp );
2139   // Mask the invocation counter
2140   and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2142   // Store the updated backedge counter
2143   st( Rtmp, be_counter );
2145 // Add backedge + invocation counter
2146 add( Rtmp, Rtmp2, Rtmp );
2148 // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2149 }
2151 #ifndef CC_INTERP
2152 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2153 Register branch_bcp,
2154 Register Rtmp ) {
2155 Label did_not_overflow;
2156 Label overflow_with_error;
2157 assert_different_registers(backedge_count, Rtmp, branch_bcp);
2158 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2160 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2161 load_contents(limit, Rtmp);
2162 cmp(backedge_count, Rtmp);
2163 br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
2164 delayed()->nop();
2166 // When ProfileInterpreter is on, the backedge_count comes from the
2167   // methodDataOop, whose value does not get reset on the call to
2168 // frequency_counter_overflow(). To avoid excessive calls to the overflow
2169 // routine while the method is being compiled, add a second test to make sure
2170 // the overflow function is called only once every overflow_frequency.
2171 if (ProfileInterpreter) {
2172 const int overflow_frequency = 1024;
2173 andcc(backedge_count, overflow_frequency-1, Rtmp);
2174 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2175 delayed()->nop();
2176 }
2178 // overflow in loop, pass branch bytecode
2179 set(6,Rtmp);
2180 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2182 // Was an OSR adapter generated?
2183 // O0 = osr nmethod
2184 tst(O0);
2185 brx(Assembler::zero, false, Assembler::pn, overflow_with_error);
2186 delayed()->nop();
2188 // Has the nmethod been invalidated already?
2189 ld(O0, nmethod::entry_bci_offset(), O2);
2190 cmp(O2, InvalidOSREntryBci);
2191 br(Assembler::equal, false, Assembler::pn, overflow_with_error);
2192 delayed()->nop();
2194   // migrate the interpreter frame off the stack
2196 mov(G2_thread, L7);
2197 // save nmethod
2198 mov(O0, L6);
2199 set_last_Java_frame(SP, noreg);
2200 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2201 reset_last_Java_frame();
2202 mov(L7, G2_thread);
2204 // move OSR nmethod to I1
2205 mov(L6, I1);
2207 // OSR buffer to I0
2208 mov(O0, I0);
2210 // remove the interpreter frame
2211 restore(I5_savedSP, 0, SP);
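  // After the restore, the register window has rotated back: the values
  // placed in I0/I1 above are now visible as O0/O1, which is why the
  // OSR entry point is loaded through O1 below.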
2213 // Jump to the osr code.
2214 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
2215 jmp(O2, G0);
2216 delayed()->nop();
2218 bind(overflow_with_error);
2220 bind(did_not_overflow);
2221 }
2225 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
2226 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
2227 }
2230 // local helper function for the verify_oop_or_return_address macro
2231 static bool verify_return_address(methodOopDesc* m, int bci) {
2232 #ifndef PRODUCT
2233 address pc = (address)(m->constMethod())
2234 + in_bytes(constMethodOopDesc::codes_offset()) + bci;
2235 // assume it is a valid return address if it is inside m and is preceded by a jsr
2236 if (!m->contains(pc)) return false;
2237 address jsr_pc;
2238 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2239   if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr) return true;  // check bounds before dereferencing
2240 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2241   if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr_w) return true;
2242 #endif // PRODUCT
2243 return false;
2244 }
2247 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2248 if (!VerifyOops) return;
2249 // the VM documentation for the astore[_wide] bytecode allows
2250 // the TOS to be not only an oop but also a return address
2251 Label test;
2252 Label skip;
2253 // See if it is an address (in the current method):
2255 mov(reg, Rtmp);
2256 const int log2_bytecode_size_limit = 16;
2257 srl(Rtmp, log2_bytecode_size_limit, Rtmp);
2258 br_notnull( Rtmp, false, pt, test );
2259 delayed()->nop();
2261 // %%% should use call_VM_leaf here?
2262 save_frame_and_mov(0, Lmethod, O0, reg, O1);
2263 save_thread(L7_thread_cache);
2264 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2265 delayed()->nop();
2266 restore_thread(L7_thread_cache);
2267 br_notnull( O0, false, pt, skip );
2268 delayed()->restore();
2270 // Perform a more elaborate out-of-line call
2271 // Not an address; verify it:
2272 bind(test);
2273 verify_oop(reg);
2274 bind(skip);
2275 }
2278 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2279 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2280 }
2281 #endif /* CC_INTERP */
2283 // Inline assembly for:
2284 //
2285 // if (thread is in interp_only_mode) {
2286 // InterpreterRuntime::post_method_entry();
2287 // }
2288 // if (DTraceMethodProbes) {
2289 // SharedRuntime::dtrace_method_entry(method, receiver);
2290 // }
2291 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2292 // SharedRuntime::rc_trace_method_entry(method, receiver);
2293 // }
2295 void InterpreterMacroAssembler::notify_method_entry() {
2297 // C++ interpreter only uses this for native methods.
2299 // Whenever JVMTI puts a thread in interp_only_mode, method
2300 // entry/exit events are sent for that thread to track stack
2301   // depth. If it is possible to enter interp_only_mode, we add
2302 // the code to check if the event should be sent.
2303 if (JvmtiExport::can_post_interpreter_events()) {
2304 Label L;
2305 Register temp_reg = O5;
2306 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2307 ld(interp_only, temp_reg);
2308 tst(temp_reg);
2309 br(zero, false, pt, L);
2310 delayed()->nop();
2311 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2312 bind(L);
2313 }
2315 {
2316 Register temp_reg = O5;
2317 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2318 call_VM_leaf(noreg,
2319 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2320 G2_thread, Lmethod);
2321 }
2323 // RedefineClasses() tracing support for obsolete method entry
2324 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2325 call_VM_leaf(noreg,
2326 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2327 G2_thread, Lmethod);
2328 }
2329 }
2332 // Inline assembly for:
2333 //
2334 // if (thread is in interp_only_mode) {
2335 // // save result
2336 // InterpreterRuntime::post_method_exit();
2337 // // restore result
2338 // }
2339 // if (DTraceMethodProbes) {
2340 // SharedRuntime::dtrace_method_exit(thread, method);
2341 // }
2342 //
2343 // Native methods have their result stored in d_tmp and l_tmp
2344 // Java methods have their result stored in the expression stack
2346 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2347 TosState state,
2348 NotifyMethodExitMode mode) {
2349 // C++ interpreter only uses this for native methods.
2351 // Whenever JVMTI puts a thread in interp_only_mode, method
2352 // entry/exit events are sent for that thread to track stack
2353   // depth. If it is possible to enter interp_only_mode, we add
2354 // the code to check if the event should be sent.
2355 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2356 Label L;
2357 Register temp_reg = O5;
2358 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2359 ld(interp_only, temp_reg);
2360 tst(temp_reg);
2361 br(zero, false, pt, L);
2362 delayed()->nop();
2364 // Note: frame::interpreter_frame_result has a dependency on how the
2365 // method result is saved across the call to post_method_exit. For
2366 // native methods it assumes the result registers are saved to
2367 // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2368 // implementation will need to be updated too.
2370 save_return_value(state, is_native_method);
2371 call_VM(noreg,
2372 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2373 restore_return_value(state, is_native_method);
2374 bind(L);
2375 }
2377 {
2378 Register temp_reg = O5;
2379 // Dtrace notification
2380 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2381 save_return_value(state, is_native_method);
2382 call_VM_leaf(
2383 noreg,
2384 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2385 G2_thread, Lmethod);
2386 restore_return_value(state, is_native_method);
2387 }
2388 }
2390 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
2391 #ifdef CC_INTERP
2392 // result potentially in O0/O1: save it across calls
2393 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
2394 #ifdef _LP64
2395 stx(O0, STATE(_native_lresult));
2396 #else
2397 std(O0, STATE(_native_lresult));
2398 #endif
2399 #else // CC_INTERP
2400 if (is_native_call) {
2401 stf(FloatRegisterImpl::D, F0, d_tmp);
2402 #ifdef _LP64
2403 stx(O0, l_tmp);
2404 #else
2405 std(O0, l_tmp);
2406 #endif
2407 } else {
2408 push(state);
2409 }
2410 #endif // CC_INTERP
2411 }
2413 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
2414 #ifdef CC_INTERP
2415 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
2416 #ifdef _LP64
2417 ldx(STATE(_native_lresult), O0);
2418 #else
2419 ldd(STATE(_native_lresult), O0);
2420 #endif
2421 #else // CC_INTERP
2422 if (is_native_call) {
2423 ldf(FloatRegisterImpl::D, d_tmp, F0);
2424 #ifdef _LP64
2425 ldx(l_tmp, O0);
2426 #else
2427 ldd(l_tmp, O0);
2428 #endif
2429 } else {
2430 pop(state);
2431 }
2432 #endif // CC_INTERP
2433 }
2435 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
2436 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
2437 int increment, int mask,
2438 Register scratch1, Register scratch2,
2439 Condition cond, Label *where) {
2440 ld(counter_addr, scratch1);
2441 add(scratch1, increment, scratch1);
2442 if (is_simm13(mask)) {
2443 andcc(scratch1, mask, G0);
2444 } else {
2445 set(mask, scratch2);
2446 andcc(scratch1, scratch2, G0);
2447 }
2448 br(cond, false, Assembler::pn, *where);
2449 delayed()->st(scratch1, counter_addr);
2450 }
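// Rough C equivalent; note the store sits in a non-annulled delay slot,
// so the bumped counter is written back whether or not the branch is
// taken:
//
//   int c = *counter_addr + increment;
//   *counter_addr = c;
//   if ((c & mask) <cond against zero>) goto *where;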