src/cpu/sparc/vm/interp_masm_sparc.cpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2438:dd031b2226de
child:       2698:38fea01eb669
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the reference object to be processed prematurely, leaving dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
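
For illustration, a minimal C++ sketch of the logging idea described above, under stated assumptions: the names (SATBBuffer, satb_buffer, read_referent, marking_active) are hypothetical stand-ins, not HotSpot APIs, and the real fix emits this logic in the interpreted/compiled access paths rather than as a plain function.

    #include <vector>

    // Hypothetical thread-local SATB (snapshot-at-the-beginning) buffer.
    struct SATBBuffer {
      std::vector<void*> entries;
      void enqueue(void* obj) { entries.push_back(obj); }  // handed to the marker later
    };

    static thread_local SATBBuffer satb_buffer;
    static bool marking_active = false;  // true while concurrent marking runs

    // Read barrier for java.lang.ref.Reference::referent: intercepts the load
    // (Reference.get() intrinsic, JNI, reflection, Unsafe in the real fix) and
    // logs any non-null referent so SATB marking treats it as live.
    void* read_referent(void** referent_field) {
      void* referent = *referent_field;
      if (marking_active && referent != nullptr) {
        satb_buffer.enqueue(referent);
      }
      return referent;
    }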

duke@435 1 /*
iveresov@2438 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "interp_masm_sparc.hpp"
stefank@2314 27 #include "interpreter/interpreter.hpp"
stefank@2314 28 #include "interpreter/interpreterRuntime.hpp"
stefank@2314 29 #include "oops/arrayOop.hpp"
stefank@2314 30 #include "oops/markOop.hpp"
stefank@2314 31 #include "oops/methodDataOop.hpp"
stefank@2314 32 #include "oops/methodOop.hpp"
stefank@2314 33 #include "prims/jvmtiExport.hpp"
stefank@2314 34 #include "prims/jvmtiRedefineClassesTrace.hpp"
stefank@2314 35 #include "prims/jvmtiThreadState.hpp"
stefank@2314 36 #include "runtime/basicLock.hpp"
stefank@2314 37 #include "runtime/biasedLocking.hpp"
stefank@2314 38 #include "runtime/sharedRuntime.hpp"
stefank@2314 39 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 40 # include "thread_linux.inline.hpp"
stefank@2314 41 #endif
stefank@2314 42 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 43 # include "thread_solaris.inline.hpp"
stefank@2314 44 #endif
duke@435 45
duke@435 46 #ifndef CC_INTERP
duke@435 47 #ifndef FAST_DISPATCH
duke@435 48 #define FAST_DISPATCH 1
duke@435 49 #endif
duke@435 50 #undef FAST_DISPATCH
duke@435 51
duke@435 52 // Implementation of InterpreterMacroAssembler
duke@435 53
duke@435 54 // This file specializes the assembler with interpreter-specific macros
duke@435 55
twisti@1162 56 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
twisti@1162 57 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
duke@435 58
duke@435 59 #else // CC_INTERP
duke@435 60 #ifndef STATE
duke@435 61 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
duke@435 62 #endif // STATE
duke@435 63
duke@435 64 #endif // CC_INTERP
duke@435 65
duke@435 66 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
duke@435 67 // Note: this algorithm is also used by C1's OSR entry sequence.
duke@435 68 // Any changes should also be applied to CodeEmitter::emit_osr_entry().
duke@435 69 assert_different_registers(args_size, locals_size);
duke@435 70 // max_locals*2 for TAGS. Assumes that args_size has already been adjusted.
duke@435 71 subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words
duke@435 72 // Use br/mov combination because it works on both V8 and V9 and is
duke@435 73 // faster.
duke@435 74 Label skip_move;
duke@435 75 br(Assembler::negative, true, Assembler::pt, skip_move);
duke@435 76 delayed()->mov(G0, delta);
duke@435 77 bind(skip_move);
duke@435 78 round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned)
duke@435 79 sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes
duke@435 80 }
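
// Worked example (illustrative): with args_size = 3 words and locals_size = 5
// words, delta = 2 extra local words; round_to keeps it at 2 (already a
// multiple of WordsPerLong) and the sll by LogBytesPerWord yields 8 bytes on a
// 32-bit VM or 16 bytes on a 64-bit VM. If locals_size <= args_size, the
// branch above clears delta to 0 and no extra space is reserved.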
duke@435 81
duke@435 82 #ifndef CC_INTERP
duke@435 83
duke@435 84 // Dispatch code executed in the prolog of a bytecode which does not do its
duke@435 85 // own dispatch. The dispatch address is computed and placed in IdispatchAddress
duke@435 86 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
duke@435 87 assert_not_delayed();
duke@435 88 #ifdef FAST_DISPATCH
duke@435 89 // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
duke@435 90 // they both use I2.
duke@435 91 assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
duke@435 92 ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode
duke@435 93 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
duke@435 94 // add offset to correct dispatch table
duke@435 95 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
duke@435 96 ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
duke@435 97 #else
twisti@1162 98 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
duke@435 99 // dispatch table to use
twisti@1162 100 AddressLiteral tbl(Interpreter::dispatch_table(state));
twisti@1162 101 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
twisti@1162 102 set(tbl, G3_scratch); // compute addr of table
twisti@1162 103 ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr
duke@435 104 #endif
duke@435 105 }
duke@435 106
duke@435 107
duke@435 108 // Dispatch code executed in the epilog of a bytecode which does not do its
duke@435 109 // own dispatch. The dispatch address in IdispatchAddress is used for the
duke@435 110 // dispatch.
duke@435 111 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
duke@435 112 assert_not_delayed();
duke@435 113 verify_FPU(1, state);
duke@435 114 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 115 jmp( IdispatchAddress, 0 );
duke@435 116 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
duke@435 117 else delayed()->nop();
duke@435 118 }
duke@435 119
duke@435 120
duke@435 121 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
duke@435 122 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
duke@435 123 assert_not_delayed();
duke@435 124 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
duke@435 125 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
duke@435 126 }
duke@435 127
duke@435 128
duke@435 129 void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
duke@435 130 // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
duke@435 131 assert_not_delayed();
duke@435 132 ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode
duke@435 133 dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
duke@435 134 }
duke@435 135
duke@435 136
duke@435 137 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
duke@435 138 // load current bytecode
duke@435 139 assert_not_delayed();
duke@435 140 ldub( Lbcp, 0, Lbyte_code); // load next bytecode
duke@435 141 dispatch_base(state, table);
duke@435 142 }
duke@435 143
duke@435 144
duke@435 145 void InterpreterMacroAssembler::call_VM_leaf_base(
duke@435 146 Register java_thread,
duke@435 147 address entry_point,
duke@435 148 int number_of_arguments
duke@435 149 ) {
duke@435 150 if (!java_thread->is_valid())
duke@435 151 java_thread = L7_thread_cache;
duke@435 152 // super call
duke@435 153 MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
duke@435 154 }
duke@435 155
duke@435 156
duke@435 157 void InterpreterMacroAssembler::call_VM_base(
duke@435 158 Register oop_result,
duke@435 159 Register java_thread,
duke@435 160 Register last_java_sp,
duke@435 161 address entry_point,
duke@435 162 int number_of_arguments,
duke@435 163 bool check_exception
duke@435 164 ) {
duke@435 165 if (!java_thread->is_valid())
duke@435 166 java_thread = L7_thread_cache;
duke@435 167 // See class ThreadInVMfromInterpreter, which assumes that the interpreter
duke@435 168 // takes responsibility for setting its own thread-state on call-out.
duke@435 169 // However, ThreadInVMfromInterpreter resets the state to "in_Java".
duke@435 170
duke@435 171 //save_bcp(); // save bcp
duke@435 172 MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
duke@435 173 //restore_bcp(); // restore bcp
duke@435 174 //restore_locals(); // restore locals pointer
duke@435 175 }
duke@435 176
duke@435 177
duke@435 178 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
duke@435 179 if (JvmtiExport::can_pop_frame()) {
duke@435 180 Label L;
duke@435 181
duke@435 182 // Check the "pending popframe condition" flag in the current thread
twisti@1162 183 ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);
duke@435 184
duke@435 185 // Initiate popframe handling only if it is not already being processed. If the flag
duke@435 186 // has the popframe_processing bit set, it means that this code is called *during* popframe
duke@435 187 // handling - we don't want to reenter.
duke@435 188 btst(JavaThread::popframe_pending_bit, scratch_reg);
duke@435 189 br(zero, false, pt, L);
duke@435 190 delayed()->nop();
duke@435 191 btst(JavaThread::popframe_processing_bit, scratch_reg);
duke@435 192 br(notZero, false, pt, L);
duke@435 193 delayed()->nop();
duke@435 194
duke@435 195 // Call Interpreter::remove_activation_preserving_args_entry() to get the
duke@435 196 // address of the same-named entrypoint in the generated interpreter code.
duke@435 197 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
duke@435 198
duke@435 199 // Jump to Interpreter::_remove_activation_preserving_args_entry
duke@435 200 jmpl(O0, G0, G0);
duke@435 201 delayed()->nop();
duke@435 202 bind(L);
duke@435 203 }
duke@435 204 }
duke@435 205
duke@435 206
duke@435 207 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
duke@435 208 Register thr_state = G4_scratch;
twisti@1162 209 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
twisti@1162 210 const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
twisti@1162 211 const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
twisti@1162 212 const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
duke@435 213 switch (state) {
duke@435 214 case ltos: ld_long(val_addr, Otos_l); break;
duke@435 215 case atos: ld_ptr(oop_addr, Otos_l);
duke@435 216 st_ptr(G0, oop_addr); break;
duke@435 217 case btos: // fall through
duke@435 218 case ctos: // fall through
duke@435 219 case stos: // fall through
duke@435 220 case itos: ld(val_addr, Otos_l1); break;
duke@435 221 case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
duke@435 222 case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
duke@435 223 case vtos: /* nothing to do */ break;
duke@435 224 default : ShouldNotReachHere();
duke@435 225 }
duke@435 226 // Clean up tos value in the jvmti thread state
duke@435 227 or3(G0, ilgl, G3_scratch);
duke@435 228 stw(G3_scratch, tos_addr);
duke@435 229 st_long(G0, val_addr);
duke@435 230 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 231 }
duke@435 232
duke@435 233
duke@435 234 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
duke@435 235 if (JvmtiExport::can_force_early_return()) {
duke@435 236 Label L;
duke@435 237 Register thr_state = G3_scratch;
twisti@1162 238 ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
duke@435 239 tst(thr_state);
duke@435 240 br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
duke@435 241 delayed()->nop();
duke@435 242
duke@435 243 // Initiate earlyret handling only if it is not already being processed.
duke@435 244 // If the flag has the earlyret_processing bit set, it means that this code
duke@435 245 // is called *during* earlyret handling - we don't want to reenter.
twisti@1162 246 ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
duke@435 247 cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
duke@435 248 br(Assembler::notEqual, false, pt, L);
duke@435 249 delayed()->nop();
duke@435 250
duke@435 251 // Call Interpreter::remove_activation_early_entry() to get the address of the
duke@435 252 // same-named entrypoint in the generated interpreter code
twisti@1162 253 ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
duke@435 254 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
duke@435 255
duke@435 256 // Jump to Interpreter::_remove_activation_early_entry
duke@435 257 jmpl(O0, G0, G0);
duke@435 258 delayed()->nop();
duke@435 259 bind(L);
duke@435 260 }
duke@435 261 }
duke@435 262
duke@435 263
twisti@1730 264 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
duke@435 265 mov(arg_1, O0);
twisti@1730 266 mov(arg_2, O1);
twisti@1730 267 MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
duke@435 268 }
duke@435 269 #endif /* CC_INTERP */
duke@435 270
duke@435 271
duke@435 272 #ifndef CC_INTERP
duke@435 273
duke@435 274 void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
duke@435 275 assert_not_delayed();
duke@435 276 dispatch_Lbyte_code(state, table);
duke@435 277 }
duke@435 278
duke@435 279
duke@435 280 void InterpreterMacroAssembler::dispatch_normal(TosState state) {
duke@435 281 dispatch_base(state, Interpreter::normal_table(state));
duke@435 282 }
duke@435 283
duke@435 284
duke@435 285 void InterpreterMacroAssembler::dispatch_only(TosState state) {
duke@435 286 dispatch_base(state, Interpreter::dispatch_table(state));
duke@435 287 }
duke@435 288
duke@435 289
duke@435 290 // common code to dispatch and dispatch_only
duke@435 291 // dispatch value in Lbyte_code and increment Lbcp
duke@435 292
duke@435 293 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
duke@435 294 verify_FPU(1, state);
duke@435 295 // %%%%% maybe implement +VerifyActivationFrameSize here
duke@435 296 //verify_thread(); //too slow; we will just verify on method entry & exit
duke@435 297 if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 298 #ifdef FAST_DISPATCH
duke@435 299 if (table == Interpreter::dispatch_table(state)) {
duke@435 300 // use IdispatchTables
duke@435 301 add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
duke@435 302 // add offset to correct dispatch table
duke@435 303 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
duke@435 304 ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr
duke@435 305 } else {
duke@435 306 #endif
duke@435 307 // dispatch table to use
twisti@1162 308 AddressLiteral tbl(table);
duke@435 309 sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize
twisti@1162 310 set(tbl, G3_scratch); // compute addr of table
duke@435 311 ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr
duke@435 312 #ifdef FAST_DISPATCH
duke@435 313 }
duke@435 314 #endif
duke@435 315 jmp( G3_scratch, 0 );
duke@435 316 if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr);
duke@435 317 else delayed()->nop();
duke@435 318 }
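
// Example (illustrative): for bytecode 0x60 (iadd) with an 8-byte word, the
// sll above scales Lbyte_code to 0x60 * 8 = 0x300; adding the table base and
// loading gives the iadd handler for the current TosState, which the jmp then
// enters while the delay slot advances Lbcp.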
duke@435 319
duke@435 320
duke@435 321 // Helpers for expression stack
duke@435 322
duke@435 323 // Longs and doubles are Category 2 computational types in the
duke@435 324 // JVM specification (section 3.11.1) and take 2 expression stack or
duke@435 325 // local slots.
duke@435 326 // Aligning them on 32 bit with tagged stacks is hard because the code generated
duke@435 327 // for the dup* bytecodes depends on what types are already on the stack.
duke@435 328 // If the types are split into the two stack/local slots, that is much easier
duke@435 329 // (and we can use 0 for non-reference tags).
duke@435 330
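// Illustrative picture of the operand stack (it grows downward; Lesp points
// at the next free slot, one stackElementSize below top-of-stack):
//
//   Lesp + 2*stackElementSize : second stack element
//   Lesp + 1*stackElementSize : top element == expr_offset_in_bytes(0)
//   Lesp                      : next free slot
//
// A category 2 value occupies two adjacent slots, written in memory-correct
// order by the store_unaligned_* helpers below.
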
duke@435 331 // Known good alignment in _LP64 but unknown otherwise
duke@435 332 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
duke@435 333 assert_not_delayed();
duke@435 334
duke@435 335 #ifdef _LP64
duke@435 336 ldf(FloatRegisterImpl::D, r1, offset, d);
duke@435 337 #else
duke@435 338 ldf(FloatRegisterImpl::S, r1, offset, d);
twisti@1861 339 ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
duke@435 340 #endif
duke@435 341 }
duke@435 342
duke@435 343 // Known good alignment in _LP64 but unknown otherwise
duke@435 344 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
duke@435 345 assert_not_delayed();
duke@435 346
duke@435 347 #ifdef _LP64
duke@435 348 stf(FloatRegisterImpl::D, d, r1, offset);
duke@435 349 // store something more useful here
twisti@1861 350 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
duke@435 351 #else
duke@435 352 stf(FloatRegisterImpl::S, d, r1, offset);
twisti@1861 353 stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
duke@435 354 #endif
duke@435 355 }
duke@435 356
duke@435 357
duke@435 358 // Known good alignment in _LP64 but unknown otherwise
duke@435 359 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
duke@435 360 assert_not_delayed();
duke@435 361 #ifdef _LP64
duke@435 362 ldx(r1, offset, rd);
duke@435 363 #else
duke@435 364 ld(r1, offset, rd);
twisti@1861 365 ld(r1, offset + Interpreter::stackElementSize, rd->successor());
duke@435 366 #endif
duke@435 367 }
duke@435 368
duke@435 369 // Known good alignment in _LP64 but unknown otherwise
duke@435 370 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
duke@435 371 assert_not_delayed();
duke@435 372
duke@435 373 #ifdef _LP64
duke@435 374 stx(l, r1, offset);
duke@435 375 // store something more useful here
twisti@1861 376 debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
duke@435 377 #else
duke@435 378 st(l, r1, offset);
twisti@1861 379 st(l->successor(), r1, offset + Interpreter::stackElementSize);
duke@435 380 #endif
duke@435 381 }
duke@435 382
duke@435 383 void InterpreterMacroAssembler::pop_i(Register r) {
duke@435 384 assert_not_delayed();
duke@435 385 ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
twisti@1861 386 inc(Lesp, Interpreter::stackElementSize);
duke@435 387 debug_only(verify_esp(Lesp));
duke@435 388 }
duke@435 389
duke@435 390 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
duke@435 391 assert_not_delayed();
duke@435 392 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
twisti@1861 393 inc(Lesp, Interpreter::stackElementSize);
duke@435 394 debug_only(verify_esp(Lesp));
duke@435 395 }
duke@435 396
duke@435 397 void InterpreterMacroAssembler::pop_l(Register r) {
duke@435 398 assert_not_delayed();
duke@435 399 load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
twisti@1861 400 inc(Lesp, 2*Interpreter::stackElementSize);
duke@435 401 debug_only(verify_esp(Lesp));
duke@435 402 }
duke@435 403
duke@435 404
duke@435 405 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
duke@435 406 assert_not_delayed();
duke@435 407 ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
twisti@1861 408 inc(Lesp, Interpreter::stackElementSize);
duke@435 409 debug_only(verify_esp(Lesp));
duke@435 410 }
duke@435 411
duke@435 412
duke@435 413 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
duke@435 414 assert_not_delayed();
duke@435 415 load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
twisti@1861 416 inc(Lesp, 2*Interpreter::stackElementSize);
duke@435 417 debug_only(verify_esp(Lesp));
duke@435 418 }
duke@435 419
duke@435 420
duke@435 421 void InterpreterMacroAssembler::push_i(Register r) {
duke@435 422 assert_not_delayed();
duke@435 423 debug_only(verify_esp(Lesp));
twisti@1861 424 st(r, Lesp, 0);
twisti@1861 425 dec(Lesp, Interpreter::stackElementSize);
duke@435 426 }
duke@435 427
duke@435 428 void InterpreterMacroAssembler::push_ptr(Register r) {
duke@435 429 assert_not_delayed();
twisti@1861 430 st_ptr(r, Lesp, 0);
twisti@1861 431 dec(Lesp, Interpreter::stackElementSize);
duke@435 432 }
duke@435 433
duke@435 434 // remember: our convention for longs in SPARC is:
duke@435 435 // O0 (Otos_l1) has high-order part in first word,
duke@435 436 // O1 (Otos_l2) has low-order part in second word
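// Example (illustrative): for the long 0x1122334455667788 on a 32-bit VM,
// Otos_l1 (O0) holds 0x11223344 and Otos_l2 (O1) holds 0x55667788;
// store_unaligned_long writes the high word at the lower address, so memory
// holds the big-endian image SPARC expects.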
duke@435 437
duke@435 438 void InterpreterMacroAssembler::push_l(Register r) {
duke@435 439 assert_not_delayed();
duke@435 440 debug_only(verify_esp(Lesp));
twisti@1861 441 // Longs are stored in memory-correct order, even if unaligned.
twisti@1861 442 int offset = -Interpreter::stackElementSize;
duke@435 443 store_unaligned_long(r, Lesp, offset);
twisti@1861 444 dec(Lesp, 2 * Interpreter::stackElementSize);
duke@435 445 }
duke@435 446
duke@435 447
duke@435 448 void InterpreterMacroAssembler::push_f(FloatRegister f) {
duke@435 449 assert_not_delayed();
duke@435 450 debug_only(verify_esp(Lesp));
twisti@1861 451 stf(FloatRegisterImpl::S, f, Lesp, 0);
twisti@1861 452 dec(Lesp, Interpreter::stackElementSize);
duke@435 453 }
duke@435 454
duke@435 455
duke@435 456 void InterpreterMacroAssembler::push_d(FloatRegister d) {
duke@435 457 assert_not_delayed();
duke@435 458 debug_only(verify_esp(Lesp));
twisti@1861 459 // Longs are stored in memory-correct order, even if unaligned.
twisti@1861 460 int offset = -Interpreter::stackElementSize;
duke@435 461 store_unaligned_double(d, Lesp, offset);
twisti@1861 462 dec(Lesp, 2 * Interpreter::stackElementSize);
duke@435 463 }
duke@435 464
duke@435 465
duke@435 466 void InterpreterMacroAssembler::push(TosState state) {
duke@435 467 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 468 switch (state) {
duke@435 469 case atos: push_ptr(); break;
duke@435 470 case btos: push_i(); break;
duke@435 471 case ctos:
duke@435 472 case stos: push_i(); break;
duke@435 473 case itos: push_i(); break;
duke@435 474 case ltos: push_l(); break;
duke@435 475 case ftos: push_f(); break;
duke@435 476 case dtos: push_d(); break;
duke@435 477 case vtos: /* nothing to do */ break;
duke@435 478 default : ShouldNotReachHere();
duke@435 479 }
duke@435 480 }
duke@435 481
duke@435 482
duke@435 483 void InterpreterMacroAssembler::pop(TosState state) {
duke@435 484 switch (state) {
duke@435 485 case atos: pop_ptr(); break;
duke@435 486 case btos: pop_i(); break;
duke@435 487 case ctos:
duke@435 488 case stos: pop_i(); break;
duke@435 489 case itos: pop_i(); break;
duke@435 490 case ltos: pop_l(); break;
duke@435 491 case ftos: pop_f(); break;
duke@435 492 case dtos: pop_d(); break;
duke@435 493 case vtos: /* nothing to do */ break;
duke@435 494 default : ShouldNotReachHere();
duke@435 495 }
duke@435 496 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 497 }
duke@435 498
duke@435 499
twisti@1861 500 // Helpers for swap and dup
twisti@1861 501 void InterpreterMacroAssembler::load_ptr(int n, Register val) {
duke@435 502 ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
duke@435 503 }
twisti@1861 504 void InterpreterMacroAssembler::store_ptr(int n, Register val) {
duke@435 505 st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
duke@435 506 }
duke@435 507
duke@435 508
duke@435 509 void InterpreterMacroAssembler::load_receiver(Register param_count,
duke@435 510 Register recv) {
twisti@1861 511 sll(param_count, Interpreter::logStackElementSize, param_count);
duke@435 512 ld_ptr(Lesp, param_count, recv); // gets receiver Oop
duke@435 513 }
duke@435 514
duke@435 515 void InterpreterMacroAssembler::empty_expression_stack() {
duke@435 516 // Reset Lesp.
duke@435 517 sub( Lmonitors, wordSize, Lesp );
duke@435 518
duke@435 519 // Reset SP by subtracting more space from Lesp.
duke@435 520 Label done;
duke@435 521 verify_oop(Lmethod);
twisti@1162 522 assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
duke@435 523
duke@435 524 // A native does not need to do this, since its callee does not change SP.
twisti@1162 525 ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size); // Load access flags.
duke@435 526 btst(JVM_ACC_NATIVE, Gframe_size);
duke@435 527 br(Assembler::notZero, false, Assembler::pt, done);
duke@435 528 delayed()->nop();
duke@435 529
duke@435 530 // Compute max expression stack+register save area
twisti@1162 531 lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size); // Load max stack.
duke@435 532 add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
duke@435 533
duke@435 534 //
duke@435 535 // now set up a stack frame with the size computed above
duke@435 536 //
duke@435 537 //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
duke@435 538 sll( Gframe_size, LogBytesPerWord, Gframe_size );
duke@435 539 sub( Lesp, Gframe_size, Gframe_size );
duke@435 540 and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary
duke@435 541 debug_only(verify_sp(Gframe_size, G4_scratch));
duke@435 542 #ifdef _LP64
duke@435 543 sub(Gframe_size, STACK_BIAS, Gframe_size );
duke@435 544 #endif
duke@435 545 mov(Gframe_size, SP);
duke@435 546
duke@435 547 bind(done);
duke@435 548 }
duke@435 549
duke@435 550
duke@435 551 #ifdef ASSERT
duke@435 552 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
duke@435 553 Label Bad, OK;
duke@435 554
duke@435 555 // Saved SP must be aligned.
duke@435 556 #ifdef _LP64
duke@435 557 btst(2*BytesPerWord-1, Rsp);
duke@435 558 #else
duke@435 559 btst(LongAlignmentMask, Rsp);
duke@435 560 #endif
duke@435 561 br(Assembler::notZero, false, Assembler::pn, Bad);
duke@435 562 delayed()->nop();
duke@435 563
duke@435 564 // Saved SP, plus register window size, must not be above FP.
duke@435 565 add(Rsp, frame::register_save_words * wordSize, Rtemp);
duke@435 566 #ifdef _LP64
duke@435 567 sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP
duke@435 568 #endif
duke@435 569 cmp(Rtemp, FP);
duke@435 570 brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
duke@435 571 delayed()->nop();
duke@435 572
duke@435 573 // Saved SP must not be ridiculously below current SP.
duke@435 574 size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
duke@435 575 set(maxstack, Rtemp);
duke@435 576 sub(SP, Rtemp, Rtemp);
duke@435 577 #ifdef _LP64
duke@435 578 add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp
duke@435 579 #endif
duke@435 580 cmp(Rsp, Rtemp);
duke@435 581 brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
duke@435 582 delayed()->nop();
duke@435 583
duke@435 584 br(Assembler::always, false, Assembler::pn, OK);
duke@435 585 delayed()->nop();
duke@435 586
duke@435 587 bind(Bad);
duke@435 588 stop("on return to interpreted call, restored SP is corrupted");
duke@435 589
duke@435 590 bind(OK);
duke@435 591 }
duke@435 592
duke@435 593
duke@435 594 void InterpreterMacroAssembler::verify_esp(Register Resp) {
duke@435 595 // about to read or write Resp[0]
duke@435 596 // make sure it is not in the monitors or the register save area
duke@435 597 Label OK1, OK2;
duke@435 598
duke@435 599 cmp(Resp, Lmonitors);
duke@435 600 brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
duke@435 601 delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
duke@435 602 stop("too many pops: Lesp points into monitor area");
duke@435 603 bind(OK1);
duke@435 604 #ifdef _LP64
duke@435 605 sub(Resp, STACK_BIAS, Resp);
duke@435 606 #endif
duke@435 607 cmp(Resp, SP);
duke@435 608 brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
duke@435 609 delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
duke@435 610 stop("too many pushes: Lesp points into register window");
duke@435 611 bind(OK2);
duke@435 612 }
duke@435 613 #endif // ASSERT
duke@435 614
duke@435 615 // Load compiled (i2c) or interpreter entry when calling from interpreted and
duke@435 616 // do the call. Centralized so that all interpreter calls will do the same actions.
duke@435 617 // If jvmti single stepping is on for a thread we must not call compiled code.
duke@435 618 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
duke@435 619
duke@435 620 // Assume we want to go compiled if available
duke@435 621
duke@435 622 ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
duke@435 623
duke@435 624 if (JvmtiExport::can_post_interpreter_events()) {
duke@435 625 // JVMTI events, such as single-stepping, are implemented partly by avoiding running
duke@435 626 // compiled code in threads for which the event is enabled. Check here for
duke@435 627 // interp_only_mode if these events CAN be enabled.
duke@435 628 verify_thread();
duke@435 629 Label skip_compiled_code;
duke@435 630
twisti@1162 631 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
duke@435 632 ld(interp_only, scratch);
duke@435 633 tst(scratch);
duke@435 634 br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
duke@435 635 delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
duke@435 636 bind(skip_compiled_code);
duke@435 637 }
duke@435 638
duke@435 639 // the i2c_adapters need methodOop in G5_method (right? %%%)
duke@435 640 // do the call
duke@435 641 #ifdef ASSERT
duke@435 642 {
duke@435 643 Label ok;
duke@435 644 br_notnull(target, false, Assembler::pt, ok);
duke@435 645 delayed()->nop();
duke@435 646 stop("null entry point");
duke@435 647 bind(ok);
duke@435 648 }
duke@435 649 #endif // ASSERT
duke@435 650
duke@435 651 // Adjust Rret first so Llast_SP can be same as Rret
duke@435 652 add(Rret, -frame::pc_return_offset, O7);
duke@435 653 add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
duke@435 654 // Record SP so we can remove any stack space allocated by adapter transition
duke@435 655 jmp(target, 0);
duke@435 656 delayed()->mov(SP, Llast_SP);
duke@435 657 }
duke@435 658
duke@435 659 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
duke@435 660 assert_not_delayed();
duke@435 661
duke@435 662 Label not_taken;
duke@435 663 if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
duke@435 664 else br (cc, false, Assembler::pn, not_taken);
duke@435 665 delayed()->nop();
duke@435 666
duke@435 667 TemplateTable::branch(false,false);
duke@435 668
duke@435 669 bind(not_taken);
duke@435 670
duke@435 671 profile_not_taken_branch(G3_scratch);
duke@435 672 }
duke@435 673
duke@435 674
duke@435 675 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
duke@435 676 int bcp_offset,
duke@435 677 Register Rtmp,
duke@435 678 Register Rdst,
duke@435 679 signedOrNot is_signed,
duke@435 680 setCCOrNot should_set_CC ) {
duke@435 681 assert(Rtmp != Rdst, "need separate temp register");
duke@435 682 assert_not_delayed();
duke@435 683 switch (is_signed) {
duke@435 684 default: ShouldNotReachHere();
duke@435 685
duke@435 686 case Signed: ldsb( Lbcp, bcp_offset, Rdst ); break; // high byte
duke@435 687 case Unsigned: ldub( Lbcp, bcp_offset, Rdst ); break; // high byte
duke@435 688 }
duke@435 689 ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte
duke@435 690 sll( Rdst, BitsPerByte, Rdst);
duke@435 691 switch (should_set_CC ) {
duke@435 692 default: ShouldNotReachHere();
duke@435 693
duke@435 694 case set_CC: orcc( Rdst, Rtmp, Rdst ); break;
duke@435 695 case dont_set_CC: or3( Rdst, Rtmp, Rdst ); break;
duke@435 696 }
duke@435 697 }
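
// Example (illustrative): with stream bytes 0xFF 0xFE at Lbcp + bcp_offset,
// Signed produces (-1 << 8) | 0xFE = -2 while Unsigned produces
// (0xFF << 8) | 0xFE = 0xFFFE; with set_CC the final orcc also sets the
// condition codes on the result.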
duke@435 698
duke@435 699
duke@435 700 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
duke@435 701 int bcp_offset,
duke@435 702 Register Rtmp,
duke@435 703 Register Rdst,
duke@435 704 setCCOrNot should_set_CC ) {
duke@435 705 assert(Rtmp != Rdst, "need separate temp register");
duke@435 706 assert_not_delayed();
duke@435 707 add( Lbcp, bcp_offset, Rtmp);
duke@435 708 andcc( Rtmp, 3, G0);
duke@435 709 Label aligned;
duke@435 710 switch (should_set_CC ) {
duke@435 711 default: ShouldNotReachHere();
duke@435 712
duke@435 713 case set_CC: break;
duke@435 714 case dont_set_CC: break;
duke@435 715 }
duke@435 716
duke@435 717 br(Assembler::zero, true, Assembler::pn, aligned);
duke@435 718 #ifdef _LP64
duke@435 719 delayed()->ldsw(Rtmp, 0, Rdst);
duke@435 720 #else
duke@435 721 delayed()->ld(Rtmp, 0, Rdst);
duke@435 722 #endif
duke@435 723
duke@435 724 ldub(Lbcp, bcp_offset + 3, Rdst);
duke@435 725 ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst);
duke@435 726 ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst);
duke@435 727 #ifdef _LP64
duke@435 728 ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
duke@435 729 #else
duke@435 730 // Unsigned load is faster than signed on some implementations
duke@435 731 ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp);
duke@435 732 #endif
duke@435 733 or3(Rtmp, Rdst, Rdst );
duke@435 734
duke@435 735 bind(aligned);
duke@435 736 if (should_set_CC == set_CC) tst(Rdst);
duke@435 737 }
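
// Example (illustrative): for stream bytes 0x12 0x34 0x56 0x78 the unaligned
// path assembles (0x12 << 24) | (0x34 << 16) | (0x56 << 8) | 0x78 =
// 0x12345678, the same value the single ld/ldsw in the delay slot produces
// when Lbcp + bcp_offset happens to be 4-byte aligned.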
duke@435 738
duke@435 739
twisti@1858 740 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register cache, Register tmp,
jrose@1920 741 int bcp_offset, size_t index_size) {
twisti@1858 742 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
jrose@1920 743 if (index_size == sizeof(u2)) {
twisti@1858 744 get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
jrose@1920 745 } else if (index_size == sizeof(u4)) {
twisti@1858 746 assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
twisti@1858 747 get_4_byte_integer_at_bcp(bcp_offset, cache, tmp);
twisti@1858 748 assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
twisti@1858 749 xor3(tmp, -1, tmp); // convert to plain index
jrose@1920 750 } else if (index_size == sizeof(u1)) {
jrose@1920 751 assert(EnableMethodHandles, "tiny index used only for EnableMethodHandles");
jrose@1920 752 ldub(Lbcp, bcp_offset, tmp);
jrose@1920 753 } else {
jrose@1920 754 ShouldNotReachHere();
twisti@1858 755 }
twisti@1858 756 }
twisti@1858 757
twisti@1858 758
twisti@1858 759 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
jrose@1920 760 int bcp_offset, size_t index_size) {
duke@435 761 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
duke@435 762 assert_different_registers(cache, tmp);
duke@435 763 assert_not_delayed();
jrose@1920 764 get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
twisti@1858 765 // convert from field index to ConstantPoolCacheEntry index and from
twisti@1858 766 // word index to byte offset
duke@435 767 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
duke@435 768 add(LcpoolCache, tmp, cache);
duke@435 769 }
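
// Example (illustrative, assuming ConstantPoolCacheEntry::size() is 4 words
// and an 8-byte word): field index 3 is scaled by the sll above to
// 3 * 32 = 96 bytes, so cache = LcpoolCache + 96, the fourth cache entry.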
duke@435 770
duke@435 771
twisti@1858 772 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
jrose@1920 773 int bcp_offset, size_t index_size) {
duke@435 774 assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
duke@435 775 assert_different_registers(cache, tmp);
duke@435 776 assert_not_delayed();
jrose@1920 777 if (index_size == sizeof(u2)) {
jrose@1920 778 get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
jrose@1920 779 } else {
jrose@1920 780 ShouldNotReachHere(); // other sizes not supported here
jrose@1920 781 }
duke@435 782 // convert from field index to ConstantPoolCacheEntry index
duke@435 783 // and from word index to byte offset
duke@435 784 sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
duke@435 785 // skip past the header
duke@435 786 add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
duke@435 787 // construct pointer to cache entry
duke@435 788 add(LcpoolCache, tmp, cache);
duke@435 789 }
duke@435 790
duke@435 791
duke@435 792 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
coleenp@548 793 // a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
duke@435 794 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
duke@435 795 Register Rsuper_klass,
duke@435 796 Register Rtmp1,
duke@435 797 Register Rtmp2,
duke@435 798 Register Rtmp3,
duke@435 799 Label &ok_is_subtype ) {
jrose@1079 800 Label not_subtype;
duke@435 801
duke@435 802 // Profile the not-null value's klass.
duke@435 803 profile_typecheck(Rsub_klass, Rtmp1);
duke@435 804
jrose@1079 805 check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
jrose@1079 806 Rtmp1, Rtmp2,
jrose@1079 807 &ok_is_subtype, &not_subtype, NULL);
jrose@1079 808
jrose@1079 809 check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
jrose@1079 810 Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
jrose@1079 811 &ok_is_subtype, NULL);
duke@435 812
duke@435 813 bind(not_subtype);
duke@435 814 profile_typecheck_failed(Rtmp1);
duke@435 815 }
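
// Sketch (illustrative pseudo-C++) of the split performed above; the real
// checks live in MacroAssembler::check_klass_subtype_*:
//
//   if (sub == super or the cache/display check hits)   goto ok_is_subtype;
//   if (the fast path proves failure)                   goto not_subtype;
//   if (the secondary-supers scan finds super)          goto ok_is_subtype;
//   /* fall through */                                  // not a subtype; profile failure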
duke@435 816
duke@435 817 // Separate these two to allow for delay slot in middle
duke@435 818 // These are used to do a test and full jump to exception-throwing code.
duke@435 819
duke@435 820 // %%%%% Could possibly reoptimize this by testing to see if we could use
duke@435 821 // a single conditional branch (i.e. if the span is small enough).
duke@435 822 // If you go that route, then get rid of the split and give up
duke@435 823 // on the delay-slot hack.
duke@435 824
duke@435 825 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
duke@435 826 Label& ok ) {
duke@435 827 assert_not_delayed();
duke@435 828 br(ok_condition, true, pt, ok);
duke@435 829 // DELAY SLOT
duke@435 830 }
duke@435 831
duke@435 832 void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
duke@435 833 Label& ok ) {
duke@435 834 assert_not_delayed();
duke@435 835 bp( ok_condition, true, Assembler::xcc, pt, ok);
duke@435 836 // DELAY SLOT
duke@435 837 }
duke@435 838
duke@435 839 void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
duke@435 840 Label& ok ) {
duke@435 841 assert_not_delayed();
duke@435 842 brx(ok_condition, true, pt, ok);
duke@435 843 // DELAY SLOT
duke@435 844 }
duke@435 845
duke@435 846 void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point,
duke@435 847 Register Rscratch,
duke@435 848 Label& ok ) {
duke@435 849 assert(throw_entry_point != NULL, "entry point must be generated by now");
twisti@1162 850 AddressLiteral dest(throw_entry_point);
twisti@1162 851 jump_to(dest, Rscratch);
duke@435 852 delayed()->nop();
duke@435 853 bind(ok);
duke@435 854 }
duke@435 855
duke@435 856
duke@435 857 // And if you cannot use the delay slot, here is a shorthand:
duke@435 858
duke@435 859 void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
duke@435 860 address throw_entry_point,
duke@435 861 Register Rscratch ) {
duke@435 862 Label ok;
duke@435 863 if (ok_condition != never) {
duke@435 864 throw_if_not_1_icc( ok_condition, ok);
duke@435 865 delayed()->nop();
duke@435 866 }
duke@435 867 throw_if_not_2( throw_entry_point, Rscratch, ok);
duke@435 868 }
duke@435 869 void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
duke@435 870 address throw_entry_point,
duke@435 871 Register Rscratch ) {
duke@435 872 Label ok;
duke@435 873 if (ok_condition != never) {
duke@435 874 throw_if_not_1_xcc( ok_condition, ok);
duke@435 875 delayed()->nop();
duke@435 876 }
duke@435 877 throw_if_not_2( throw_entry_point, Rscratch, ok);
duke@435 878 }
duke@435 879 void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
duke@435 880 address throw_entry_point,
duke@435 881 Register Rscratch ) {
duke@435 882 Label ok;
duke@435 883 if (ok_condition != never) {
duke@435 884 throw_if_not_1_x( ok_condition, ok);
duke@435 885 delayed()->nop();
duke@435 886 }
duke@435 887 throw_if_not_2( throw_entry_point, Rscratch, ok);
duke@435 888 }
duke@435 889
duke@435 890 // Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
duke@435 891 // Note: res is still shy of address by array offset into object.
duke@435 892
duke@435 893 void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
duke@435 894 assert_not_delayed();
duke@435 895
duke@435 896 verify_oop(array);
duke@435 897 #ifdef _LP64
duke@435 898 // sign extend since tos (index) can be a 32bit value
duke@435 899 sra(index, G0, index);
duke@435 900 #endif // _LP64
duke@435 901
duke@435 902 // check array
duke@435 903 Label ptr_ok;
duke@435 904 tst(array);
duke@435 905 throw_if_not_1_x( notZero, ptr_ok );
duke@435 906 delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
duke@435 907 throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);
duke@435 908
duke@435 909 Label index_ok;
duke@435 910 cmp(index, tmp);
duke@435 911 throw_if_not_1_icc( lessUnsigned, index_ok );
duke@435 912 if (index_shift > 0) delayed()->sll(index, index_shift, index);
duke@435 913 else delayed()->add(array, index, res); // addr - const offset in index
duke@435 914 // convention: move aberrant index into G3_scratch for exception message
duke@435 915 mov(index, G3_scratch);
duke@435 916 throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);
duke@435 917
duke@435 918 // add offset if didn't do it in delay slot
duke@435 919 if (index_shift > 0) add(array, index, res); // addr - const offset in index
duke@435 920 }
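
// Example (illustrative): for an int[] element, index_check is called with
// index_shift = LogBytesPerInt (2), so a valid index 5 becomes 5 << 2 = 20
// and res = array + 20; the bytecode itself still adds
// arrayOopDesc::base_offset_in_bytes(T_INT), per the note above.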
duke@435 921
duke@435 922
duke@435 923 void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
duke@435 924 assert_not_delayed();
duke@435 925
duke@435 926 // pop array
duke@435 927 pop_ptr(array);
duke@435 928
duke@435 929 // check array
duke@435 930 index_check_without_pop(array, index, index_shift, tmp, res);
duke@435 931 }
duke@435 932
duke@435 933
duke@435 934 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
duke@435 935 ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
duke@435 936 }
duke@435 937
duke@435 938
duke@435 939 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
duke@435 940 get_constant_pool(Rdst);
duke@435 941 ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
duke@435 942 }
duke@435 943
duke@435 944
duke@435 945 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
duke@435 946 get_constant_pool(Rcpool);
duke@435 947 ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
duke@435 948 }
duke@435 949
duke@435 950
duke@435 951 // unlock if synchronized method
duke@435 952 //
duke@435 953 // Unlock the receiver if this is a synchronized method.
duke@435 954 // Unlock any Java monitors from synchronized blocks.
duke@435 955 //
duke@435 956 // If there are locked Java monitors
duke@435 957 // If throw_monitor_exception
duke@435 958 // throws IllegalMonitorStateException
duke@435 959 // Else if install_monitor_exception
duke@435 960 // installs IllegalMonitorStateException
duke@435 961 // Else
duke@435 962 // no error processing
duke@435 963 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
duke@435 964 bool throw_monitor_exception,
duke@435 965 bool install_monitor_exception) {
duke@435 966 Label unlocked, unlock, no_unlock;
duke@435 967
duke@435 968 // get the value of _do_not_unlock_if_synchronized into G1_scratch
twisti@1162 969 const Address do_not_unlock_if_synchronized(G2_thread,
twisti@1162 970 JavaThread::do_not_unlock_if_synchronized_offset());
duke@435 971 ldbool(do_not_unlock_if_synchronized, G1_scratch);
duke@435 972 stbool(G0, do_not_unlock_if_synchronized); // reset the flag
duke@435 973
duke@435 974 // check if synchronized method
twisti@1162 975 const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
duke@435 976 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 977 push(state); // save tos
twisti@1162 978 ld(access_flags, G3_scratch); // Load access flags.
duke@435 979 btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
twisti@1162 980 br(zero, false, pt, unlocked);
duke@435 981 delayed()->nop();
duke@435 982
duke@435 983 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
duke@435 984 // is set.
duke@435 985 tstbool(G1_scratch);
duke@435 986 br(Assembler::notZero, false, pn, no_unlock);
duke@435 987 delayed()->nop();
duke@435 988
duke@435 989 // BasicObjectLock will be first in list, since this is a synchronized method. However, need
duke@435 990 // to check that the object has not been unlocked by an explicit monitorexit bytecode.
duke@435 991
duke@435 992 //Intel: if (throw_monitor_exception) ... else ...
duke@435 993 // Entry already unlocked, need to throw exception
duke@435 994 //...
duke@435 995
duke@435 996 // pass top-most monitor elem
duke@435 997 add( top_most_monitor(), O1 );
duke@435 998
duke@435 999 ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
duke@435 1000 br_notnull(G3_scratch, false, pt, unlock);
duke@435 1001 delayed()->nop();
duke@435 1002
duke@435 1003 if (throw_monitor_exception) {
duke@435 1004 // Entry already unlocked, need to throw an exception
duke@435 1005 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
duke@435 1006 should_not_reach_here();
duke@435 1007 } else {
duke@435 1008 // Monitor already unlocked during a stack unroll.
duke@435 1009 // If requested, install an illegal_monitor_state_exception.
duke@435 1010 // Continue with stack unrolling.
duke@435 1011 if (install_monitor_exception) {
duke@435 1012 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
duke@435 1013 }
duke@435 1014 ba(false, unlocked);
duke@435 1015 delayed()->nop();
duke@435 1016 }
duke@435 1017
duke@435 1018 bind(unlock);
duke@435 1019
duke@435 1020 unlock_object(O1);
duke@435 1021
duke@435 1022 bind(unlocked);
duke@435 1023
duke@435 1024 // I0, I1: Might contain return value
duke@435 1025
duke@435 1026 // Check that all monitors are unlocked
duke@435 1027 { Label loop, exception, entry, restart;
duke@435 1028
duke@435 1029 Register Rmptr = O0;
duke@435 1030 Register Rtemp = O1;
duke@435 1031 Register Rlimit = Lmonitors;
duke@435 1032 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
duke@435 1033 assert( (delta & LongAlignmentMask) == 0,
duke@435 1034 "sizeof BasicObjectLock must be even number of doublewords");
duke@435 1035
duke@435 1036 #ifdef ASSERT
duke@435 1037 add(top_most_monitor(), Rmptr, delta);
duke@435 1038 { Label L;
duke@435 1039 // ensure that Rmptr starts out above (or at) Rlimit
duke@435 1040 cmp(Rmptr, Rlimit);
duke@435 1041 brx(Assembler::greaterEqualUnsigned, false, pn, L);
duke@435 1042 delayed()->nop();
duke@435 1043 stop("monitor stack has negative size");
duke@435 1044 bind(L);
duke@435 1045 }
duke@435 1046 #endif
duke@435 1047 bind(restart);
duke@435 1048 ba(false, entry);
duke@435 1049 delayed()->
duke@435 1050 add(top_most_monitor(), Rmptr, delta); // points to current entry, starting with bottom-most entry
duke@435 1051
duke@435 1052 // Entry is still locked, need to throw exception
duke@435 1053 bind(exception);
duke@435 1054 if (throw_monitor_exception) {
duke@435 1055 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
duke@435 1056 should_not_reach_here();
duke@435 1057 } else {
duke@435 1058 // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
duke@435 1059 // Unlock does not block, so don't have to worry about the frame
duke@435 1060 unlock_object(Rmptr);
duke@435 1061 if (install_monitor_exception) {
duke@435 1062 MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
duke@435 1063 }
duke@435 1064 ba(false, restart);
duke@435 1065 delayed()->nop();
duke@435 1066 }
duke@435 1067
duke@435 1068 bind(loop);
duke@435 1069 cmp(Rtemp, G0); // check if current entry is used
duke@435 1070 brx(Assembler::notEqual, false, pn, exception);
duke@435 1071 delayed()->
duke@435 1072 dec(Rmptr, delta); // otherwise advance to next entry
duke@435 1073 #ifdef ASSERT
duke@435 1074 { Label L;
duke@435 1075 // ensure that Rmptr has not somehow stepped below Rlimit
duke@435 1076 cmp(Rmptr, Rlimit);
duke@435 1077 brx(Assembler::greaterEqualUnsigned, false, pn, L);
duke@435 1078 delayed()->nop();
duke@435 1079 stop("ran off the end of the monitor stack");
duke@435 1080 bind(L);
duke@435 1081 }
duke@435 1082 #endif
duke@435 1083 bind(entry);
duke@435 1084 cmp(Rmptr, Rlimit); // check if bottom reached
duke@435 1085 brx(Assembler::notEqual, true, pn, loop); // if not at bottom then check this entry
duke@435 1086 delayed()->
duke@435 1087 ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
duke@435 1088 }
duke@435 1089
duke@435 1090 bind(no_unlock);
duke@435 1091 pop(state);
duke@435 1092 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 1093 }
duke@435 1094
duke@435 1095
duke@435 1096 // remove activation
duke@435 1097 //
duke@435 1098 // Unlock the receiver if this is a synchronized method.
duke@435 1099 // Unlock any Java monitors from synchronized blocks.
duke@435 1100 // Remove the activation from the stack.
duke@435 1101 //
duke@435 1102 // If there are locked Java monitors
duke@435 1103 // If throw_monitor_exception
duke@435 1104 // throws IllegalMonitorStateException
duke@435 1105 // Else if install_monitor_exception
duke@435 1106 // installs IllegalMonitorStateException
duke@435 1107 // Else
duke@435 1108 // no error processing
duke@435 1109 void InterpreterMacroAssembler::remove_activation(TosState state,
duke@435 1110 bool throw_monitor_exception,
duke@435 1111 bool install_monitor_exception) {
duke@435 1112
duke@435 1113 unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
duke@435 1114
duke@435 1115 // save result (push state before jvmti call and pop it afterwards) and notify jvmti
duke@435 1116 notify_method_exit(false, state, NotifyJVMTI);
duke@435 1117
duke@435 1118 interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
duke@435 1119 verify_oop(Lmethod);
duke@435 1120 verify_thread();
duke@435 1121
duke@435 1122 // return tos
duke@435 1123 assert(Otos_l1 == Otos_i, "adjust code below");
duke@435 1124 switch (state) {
duke@435 1125 #ifdef _LP64
duke@435 1126 case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
duke@435 1127 #else
duke@435 1128 case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1
duke@435 1129 #endif
duke@435 1130 case btos: // fall through
duke@435 1131 case ctos:
duke@435 1132 case stos: // fall through
duke@435 1133 case atos: // fall through
duke@435 1134 case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0
duke@435 1135 case ftos: // fall through
duke@435 1136 case dtos: // fall through
duke@435 1137 case vtos: /* nothing to do */ break;
duke@435 1138 default : ShouldNotReachHere();
duke@435 1139 }
duke@435 1140
duke@435 1141 #if defined(COMPILER2) && !defined(_LP64)
duke@435 1142 if (state == ltos) {
duke@435 1143 // C2 expects long results in G1. We can't tell if we're returning to interpreted
duke@435 1144 // or compiled code, so to be safe use both G1 and O0/O1.
duke@435 1145
duke@435 1146 // Shift bits into high (msb) of G1
duke@435 1147 sllx(Otos_l1->after_save(), 32, G1);
duke@435 1148 // Zero extend low bits
duke@435 1149 srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
duke@435 1150 or3 (Otos_l2->after_save(), G1, G1);
duke@435 1151 }
duke@435 1152 #endif /* COMPILER2 */
duke@435 1153
duke@435 1154 }
duke@435 1155 #endif /* CC_INTERP */
duke@435 1156
duke@435 1157
duke@435 1158 // Lock object
duke@435 1159 //
duke@435 1160 // Argument - lock_reg points to the BasicObjectLock to be used for locking,
duke@435 1161 // it must be initialized with the object to lock
duke@435 1162 void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
duke@435 1163 if (UseHeavyMonitors) {
duke@435 1164 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
duke@435 1165 }
duke@435 1166 else {
duke@435 1167 Register obj_reg = Object;
duke@435 1168 Register mark_reg = G4_scratch;
duke@435 1169 Register temp_reg = G1_scratch;
twisti@1162 1170 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
twisti@1162 1171 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
duke@435 1172 Label done;
duke@435 1173
duke@435 1174 Label slow_case;
duke@435 1175
duke@435 1176 assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);
duke@435 1177
duke@435 1178 // load markOop from object into mark_reg
duke@435 1179 ld_ptr(mark_addr, mark_reg);
duke@435 1180
duke@435 1181 if (UseBiasedLocking) {
duke@435 1182 biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
duke@435 1183 }
duke@435 1184
duke@435 1185 // get the address of basicLock on stack that will be stored in the object
duke@435 1186 // we need a temporary register here as we do not want to clobber lock_reg
duke@435 1187 // (cas clobbers the destination register)
duke@435 1188 mov(lock_reg, temp_reg);
duke@435 1189 // set mark reg to be (markOop of object | UNLOCK_VALUE)
duke@435 1190 or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
duke@435 1191 // initialize the box (Must happen before we update the object mark!)
duke@435 1192 st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
duke@435 1193 // compare and exchange object_addr, markOop | 1, stack address of basicLock
duke@435 1194 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 1195 casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
duke@435 1196 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
duke@435 1197
duke@435 1198 // if the compare and exchange succeeded we are done (we saw an unlocked object)
duke@435 1199 cmp(mark_reg, temp_reg);
duke@435 1200 brx(Assembler::equal, true, Assembler::pt, done);
duke@435 1201 delayed()->nop();
duke@435 1202
duke@435 1203 // We did not see an unlocked object so try the fast recursive case
duke@435 1204
duke@435 1205 // Check if owner is self by comparing the value in the markOop of object
duke@435 1206 // with the stack pointer
duke@435 1207 sub(temp_reg, SP, temp_reg);
duke@435 1208 #ifdef _LP64
duke@435 1209 sub(temp_reg, STACK_BIAS, temp_reg);
duke@435 1210 #endif
duke@435 1211 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
duke@435 1212
duke@435 1213 // Composite "andcc" test:
duke@435 1214 // (a) %sp -vs- markword proximity check, and,
duke@435 1215 // (b) verify mark word LSBs == 0 (Stack-locked).
duke@435 1216 //
duke@435 1217 // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
duke@435 1218 // Note that the page size used for %sp proximity testing is arbitrary and is
duke@435 1219 // unrelated to the actual MMU page size. We use a 'logical' page size of
duke@435 1220 // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate
duke@435 1221 // field of the andcc instruction.
duke@435 1222 andcc (temp_reg, 0xFFFFF003, G0) ;
duke@435 1223
duke@435 1224 // If the condition holds, this is a recursive lock: store 0 into the
duke@435 1225 // displaced header (done in the delay slot) and we are finished
duke@435 1226 brx(Assembler::zero, true, Assembler::pt, done);
duke@435 1227 delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());
duke@435 1228
duke@435 1229 // none of the above fast optimizations worked so we have to get into the
duke@435 1230 // slow case of monitor enter
duke@435 1231 bind(slow_case);
duke@435 1232 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
duke@435 1233
duke@435 1234 bind(done);
duke@435 1235 }
duke@435 1236 }
duke@435 1237
duke@435 1238 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
duke@435 1239 //
duke@435 1240 // Argument - lock_reg points to the BasicObjectLock for lock
duke@435 1241 // Throws IllegalMonitorStateException if the object is not locked by the current thread
duke@435 1242 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
duke@435 1243 if (UseHeavyMonitors) {
duke@435 1244 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
duke@435 1245 } else {
duke@435 1246 Register obj_reg = G3_scratch;
duke@435 1247 Register mark_reg = G4_scratch;
duke@435 1248 Register displaced_header_reg = G1_scratch;
twisti@1162 1249 Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
twisti@1162 1250 Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
duke@435 1251 Label done;
duke@435 1252
duke@435 1253 if (UseBiasedLocking) {
duke@435 1254 // load the object out of the BasicObjectLock
duke@435 1255 ld_ptr(lockobj_addr, obj_reg);
duke@435 1256 biased_locking_exit(mark_addr, mark_reg, done, true);
duke@435 1257 st_ptr(G0, lockobj_addr); // free entry
duke@435 1258 }
duke@435 1259
duke@435 1260 // Test first if we are in the fast recursive case
twisti@1162 1261 Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
twisti@1162 1262 ld_ptr(lock_addr, displaced_header_reg);
duke@435 1263 br_null(displaced_header_reg, true, Assembler::pn, done);
duke@435 1264 delayed()->st_ptr(G0, lockobj_addr); // free entry
duke@435 1265
duke@435 1266 // See if it is still a lightweight lock; if so, we just unlock
duke@435 1267 // the object and we are done
duke@435 1268
duke@435 1269 if (!UseBiasedLocking) {
duke@435 1270 // load the object out of the BasicObjectLock
duke@435 1271 ld_ptr(lockobj_addr, obj_reg);
duke@435 1272 }
duke@435 1273
duke@435 1274 // We have the displaced header in displaced_header_reg. If the lock
duke@435 1275 // is still a lightweight lock, the mark word should hold the stack
duke@435 1276 // address of the basicLock (lock_reg)
duke@435 1277 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
duke@435 1278 casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
duke@435 1279 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
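// Unlock fast-path sketch (illustrative): if the mark word still points at
// our BasicLock, casx swaps the saved displaced header back into the object:
//   if (*mark_addr == lock_reg) *mark_addr = displaced_header_reg;
// on inequality the lock has been inflated and we fall into the runtime.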
duke@435 1280 cmp(lock_reg, displaced_header_reg);
duke@435 1281 brx(Assembler::equal, true, Assembler::pn, done);
duke@435 1282 delayed()->st_ptr(G0, lockobj_addr); // free entry
duke@435 1283
duke@435 1284 // The lock has been converted into a heavy lock and hence
duke@435 1285 // we need to get into the slow case
duke@435 1286
duke@435 1287 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
duke@435 1288
duke@435 1289 bind(done);
duke@435 1290 }
duke@435 1291 }
duke@435 1292
duke@435 1293 #ifndef CC_INTERP
duke@435 1294
duke@435 1295 // Get the method data pointer from the methodOop and set the
duke@435 1296 // specified register to its value.
duke@435 1297
iveresov@2438 1298 void InterpreterMacroAssembler::set_method_data_pointer() {
duke@435 1299 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1300 Label get_continue;
duke@435 1301
duke@435 1302 ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
duke@435 1303 test_method_data_pointer(get_continue);
duke@435 1304 add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
duke@435 1305 bind(get_continue);
duke@435 1306 }
duke@435 1307
duke@435 1308 // Set the method data pointer for the current bcp.
duke@435 1309
duke@435 1310 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
duke@435 1311 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1312 Label zero_continue;
duke@435 1313
duke@435 1314 // Test MDO to avoid the call if it is NULL.
iveresov@2438 1315 ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
duke@435 1316 test_method_data_pointer(zero_continue);
duke@435 1317 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
iveresov@2438 1318 add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
iveresov@2438 1319 add(ImethodDataPtr, O0, ImethodDataPtr);
duke@435 1320 bind(zero_continue);
duke@435 1321 }
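// Net effect (sketch): ImethodDataPtr = mdo + data_offset() + bcp_to_di(method, bcp),
// where bcp_to_di's result arrives in O0 from the call_VM_leaf above.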
duke@435 1322
duke@435 1323 // Test ImethodDataPtr. If it is null, continue at the specified label
duke@435 1324
duke@435 1325 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
duke@435 1326 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1327 #ifdef _LP64
duke@435 1328 bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue);
duke@435 1329 #else
duke@435 1330 tst(ImethodDataPtr);
duke@435 1331 br(Assembler::zero, false, Assembler::pn, zero_continue);
duke@435 1332 #endif
duke@435 1333 delayed()->nop();
duke@435 1334 }
duke@435 1335
duke@435 1336 void InterpreterMacroAssembler::verify_method_data_pointer() {
duke@435 1337 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1338 #ifdef ASSERT
duke@435 1339 Label verify_continue;
duke@435 1340 test_method_data_pointer(verify_continue);
duke@435 1341
duke@435 1342 // If the mdp is valid, it will point to a DataLayout header which is
duke@435 1343 // consistent with the bcp. The converse is highly probable also.
duke@435 1344 lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
twisti@1162 1345 ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
duke@435 1346 add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
duke@435 1347 add(G3_scratch, O5, G3_scratch);
duke@435 1348 cmp(Lbcp, G3_scratch);
duke@435 1349 brx(Assembler::equal, false, Assembler::pt, verify_continue);
duke@435 1350
duke@435 1351 Register temp_reg = O5;
duke@435 1352 delayed()->mov(ImethodDataPtr, temp_reg);
duke@435 1353 // %%% should use call_VM_leaf here?
duke@435 1354 //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
duke@435 1355 save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
twisti@1162 1356 Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
duke@435 1357 stf(FloatRegisterImpl::D, Ftos_d, d_save);
duke@435 1358 mov(temp_reg->after_save(), O2);
duke@435 1359 save_thread(L7_thread_cache);
duke@435 1360 call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
duke@435 1361 delayed()->nop();
duke@435 1362 restore_thread(L7_thread_cache);
duke@435 1363 ldf(FloatRegisterImpl::D, d_save, Ftos_d);
duke@435 1364 restore();
duke@435 1365 bind(verify_continue);
duke@435 1366 #endif // ASSERT
duke@435 1367 }
duke@435 1368
duke@435 1369 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
duke@435 1370 Register Rtmp,
duke@435 1371 Label &profile_continue) {
duke@435 1372 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1373 // Control will flow to "profile_continue" if the counter is less than the
duke@435 1374 // limit or if we call profile_method()
duke@435 1375
duke@435 1376 Label done;
duke@435 1377
duke@435 1378 // if no method data exists, and the counter is high enough, make one
duke@435 1379 #ifdef _LP64
duke@435 1380 bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done);
duke@435 1381 #else
duke@435 1382 tst(ImethodDataPtr);
duke@435 1383 br(Assembler::notZero, false, Assembler::pn, done);
duke@435 1384 #endif
duke@435 1385
duke@435 1386 // Test to see if we should create a method data oop
twisti@1162 1387 AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
duke@435 1388 #ifdef _LP64
duke@435 1389 delayed()->nop();
twisti@1162 1390 sethi(profile_limit, Rtmp);
duke@435 1391 #else
twisti@1162 1392 delayed()->sethi(profile_limit, Rtmp);
duke@435 1393 #endif
twisti@1162 1394 ld(Rtmp, profile_limit.low10(), Rtmp);
duke@435 1395 cmp(invocation_count, Rtmp);
duke@435 1396 br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
duke@435 1397 delayed()->nop();
duke@435 1398
duke@435 1399 // Build it now.
iveresov@2438 1400 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
iveresov@2438 1401 set_method_data_pointer_for_bcp();
duke@435 1402 ba(false, profile_continue);
duke@435 1403 delayed()->nop();
duke@435 1404 bind(done);
duke@435 1405 }
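// Control-flow sketch of the code above (illustrative):
//   if (ImethodDataPtr != NULL)                       goto done;
//   if (invocation_count < InterpreterProfileLimit)   goto profile_continue;
//   InterpreterRuntime::profile_method();             // allocates the methodDataOop
//   set_method_data_pointer_for_bcp();
//   goto profile_continue;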
duke@435 1406
duke@435 1407 // Store a value at some constant offset from the method data pointer.
duke@435 1408
duke@435 1409 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
duke@435 1410 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1411 st_ptr(value, ImethodDataPtr, constant);
duke@435 1412 }
duke@435 1413
duke@435 1414 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
duke@435 1415 Register bumped_count,
duke@435 1416 bool decrement) {
duke@435 1417 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1418
duke@435 1419 // Load the counter.
duke@435 1420 ld_ptr(counter, bumped_count);
duke@435 1421
duke@435 1422 if (decrement) {
duke@435 1423 // Decrement the register. Set condition codes.
duke@435 1424 subcc(bumped_count, DataLayout::counter_increment, bumped_count);
duke@435 1425
duke@435 1426 // If the decrement causes the counter to overflow, stay negative
duke@435 1427 Label L;
duke@435 1428 brx(Assembler::negative, true, Assembler::pn, L);
duke@435 1429
duke@435 1430 // Store the decremented counter, if it is still negative.
duke@435 1431 delayed()->st_ptr(bumped_count, counter);
duke@435 1432 bind(L);
duke@435 1433 } else {
duke@435 1434 // Increment the register. Set carry flag.
duke@435 1435 addcc(bumped_count, DataLayout::counter_increment, bumped_count);
duke@435 1436
duke@435 1437 // If the increment causes the counter to overflow, pull back by 1.
duke@435 1438 assert(DataLayout::counter_increment == 1, "subc works");
duke@435 1439 subc(bumped_count, G0, bumped_count);
duke@435 1440
duke@435 1441 // Store the incremented counter.
duke@435 1442 st_ptr(bumped_count, counter);
duke@435 1443 }
duke@435 1444 }
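// Saturation sketch for the increment case (relies on counter_increment == 1):
//   sum = count + 1;      // addcc sets the carry flag iff the add wrapped to 0
//   count = sum - carry;  // subc pulls a wrapped counter back to all-ones
// so a counter that reaches the top value sticks there instead of wrapping.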
duke@435 1445
duke@435 1446 // Increment the value at some constant offset from the method data pointer.
duke@435 1447
duke@435 1448 void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
duke@435 1449 Register bumped_count,
duke@435 1450 bool decrement) {
duke@435 1451 // Locate the counter at a fixed offset from the mdp:
twisti@1162 1452 Address counter(ImethodDataPtr, constant);
duke@435 1453 increment_mdp_data_at(counter, bumped_count, decrement);
duke@435 1454 }
duke@435 1455
duke@435 1456 // Increment the value at some non-fixed (reg + constant) offset from
duke@435 1457 // the method data pointer.
duke@435 1458
duke@435 1459 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
duke@435 1460 int constant,
duke@435 1461 Register bumped_count,
duke@435 1462 Register scratch2,
duke@435 1463 bool decrement) {
duke@435 1464 // Add the constant to reg to get the offset.
duke@435 1465 add(ImethodDataPtr, reg, scratch2);
twisti@1162 1466 Address counter(scratch2, constant);
duke@435 1467 increment_mdp_data_at(counter, bumped_count, decrement);
duke@435 1468 }
duke@435 1469
duke@435 1470 // Set a flag value at the current method data pointer position.
duke@435 1471 // Updates a single byte of the header, to avoid races with other header bits.
duke@435 1472
duke@435 1473 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
duke@435 1474 Register scratch) {
duke@435 1475 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1476 // Load the data header
duke@435 1477 ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);
duke@435 1478
duke@435 1479 // Set the flag
duke@435 1480 or3(scratch, flag_constant, scratch);
duke@435 1481
duke@435 1482 // Store the modified header.
duke@435 1483 stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
duke@435 1484 }
duke@435 1485
duke@435 1486 // Test the location at some offset from the method data pointer.
duke@435 1487 // If it is not equal to value, branch to the not_equal_continue Label.
duke@435 1488 // Set condition codes to match the nullness of the loaded value.
duke@435 1489
duke@435 1490 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
duke@435 1491 Register value,
duke@435 1492 Label& not_equal_continue,
duke@435 1493 Register scratch) {
duke@435 1494 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1495 ld_ptr(ImethodDataPtr, offset, scratch);
duke@435 1496 cmp(value, scratch);
duke@435 1497 brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
duke@435 1498 delayed()->tst(scratch);
duke@435 1499 }
duke@435 1500
duke@435 1501 // Update the method data pointer by the displacement located at some fixed
duke@435 1502 // offset from the method data pointer.
duke@435 1503
duke@435 1504 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
duke@435 1505 Register scratch) {
duke@435 1506 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1507 ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
duke@435 1508 add(ImethodDataPtr, scratch, ImethodDataPtr);
duke@435 1509 }
duke@435 1510
duke@435 1511 // Update the method data pointer by the displacement located at the
duke@435 1512 // offset (reg + offset_of_disp).
duke@435 1513
duke@435 1514 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
duke@435 1515 int offset_of_disp,
duke@435 1516 Register scratch) {
duke@435 1517 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1518 add(reg, offset_of_disp, scratch);
duke@435 1519 ld_ptr(ImethodDataPtr, scratch, scratch);
duke@435 1520 add(ImethodDataPtr, scratch, ImethodDataPtr);
duke@435 1521 }
duke@435 1522
duke@435 1523 // Update the method data pointer by a simple constant displacement.
duke@435 1524
duke@435 1525 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
duke@435 1526 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1527 add(ImethodDataPtr, constant, ImethodDataPtr);
duke@435 1528 }
duke@435 1529
duke@435 1530 // Update the method data pointer for a _ret bytecode whose target
duke@435 1531 // was not among our cached targets.
duke@435 1532
duke@435 1533 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
duke@435 1534 Register return_bci) {
duke@435 1535 assert(ProfileInterpreter, "must be profiling interpreter");
duke@435 1536 push(state);
duke@435 1537 st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile
duke@435 1538 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
duke@435 1539 ld_ptr(l_tmp, return_bci);
duke@435 1540 pop(state);
duke@435 1541 }
duke@435 1542
duke@435 1543 // Count a taken branch in the bytecodes.
duke@435 1544
duke@435 1545 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
duke@435 1546 if (ProfileInterpreter) {
duke@435 1547 Label profile_continue;
duke@435 1548
duke@435 1549 // If no method data exists, go to profile_continue.
duke@435 1550 test_method_data_pointer(profile_continue);
duke@435 1551
duke@435 1552 // We are taking a branch. Increment the taken count.
duke@435 1553 increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);
duke@435 1554
duke@435 1555 // The method data pointer needs to be updated to reflect the new target.
duke@435 1556 update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
duke@435 1557 bind (profile_continue);
duke@435 1558 }
duke@435 1559 }
duke@435 1560
duke@435 1561
duke@435 1562 // Count a not-taken branch in the bytecodes.
duke@435 1563
duke@435 1564 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
duke@435 1565 if (ProfileInterpreter) {
duke@435 1566 Label profile_continue;
duke@435 1567
duke@435 1568 // If no method data exists, go to profile_continue.
duke@435 1569 test_method_data_pointer(profile_continue);
duke@435 1570
duke@435 1571 // We are not taking the branch. Increment the not-taken count.
duke@435 1572 increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);
duke@435 1573
duke@435 1574 // The method data pointer needs to be updated to correspond to the
duke@435 1575 // next bytecode.
duke@435 1576 update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
duke@435 1577 bind (profile_continue);
duke@435 1578 }
duke@435 1579 }
duke@435 1580
duke@435 1581
duke@435 1582 // Count a non-virtual call in the bytecodes.
duke@435 1583
duke@435 1584 void InterpreterMacroAssembler::profile_call(Register scratch) {
duke@435 1585 if (ProfileInterpreter) {
duke@435 1586 Label profile_continue;
duke@435 1587
duke@435 1588 // If no method data exists, go to profile_continue.
duke@435 1589 test_method_data_pointer(profile_continue);
duke@435 1590
duke@435 1591 // We are making a call. Increment the count.
duke@435 1592 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
duke@435 1593
duke@435 1594 // The method data pointer needs to be updated to reflect the new target.
duke@435 1595 update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
duke@435 1596 bind (profile_continue);
duke@435 1597 }
duke@435 1598 }
duke@435 1599
duke@435 1600
duke@435 1601 // Count a final call in the bytecodes.
duke@435 1602
duke@435 1603 void InterpreterMacroAssembler::profile_final_call(Register scratch) {
duke@435 1604 if (ProfileInterpreter) {
duke@435 1605 Label profile_continue;
duke@435 1606
duke@435 1607 // If no method data exists, go to profile_continue.
duke@435 1608 test_method_data_pointer(profile_continue);
duke@435 1609
duke@435 1610 // We are making a call. Increment the count.
duke@435 1611 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
duke@435 1612
duke@435 1613 // The method data pointer needs to be updated to reflect the new target.
duke@435 1614 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
duke@435 1615 bind (profile_continue);
duke@435 1616 }
duke@435 1617 }
duke@435 1618
duke@435 1619
duke@435 1620 // Count a virtual call in the bytecodes.
duke@435 1621
duke@435 1622 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
twisti@1858 1623 Register scratch,
twisti@1858 1624 bool receiver_can_be_null) {
duke@435 1625 if (ProfileInterpreter) {
duke@435 1626 Label profile_continue;
duke@435 1627
duke@435 1628 // If no method data exists, go to profile_continue.
duke@435 1629 test_method_data_pointer(profile_continue);
duke@435 1630
twisti@1858 1631
twisti@1858 1632 Label skip_receiver_profile;
twisti@1858 1633 if (receiver_can_be_null) {
twisti@1858 1634 Label not_null;
twisti@1858 1635 tst(receiver);
twisti@1858 1636 brx(Assembler::notZero, false, Assembler::pt, not_null);
twisti@1858 1637 delayed()->nop();
twisti@1858 1638 // We are making a call. Increment the count for null receiver.
twisti@1858 1639 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
twisti@1858 1640 ba(false, skip_receiver_profile);
twisti@1858 1641 delayed()->nop();
twisti@1858 1642 bind(not_null);
twisti@1858 1643 }
twisti@1858 1644
duke@435 1645 // Record the receiver type.
kvn@1641 1646 record_klass_in_profile(receiver, scratch, true);
twisti@1858 1647 bind(skip_receiver_profile);
duke@435 1648
duke@435 1649 // The method data pointer needs to be updated to reflect the new target.
duke@435 1650 update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
duke@435 1651 bind (profile_continue);
duke@435 1652 }
duke@435 1653 }
duke@435 1654
duke@435 1655 void InterpreterMacroAssembler::record_klass_in_profile_helper(
duke@435 1656 Register receiver, Register scratch,
kvn@1641 1657 int start_row, Label& done, bool is_virtual_call) {
kvn@1641 1658 if (TypeProfileWidth == 0) {
kvn@1641 1659 if (is_virtual_call) {
kvn@1641 1660 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
kvn@1641 1661 }
poonam@1402 1662 return;
kvn@1641 1663 }
poonam@1402 1664
duke@435 1665 int last_row = VirtualCallData::row_limit() - 1;
duke@435 1666 assert(start_row <= last_row, "must be work left to do");
duke@435 1667 // Test this row for both the receiver and for null.
duke@435 1668 // Take any of three different outcomes:
duke@435 1669 // 1. found receiver => increment count and goto done
duke@435 1670 // 2. found null => keep looking for case 1, maybe allocate this cell
duke@435 1671 // 3. found something else => keep looking for cases 1 and 2
duke@435 1672 // Case 3 is handled by a recursive call.
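// Effective behavior of the recursive scan below (illustrative sketch):
//   for (row = 0; row < TypeProfileWidth; row++)
//     if (receiver[row] == recv) { count[row] += increment; goto done; }
//   if (some row has a NULL receiver cell)
//     { receiver[row] = recv; count[row] = increment; }  // claim the empty row
//   else if (is_virtual_call)
//     total_count += increment;                          // polymorphic overflow case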
duke@435 1673 for (int row = start_row; row <= last_row; row++) {
duke@435 1674 Label next_test;
duke@435 1675 bool test_for_null_also = (row == start_row);
duke@435 1676
duke@435 1677 // See if the receiver is receiver[n].
duke@435 1678 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
duke@435 1679 test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
kvn@1641 1680 // delayed()->tst(scratch);
duke@435 1681
duke@435 1682 // The receiver is receiver[n]. Increment count[n].
duke@435 1683 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
duke@435 1684 increment_mdp_data_at(count_offset, scratch);
duke@435 1685 ba(false, done);
duke@435 1686 delayed()->nop();
duke@435 1687 bind(next_test);
duke@435 1688
duke@435 1689 if (test_for_null_also) {
kvn@1641 1690 Label found_null;
duke@435 1691 // Failed the equality check on receiver[n]... Test for null.
duke@435 1692 if (start_row == last_row) {
duke@435 1693 // The only thing left to do is handle the null case.
kvn@1641 1694 if (is_virtual_call) {
kvn@1641 1695 brx(Assembler::zero, false, Assembler::pn, found_null);
kvn@1641 1696 delayed()->nop();
kvn@1641 1697 // Receiver did not match any saved receiver and there is no empty row for it.
kvn@1686 1698 // Increment total counter to indicate polymorphic case.
kvn@1641 1699 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
kvn@1641 1700 ba(false, done);
kvn@1641 1701 delayed()->nop();
kvn@1641 1702 bind(found_null);
kvn@1641 1703 } else {
kvn@1641 1704 brx(Assembler::notZero, false, Assembler::pt, done);
kvn@1641 1705 delayed()->nop();
kvn@1641 1706 }
duke@435 1707 break;
duke@435 1708 }
duke@435 1709 // Since null is rare, make it the branch-taken case.
duke@435 1710 brx(Assembler::zero, false, Assembler::pn, found_null);
duke@435 1711 delayed()->nop();
duke@435 1712
duke@435 1713 // Put all the "Case 3" tests here.
kvn@1641 1714 record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
duke@435 1715
duke@435 1716 // Found a null. Keep searching for a matching receiver,
duke@435 1717 // but remember that this is an empty (unused) slot.
duke@435 1718 bind(found_null);
duke@435 1719 }
duke@435 1720 }
duke@435 1721
duke@435 1722 // In the fall-through case, we found no matching receiver, but we
duke@435 1723 // observed that receiver[start_row] is NULL.
duke@435 1724
duke@435 1725 // Fill in the receiver field and increment the count.
duke@435 1726 int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
duke@435 1727 set_mdp_data_at(recvr_offset, receiver);
duke@435 1728 int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
duke@435 1729 mov(DataLayout::counter_increment, scratch);
duke@435 1730 set_mdp_data_at(count_offset, scratch);
kvn@1641 1731 if (start_row > 0) {
kvn@1641 1732 ba(false, done);
kvn@1641 1733 delayed()->nop();
kvn@1641 1734 }
duke@435 1735 }
duke@435 1736
duke@435 1737 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
kvn@1641 1738 Register scratch, bool is_virtual_call) {
duke@435 1739 assert(ProfileInterpreter, "must be profiling");
duke@435 1740 Label done;
duke@435 1741
kvn@1641 1742 record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
duke@435 1743
duke@435 1744 bind (done);
duke@435 1745 }
duke@435 1746
duke@435 1747
duke@435 1748 // Count a ret in the bytecodes.
duke@435 1749
duke@435 1750 void InterpreterMacroAssembler::profile_ret(TosState state,
duke@435 1751 Register return_bci,
duke@435 1752 Register scratch) {
duke@435 1753 if (ProfileInterpreter) {
duke@435 1754 Label profile_continue;
duke@435 1755 uint row;
duke@435 1756
duke@435 1757 // If no method data exists, go to profile_continue.
duke@435 1758 test_method_data_pointer(profile_continue);
duke@435 1759
duke@435 1760 // Update the total ret count.
duke@435 1761 increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
duke@435 1762
duke@435 1763 for (row = 0; row < RetData::row_limit(); row++) {
duke@435 1764 Label next_test;
duke@435 1765
duke@435 1766 // See if return_bci is equal to bci[n]:
duke@435 1767 test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
duke@435 1768 return_bci, next_test, scratch);
duke@435 1769
duke@435 1770 // return_bci is equal to bci[n]. Increment the count.
duke@435 1771 increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
duke@435 1772
duke@435 1773 // The method data pointer needs to be updated to reflect the new target.
duke@435 1774 update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
duke@435 1775 ba(false, profile_continue);
duke@435 1776 delayed()->nop();
duke@435 1777 bind(next_test);
duke@435 1778 }
duke@435 1779
duke@435 1780 update_mdp_for_ret(state, return_bci);
duke@435 1781
duke@435 1782 bind (profile_continue);
duke@435 1783 }
duke@435 1784 }
duke@435 1785
duke@435 1786 // Profile an unexpected null in the bytecodes.
duke@435 1787 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
duke@435 1788 if (ProfileInterpreter) {
duke@435 1789 Label profile_continue;
duke@435 1790
duke@435 1791 // If no method data exists, go to profile_continue.
duke@435 1792 test_method_data_pointer(profile_continue);
duke@435 1793
duke@435 1794 set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
duke@435 1795
duke@435 1796 // The method data pointer needs to be updated.
duke@435 1797 int mdp_delta = in_bytes(BitData::bit_data_size());
duke@435 1798 if (TypeProfileCasts) {
duke@435 1799 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
duke@435 1800 }
duke@435 1801 update_mdp_by_constant(mdp_delta);
duke@435 1802
duke@435 1803 bind (profile_continue);
duke@435 1804 }
duke@435 1805 }
duke@435 1806
duke@435 1807 void InterpreterMacroAssembler::profile_typecheck(Register klass,
duke@435 1808 Register scratch) {
duke@435 1809 if (ProfileInterpreter) {
duke@435 1810 Label profile_continue;
duke@435 1811
duke@435 1812 // If no method data exists, go to profile_continue.
duke@435 1813 test_method_data_pointer(profile_continue);
duke@435 1814
duke@435 1815 int mdp_delta = in_bytes(BitData::bit_data_size());
duke@435 1816 if (TypeProfileCasts) {
duke@435 1817 mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
duke@435 1818
duke@435 1819 // Record the object type.
kvn@1641 1820 record_klass_in_profile(klass, scratch, false);
duke@435 1821 }
duke@435 1822
duke@435 1823 // The method data pointer needs to be updated.
duke@435 1824 update_mdp_by_constant(mdp_delta);
duke@435 1825
duke@435 1826 bind (profile_continue);
duke@435 1827 }
duke@435 1828 }
duke@435 1829
duke@435 1830 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
duke@435 1831 if (ProfileInterpreter && TypeProfileCasts) {
duke@435 1832 Label profile_continue;
duke@435 1833
duke@435 1834 // If no method data exists, go to profile_continue.
duke@435 1835 test_method_data_pointer(profile_continue);
duke@435 1836
duke@435 1837 int count_offset = in_bytes(CounterData::count_offset());
duke@435 1838 // Back up the address, since we have already bumped the mdp.
duke@435 1839 count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
duke@435 1840
duke@435 1841 // *Decrement* the counter. We expect to see zero or small negatives.
duke@435 1842 increment_mdp_data_at(count_offset, scratch, true);
duke@435 1843
duke@435 1844 bind (profile_continue);
duke@435 1845 }
duke@435 1846 }
duke@435 1847
duke@435 1848 // Count the default case of a switch construct.
duke@435 1849
duke@435 1850 void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
duke@435 1851 if (ProfileInterpreter) {
duke@435 1852 Label profile_continue;
duke@435 1853
duke@435 1854 // If no method data exists, go to profile_continue.
duke@435 1855 test_method_data_pointer(profile_continue);
duke@435 1856
duke@435 1857 // Update the default case count
duke@435 1858 increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
duke@435 1859 scratch);
duke@435 1860
duke@435 1861 // The method data pointer needs to be updated.
duke@435 1862 update_mdp_by_offset(
duke@435 1863 in_bytes(MultiBranchData::default_displacement_offset()),
duke@435 1864 scratch);
duke@435 1865
duke@435 1866 bind (profile_continue);
duke@435 1867 }
duke@435 1868 }
duke@435 1869
duke@435 1870 // Count the index'th case of a switch construct.
duke@435 1871
duke@435 1872 void InterpreterMacroAssembler::profile_switch_case(Register index,
duke@435 1873 Register scratch,
duke@435 1874 Register scratch2,
duke@435 1875 Register scratch3) {
duke@435 1876 if (ProfileInterpreter) {
duke@435 1877 Label profile_continue;
duke@435 1878
duke@435 1879 // If no method data exists, go to profile_continue.
duke@435 1880 test_method_data_pointer(profile_continue);
duke@435 1881
duke@435 1882 // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
duke@435 1883 set(in_bytes(MultiBranchData::per_case_size()), scratch);
duke@435 1884 smul(index, scratch, scratch);
duke@435 1885 add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);
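// Offset computed above (sketch):
//   scratch = index * per_case_size() + case_array_offset()
// both the case counter and the new mdp displacement live at fixed offsets
// within that case record.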
duke@435 1886
duke@435 1887 // Update the case count
duke@435 1888 increment_mdp_data_at(scratch,
duke@435 1889 in_bytes(MultiBranchData::relative_count_offset()),
duke@435 1890 scratch2,
duke@435 1891 scratch3);
duke@435 1892
duke@435 1893 // The method data pointer needs to be updated.
duke@435 1894 update_mdp_by_offset(scratch,
duke@435 1895 in_bytes(MultiBranchData::relative_displacement_offset()),
duke@435 1896 scratch2);
duke@435 1897
duke@435 1898 bind (profile_continue);
duke@435 1899 }
duke@435 1900 }
duke@435 1901
duke@435 1902 // Add an InterpMonitorElem to the stack (see frame_sparc.hpp)
duke@435 1903
duke@435 1904 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
duke@435 1905 Register Rtemp,
duke@435 1906 Register Rtemp2 ) {
duke@435 1907
duke@435 1908 Register Rlimit = Lmonitors;
duke@435 1909 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
duke@435 1910 assert( (delta & LongAlignmentMask) == 0,
duke@435 1911 "sizeof BasicObjectLock must be even number of doublewords");
duke@435 1912
duke@435 1913 sub( SP, delta, SP);
duke@435 1914 sub( Lesp, delta, Lesp);
duke@435 1915 sub( Lmonitors, delta, Lmonitors);
duke@435 1916
duke@435 1917 if (!stack_is_empty) {
duke@435 1918
duke@435 1919 // must copy stack contents down
duke@435 1920
duke@435 1921 Label start_copying, next;
duke@435 1922
duke@435 1923 // untested("monitor stack expansion");
duke@435 1924 compute_stack_base(Rtemp);
duke@435 1925 ba( false, start_copying );
duke@435 1926 delayed()->cmp( Rtemp, Rlimit); // done? duplicated below
duke@435 1927
duke@435 1928 // note: must copy from low memory upwards
duke@435 1929 // On entry to loop,
duke@435 1930 // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
duke@435 1931 // Loop mutates Rtemp
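// Sketch of the copy loop below (illustrative):
//   for (p = new_stack_base; p != Lmonitors; p++)
//     *p = *(p + delta / wordSize);   // slide each word down by delta bytes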
duke@435 1932
duke@435 1933 bind( next);
duke@435 1934
duke@435 1935 st_ptr(Rtemp2, Rtemp, 0);
duke@435 1936 inc(Rtemp, wordSize);
duke@435 1937 cmp(Rtemp, Rlimit); // are we done? (duplicated above)
duke@435 1938
duke@435 1939 bind( start_copying );
duke@435 1940
duke@435 1941 brx( notEqual, true, pn, next );
duke@435 1942 delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
duke@435 1943
duke@435 1944 // done copying stack
duke@435 1945 }
duke@435 1946 }
duke@435 1947
duke@435 1948 // Locals
duke@435 1949 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
duke@435 1950 assert_not_delayed();
twisti@1861 1951 sll(index, Interpreter::logStackElementSize, index);
duke@435 1952 sub(Llocals, index, index);
twisti@1861 1953 ld_ptr(index, 0, dst);
duke@435 1954 // Note: index must hold the effective address--the iinc template uses it
duke@435 1955 }
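// Addressing sketch shared by the access_local_* and store_local_* routines:
//   addr(local[i]) = Llocals - (i << Interpreter::logStackElementSize)
// since interpreter locals grow from Llocals toward lower addresses.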
duke@435 1956
duke@435 1957 // Just like access_local_ptr but the tag is a returnAddress
duke@435 1958 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
duke@435 1959 Register dst ) {
duke@435 1960 assert_not_delayed();
twisti@1861 1961 sll(index, Interpreter::logStackElementSize, index);
duke@435 1962 sub(Llocals, index, index);
twisti@1861 1963 ld_ptr(index, 0, dst);
duke@435 1964 }
duke@435 1965
duke@435 1966 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
duke@435 1967 assert_not_delayed();
twisti@1861 1968 sll(index, Interpreter::logStackElementSize, index);
duke@435 1969 sub(Llocals, index, index);
twisti@1861 1970 ld(index, 0, dst);
duke@435 1971 // Note: index must hold the effective address--the iinc template uses it
duke@435 1972 }
duke@435 1973
duke@435 1974
duke@435 1975 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
duke@435 1976 assert_not_delayed();
twisti@1861 1977 sll(index, Interpreter::logStackElementSize, index);
duke@435 1978 sub(Llocals, index, index);
duke@435 1979 // First half stored at index n+1 (which grows down from Llocals[n])
duke@435 1980 load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
duke@435 1981 }
duke@435 1982
duke@435 1983
duke@435 1984 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
duke@435 1985 assert_not_delayed();
twisti@1861 1986 sll(index, Interpreter::logStackElementSize, index);
duke@435 1987 sub(Llocals, index, index);
twisti@1861 1988 ldf(FloatRegisterImpl::S, index, 0, dst);
duke@435 1989 }
duke@435 1990
duke@435 1991
duke@435 1992 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
duke@435 1993 assert_not_delayed();
twisti@1861 1994 sll(index, Interpreter::logStackElementSize, index);
duke@435 1995 sub(Llocals, index, index);
duke@435 1996 load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
duke@435 1997 }
duke@435 1998
duke@435 1999
duke@435 2000 #ifdef ASSERT
duke@435 2001 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
duke@435 2002 Label L;
duke@435 2003
duke@435 2004 assert(Rindex != Rscratch, "Registers cannot be same");
duke@435 2005 assert(Rindex != Rscratch1, "Registers cannot be same");
duke@435 2006 assert(Rlimit != Rscratch, "Registers cannot be same");
duke@435 2007 assert(Rlimit != Rscratch1, "Registers cannot be same");
duke@435 2008 assert(Rscratch1 != Rscratch, "Registers cannot be same");
duke@435 2009
duke@435 2010 // untested("reg area corruption");
duke@435 2011 add(Rindex, offset, Rscratch);
duke@435 2012 add(Rlimit, 64 + STACK_BIAS, Rscratch1);
duke@435 2013 cmp(Rscratch, Rscratch1);
duke@435 2014 brx(Assembler::greaterEqualUnsigned, false, pn, L);
duke@435 2015 delayed()->nop();
duke@435 2016 stop("regsave area is being clobbered");
duke@435 2017 bind(L);
duke@435 2018 }
duke@435 2019 #endif // ASSERT
duke@435 2020
duke@435 2021
duke@435 2022 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
duke@435 2023 assert_not_delayed();
twisti@1861 2024 sll(index, Interpreter::logStackElementSize, index);
duke@435 2025 sub(Llocals, index, index);
twisti@1861 2026 debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
twisti@1861 2027 st(src, index, 0);
duke@435 2028 }
duke@435 2029
twisti@1861 2030 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
duke@435 2031 assert_not_delayed();
twisti@1861 2032 sll(index, Interpreter::logStackElementSize, index);
duke@435 2033 sub(Llocals, index, index);
twisti@1861 2034 #ifdef ASSERT
twisti@1861 2035 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
twisti@1861 2036 #endif
twisti@1861 2037 st_ptr(src, index, 0);
duke@435 2038 }
duke@435 2039
duke@435 2040
duke@435 2041
twisti@1861 2042 void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
twisti@1861 2043 st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
duke@435 2044 }
duke@435 2045
duke@435 2046 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
duke@435 2047 assert_not_delayed();
twisti@1861 2048 sll(index, Interpreter::logStackElementSize, index);
duke@435 2049 sub(Llocals, index, index);
twisti@1861 2050 #ifdef ASSERT
duke@435 2051 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
twisti@1861 2052 #endif
duke@435 2053 store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
duke@435 2054 }
duke@435 2055
duke@435 2056
duke@435 2057 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
duke@435 2058 assert_not_delayed();
twisti@1861 2059 sll(index, Interpreter::logStackElementSize, index);
duke@435 2060 sub(Llocals, index, index);
twisti@1861 2061 #ifdef ASSERT
twisti@1861 2062 check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
twisti@1861 2063 #endif
twisti@1861 2064 stf(FloatRegisterImpl::S, src, index, 0);
duke@435 2065 }
duke@435 2066
duke@435 2067
duke@435 2068 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
duke@435 2069 assert_not_delayed();
twisti@1861 2070 sll(index, Interpreter::logStackElementSize, index);
duke@435 2071 sub(Llocals, index, index);
twisti@1861 2072 #ifdef ASSERT
duke@435 2073 check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
twisti@1861 2074 #endif
duke@435 2075 store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
duke@435 2076 }
duke@435 2077
duke@435 2078
duke@435 2079 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
duke@435 2080 const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
duke@435 2081 int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
duke@435 2082 return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
duke@435 2083 }
duke@435 2084
duke@435 2085
duke@435 2086 Address InterpreterMacroAssembler::top_most_monitor() {
twisti@1162 2087 return Address(FP, top_most_monitor_byte_offset());
duke@435 2088 }
duke@435 2089
duke@435 2090
duke@435 2091 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
duke@435 2092 add( Lesp, wordSize, Rdest );
duke@435 2093 }
duke@435 2094
duke@435 2095 #endif /* CC_INTERP */
duke@435 2096
duke@435 2097 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
duke@435 2098 assert(UseCompiler, "incrementing must be useful");
duke@435 2099 #ifdef CC_INTERP
twisti@1162 2100 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
twisti@1162 2101 InvocationCounter::counter_offset());
twisti@1162 2102 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
twisti@1162 2103 InvocationCounter::counter_offset());
duke@435 2104 #else
twisti@1162 2105 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
twisti@1162 2106 InvocationCounter::counter_offset());
twisti@1162 2107 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
twisti@1162 2108 InvocationCounter::counter_offset());
duke@435 2109 #endif /* CC_INTERP */
duke@435 2110 int delta = InvocationCounter::count_increment;
duke@435 2111
duke@435 2112 // Load each counter in a register
duke@435 2113 ld( inv_counter, Rtmp );
duke@435 2114 ld( be_counter, Rtmp2 );
duke@435 2115
duke@435 2116 assert( is_simm13( delta ), " delta too large.");
duke@435 2117
duke@435 2118 // Add the delta to the invocation counter and store the result
duke@435 2119 add( Rtmp, delta, Rtmp );
duke@435 2120
duke@435 2121 // Mask the backedge counter
duke@435 2122 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
duke@435 2123
duke@435 2124 // Store value
duke@435 2125 st( Rtmp, inv_counter);
duke@435 2126
duke@435 2127 // Add invocation counter + backedge counter
duke@435 2128 add( Rtmp, Rtmp2, Rtmp);
duke@435 2129
duke@435 2130 // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
duke@435 2131 }
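// Net effect (sketch): *inv_counter += count_increment, and Rtmp leaves with
//   (*inv_counter) + ((*be_counter) & count_mask_value)
// for the caller's threshold checks.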
duke@435 2132
duke@435 2133 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
duke@435 2134 assert(UseCompiler, "incrementing must be useful");
duke@435 2135 #ifdef CC_INTERP
twisti@1162 2136 Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
twisti@1162 2137 InvocationCounter::counter_offset());
twisti@1162 2138 Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
twisti@1162 2139 InvocationCounter::counter_offset());
duke@435 2140 #else
twisti@1162 2141 Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
twisti@1162 2142 InvocationCounter::counter_offset());
twisti@1162 2143 Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
twisti@1162 2144 InvocationCounter::counter_offset());
duke@435 2145 #endif /* CC_INTERP */
duke@435 2146 int delta = InvocationCounter::count_increment;
duke@435 2147 // Load each counter in a register
duke@435 2148 ld( be_counter, Rtmp );
duke@435 2149 ld( inv_counter, Rtmp2 );
duke@435 2150
duke@435 2151 // Add the delta to the backedge counter
duke@435 2152 add( Rtmp, delta, Rtmp );
duke@435 2153
duke@435 2154 // Mask the invocation counter, add to backedge counter
duke@435 2155 and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
duke@435 2156
duke@435 2157 // and store the result to memory
duke@435 2158 st( Rtmp, be_counter );
duke@435 2159
duke@435 2160 // Add backedge + invocation counter
duke@435 2161 add( Rtmp, Rtmp2, Rtmp );
duke@435 2162
duke@435 2163 // Note that this macro must leave backedge_count + invocation_count in Rtmp!
duke@435 2164 }
duke@435 2165
duke@435 2166 #ifndef CC_INTERP
duke@435 2167 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
duke@435 2168 Register branch_bcp,
duke@435 2169 Register Rtmp ) {
duke@435 2170 Label did_not_overflow;
duke@435 2171 Label overflow_with_error;
duke@435 2172 assert_different_registers(backedge_count, Rtmp, branch_bcp);
duke@435 2173 assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
duke@435 2174
twisti@1162 2175 AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
duke@435 2176 load_contents(limit, Rtmp);
duke@435 2177 cmp(backedge_count, Rtmp);
duke@435 2178 br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
duke@435 2179 delayed()->nop();
duke@435 2180
duke@435 2181 // When ProfileInterpreter is on, the backedge_count comes from the
duke@435 2182 // methodDataOop, whose value does not get reset on the call to
duke@435 2183 // frequency_counter_overflow(). To avoid excessive calls to the overflow
duke@435 2184 // routine while the method is being compiled, add a second test to make sure
duke@435 2185 // the overflow function is called only once every overflow_frequency.
duke@435 2186 if (ProfileInterpreter) {
duke@435 2187 const int overflow_frequency = 1024;
duke@435 2188 andcc(backedge_count, overflow_frequency-1, Rtmp);
duke@435 2189 brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
duke@435 2190 delayed()->nop();
duke@435 2191 }
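// i.e. with profiling on, the overflow routine runs at most once per
// overflow_frequency backedges: only when
// (backedge_count & (overflow_frequency - 1)) == 0.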
duke@435 2192
duke@435 2193 // overflow in loop, pass branch bytecode
duke@435 2194 set(6,Rtmp);
duke@435 2195 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
duke@435 2196
duke@435 2197 // Was an OSR adapter generated?
duke@435 2198 // O0 = osr nmethod
duke@435 2199 tst(O0);
duke@435 2200 brx(Assembler::zero, false, Assembler::pn, overflow_with_error);
duke@435 2201 delayed()->nop();
duke@435 2202
duke@435 2203 // Has the nmethod been invalidated already?
duke@435 2204 ld(O0, nmethod::entry_bci_offset(), O2);
duke@435 2205 cmp(O2, InvalidOSREntryBci);
duke@435 2206 br(Assembler::equal, false, Assembler::pn, overflow_with_error);
duke@435 2207 delayed()->nop();
duke@435 2208
duke@435 2209 // migrate the interpreter frame off of the stack
duke@435 2210
duke@435 2211 mov(G2_thread, L7);
duke@435 2212 // save nmethod
duke@435 2213 mov(O0, L6);
duke@435 2214 set_last_Java_frame(SP, noreg);
duke@435 2215 call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
duke@435 2216 reset_last_Java_frame();
duke@435 2217 mov(L7, G2_thread);
duke@435 2218
duke@435 2219 // move OSR nmethod to I1
duke@435 2220 mov(L6, I1);
duke@435 2221
duke@435 2222 // OSR buffer to I0
duke@435 2223 mov(O0, I0);
duke@435 2224
duke@435 2225 // remove the interpreter frame
duke@435 2226 restore(I5_savedSP, 0, SP);
duke@435 2227
duke@435 2228 // Jump to the osr code.
duke@435 2229 ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
duke@435 2230 jmp(O2, G0);
duke@435 2231 delayed()->nop();
duke@435 2232
duke@435 2233 bind(overflow_with_error);
duke@435 2234
duke@435 2235 bind(did_not_overflow);
duke@435 2236 }
duke@435 2237
duke@435 2238
duke@435 2239
duke@435 2240 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
duke@435 2241 if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
duke@435 2242 }
duke@435 2243
duke@435 2244
duke@435 2245 // local helper function for the verify_oop_or_return_address macro
duke@435 2246 static bool verify_return_address(methodOopDesc* m, int bci) {
duke@435 2247 #ifndef PRODUCT
duke@435 2248 address pc = (address)(m->constMethod())
duke@435 2249 + in_bytes(constMethodOopDesc::codes_offset()) + bci;
duke@435 2250 // assume it is a valid return address if it is inside m and is preceded by a jsr
duke@435 2251 if (!m->contains(pc)) return false;
duke@435 2252 address jsr_pc;
duke@435 2253 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
duke@435 2254 if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr) return true;
duke@435 2255 jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
duke@435 2256 if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr_w) return true;
duke@435 2257 #endif // PRODUCT
duke@435 2258 return false;
duke@435 2259 }
duke@435 2260
duke@435 2261
duke@435 2262 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
duke@435 2263 if (!VerifyOops) return;
duke@435 2264 // the VM documentation for the astore[_wide] bytecode allows
duke@435 2265 // the TOS to be not only an oop but also a return address
duke@435 2266 Label test;
duke@435 2267 Label skip;
duke@435 2268 // See if it is an address (in the current method):
duke@435 2269
duke@435 2270 mov(reg, Rtmp);
duke@435 2271 const int log2_bytecode_size_limit = 16;
duke@435 2272 srl(Rtmp, log2_bytecode_size_limit, Rtmp);
duke@435 2273 br_notnull( Rtmp, false, pt, test );
duke@435 2274 delayed()->nop();
duke@435 2275
duke@435 2276 // %%% should use call_VM_leaf here?
duke@435 2277 save_frame_and_mov(0, Lmethod, O0, reg, O1);
duke@435 2278 save_thread(L7_thread_cache);
duke@435 2279 call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
duke@435 2280 delayed()->nop();
duke@435 2281 restore_thread(L7_thread_cache);
duke@435 2282 br_notnull( O0, false, pt, skip );
duke@435 2283 delayed()->restore();
duke@435 2284
duke@435 2285 // Perform a more elaborate out-of-line call
duke@435 2286 // Not an address; verify it:
duke@435 2287 bind(test);
duke@435 2288 verify_oop(reg);
duke@435 2289 bind(skip);
duke@435 2290 }
duke@435 2291
duke@435 2292
duke@435 2293 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
duke@435 2294 if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
duke@435 2295 }
duke@435 2296 #endif /* CC_INTERP */
duke@435 2297
duke@435 2298 // Inline assembly for:
duke@435 2299 //
duke@435 2300 // if (thread is in interp_only_mode) {
duke@435 2301 // InterpreterRuntime::post_method_entry();
duke@435 2302 // }
duke@435 2303 // if (DTraceMethodProbes) {
twisti@1040 2304 // SharedRuntime::dtrace_method_entry(method, receiver);
duke@435 2305 // }
dcubed@1045 2306 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
dcubed@1045 2307 // SharedRuntime::rc_trace_method_entry(method, receiver);
coleenp@857 2308 // }
duke@435 2309
duke@435 2310 void InterpreterMacroAssembler::notify_method_entry() {
duke@435 2311
duke@435 2312 // C++ interpreter only uses this for native methods.
duke@435 2313
duke@435 2314 // Whenever JVMTI puts a thread in interp_only_mode, method
duke@435 2315 // entry/exit events are sent for that thread to track stack
duke@435 2316 // depth. If it is possible to enter interp_only_mode we add
duke@435 2317 // the code to check if the event should be sent.
duke@435 2318 if (JvmtiExport::can_post_interpreter_events()) {
duke@435 2319 Label L;
duke@435 2320 Register temp_reg = O5;
twisti@1162 2321 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
duke@435 2322 ld(interp_only, temp_reg);
duke@435 2323 tst(temp_reg);
duke@435 2324 br(zero, false, pt, L);
duke@435 2325 delayed()->nop();
duke@435 2326 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
duke@435 2327 bind(L);
duke@435 2328 }
duke@435 2329
duke@435 2330 {
duke@435 2331 Register temp_reg = O5;
duke@435 2332 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
duke@435 2333 call_VM_leaf(noreg,
duke@435 2334 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
duke@435 2335 G2_thread, Lmethod);
duke@435 2336 }
dcubed@1045 2337
dcubed@1045 2338 // RedefineClasses() tracing support for obsolete method entry
dcubed@1045 2339 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
dcubed@1045 2340 call_VM_leaf(noreg,
dcubed@1045 2341 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
dcubed@1045 2342 G2_thread, Lmethod);
dcubed@1045 2343 }
duke@435 2344 }
duke@435 2345
duke@435 2346
duke@435 2347 // Inline assembly for:
duke@435 2348 //
duke@435 2349 // if (thread is in interp_only_mode) {
duke@435 2350 // // save result
duke@435 2351 // InterpreterRuntime::post_method_exit();
duke@435 2352 // // restore result
duke@435 2353 // }
duke@435 2354 // if (DTraceMethodProbes) {
duke@435 2355 // SharedRuntime::dtrace_method_exit(thread, method);
duke@435 2356 // }
duke@435 2357 //
duke@435 2358 // Native methods have their result stored in d_tmp and l_tmp
duke@435 2359 // Java methods have their result stored in the expression stack
duke@435 2360
duke@435 2361 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
duke@435 2362 TosState state,
duke@435 2363 NotifyMethodExitMode mode) {
duke@435 2364 // C++ interpreter only uses this for native methods.
duke@435 2365
duke@435 2366 // Whenever JVMTI puts a thread in interp_only_mode, method
duke@435 2367 // entry/exit events are sent for that thread to track stack
duke@435 2368 // depth. If it is possible to enter interp_only_mode we add
duke@435 2369 // the code to check if the event should be sent.
duke@435 2370 if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
duke@435 2371 Label L;
duke@435 2372 Register temp_reg = O5;
twisti@1162 2373 const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
duke@435 2374 ld(interp_only, temp_reg);
duke@435 2375 tst(temp_reg);
duke@435 2376 br(zero, false, pt, L);
duke@435 2377 delayed()->nop();
duke@435 2378
duke@435 2379 // Note: frame::interpreter_frame_result has a dependency on how the
duke@435 2380 // method result is saved across the call to post_method_exit. For
duke@435 2381 // native methods it assumes the result registers are saved to
duke@435 2382 // l_scratch and d_scratch. If this changes then the interpreter_frame_result
duke@435 2383 // implementation will need to be updated too.
duke@435 2384
duke@435 2385 save_return_value(state, is_native_method);
duke@435 2386 call_VM(noreg,
duke@435 2387 CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
duke@435 2388 restore_return_value(state, is_native_method);
duke@435 2389 bind(L);
duke@435 2390 }
duke@435 2391
duke@435 2392 {
duke@435 2393 Register temp_reg = O5;
duke@435 2394 // Dtrace notification
duke@435 2395 SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
duke@435 2396 save_return_value(state, is_native_method);
duke@435 2397 call_VM_leaf(
duke@435 2398 noreg,
duke@435 2399 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
duke@435 2400 G2_thread, Lmethod);
duke@435 2401 restore_return_value(state, is_native_method);
duke@435 2402 }
duke@435 2403 }
duke@435 2404
duke@435 2405 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
duke@435 2406 #ifdef CC_INTERP
duke@435 2407 // result potentially in O0/O1: save it across calls
duke@435 2408 stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
duke@435 2409 #ifdef _LP64
duke@435 2410 stx(O0, STATE(_native_lresult));
duke@435 2411 #else
duke@435 2412 std(O0, STATE(_native_lresult));
duke@435 2413 #endif
duke@435 2414 #else // CC_INTERP
duke@435 2415 if (is_native_call) {
duke@435 2416 stf(FloatRegisterImpl::D, F0, d_tmp);
duke@435 2417 #ifdef _LP64
duke@435 2418 stx(O0, l_tmp);
duke@435 2419 #else
duke@435 2420 std(O0, l_tmp);
duke@435 2421 #endif
duke@435 2422 } else {
duke@435 2423 push(state);
duke@435 2424 }
duke@435 2425 #endif // CC_INTERP
duke@435 2426 }
duke@435 2427
duke@435 2428 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
duke@435 2429 #ifdef CC_INTERP
duke@435 2430 ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
duke@435 2431 #ifdef _LP64
duke@435 2432 ldx(STATE(_native_lresult), O0);
duke@435 2433 #else
duke@435 2434 ldd(STATE(_native_lresult), O0);
duke@435 2435 #endif
duke@435 2436 #else // CC_INTERP
duke@435 2437 if (is_native_call) {
duke@435 2438 ldf(FloatRegisterImpl::D, d_tmp, F0);
duke@435 2439 #ifdef _LP64
duke@435 2440 ldx(l_tmp, O0);
duke@435 2441 #else
duke@435 2442 ldd(l_tmp, O0);
duke@435 2443 #endif
duke@435 2444 } else {
duke@435 2445 pop(state);
duke@435 2446 }
duke@435 2447 #endif // CC_INTERP
duke@435 2448 }
iveresov@2138 2449
iveresov@2138 2450 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
iveresov@2138 2451 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
iveresov@2138 2452 int increment, int mask,
iveresov@2138 2453 Register scratch1, Register scratch2,
iveresov@2138 2454 Condition cond, Label *where) {
iveresov@2138 2455 ld(counter_addr, scratch1);
iveresov@2138 2456 add(scratch1, increment, scratch1);
iveresov@2138 2457 if (is_simm13(mask)) {
iveresov@2138 2458 andcc(scratch1, mask, G0);
iveresov@2138 2459 } else {
iveresov@2138 2460 set(mask, scratch2);
iveresov@2138 2461 andcc(scratch1, scratch2, G0);
iveresov@2138 2462 }
iveresov@2138 2463 br(cond, false, Assembler::pn, *where);
iveresov@2138 2464 delayed()->st(scratch1, counter_addr);
iveresov@2138 2465 }
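// Equivalent C sketch (illustrative; cond tests the masked value against zero):
//   int c = *counter_addr + increment;
//   *counter_addr = c;              // the store sits in the branch delay slot
//   if (cond(c & mask, 0)) goto *where;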
