duke@435 | 1 | /* |
jiangli@3826 | 2 | * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "interp_masm_sparc.hpp" |
stefank@2314 | 27 | #include "interpreter/interpreter.hpp" |
stefank@2314 | 28 | #include "interpreter/interpreterRuntime.hpp" |
stefank@2314 | 29 | #include "oops/arrayOop.hpp" |
stefank@2314 | 30 | #include "oops/markOop.hpp" |
coleenp@4037 | 31 | #include "oops/methodData.hpp" |
coleenp@4037 | 32 | #include "oops/method.hpp" |
stefank@2314 | 33 | #include "prims/jvmtiExport.hpp" |
stefank@2314 | 34 | #include "prims/jvmtiRedefineClassesTrace.hpp" |
stefank@2314 | 35 | #include "prims/jvmtiThreadState.hpp" |
stefank@2314 | 36 | #include "runtime/basicLock.hpp" |
stefank@2314 | 37 | #include "runtime/biasedLocking.hpp" |
stefank@2314 | 38 | #include "runtime/sharedRuntime.hpp" |
stefank@4299 | 39 | #include "runtime/thread.inline.hpp" |
duke@435 | 40 | |
duke@435 | 41 | #ifndef CC_INTERP |
duke@435 | 42 | #ifndef FAST_DISPATCH |
duke@435 | 43 | #define FAST_DISPATCH 1 |
duke@435 | 44 | #endif |
duke@435 | 45 | #undef FAST_DISPATCH |
duke@435 | 46 | |
duke@435 | 47 | // Implementation of InterpreterMacroAssembler |
duke@435 | 48 | |
duke@435 | 49 | // This file specializes the assembler with interpreter-specific macros |
duke@435 | 50 | |
twisti@1162 | 51 | const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS); |
twisti@1162 | 52 | const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS); |
duke@435 | 53 | |
duke@435 | 54 | #else // CC_INTERP |
duke@435 | 55 | #ifndef STATE |
duke@435 | 56 | #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name)) |
duke@435 | 57 | #endif // STATE |
duke@435 | 58 | |
duke@435 | 59 | #endif // CC_INTERP |
duke@435 | 60 | |
duke@435 | 61 | void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) { |
duke@435 | 62 | // Note: this algorithm is also used by C1's OSR entry sequence. |
duke@435 | 63 | // Any changes should also be applied to CodeEmitter::emit_osr_entry(). |
duke@435 | 64 | assert_different_registers(args_size, locals_size); |
duke@435 | 65 | // max_locals*2 for TAGS. Assumes that args_size has already been adjusted. |
duke@435 | 66 | subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words |
duke@435 | 67 | // Use br/mov combination because it works on both V8 and V9 and is |
duke@435 | 68 | // faster. |
duke@435 | 69 | Label skip_move; |
duke@435 | 70 | br(Assembler::negative, true, Assembler::pt, skip_move); |
duke@435 | 71 | delayed()->mov(G0, delta); |
duke@435 | 72 | bind(skip_move); |
duke@435 | 73 | round_to(delta, WordsPerLong); // make multiple of 2 (SP must be 2-word aligned) |
duke@435 | 74 | sll(delta, LogBytesPerWord, delta); // extra space for locals in bytes |
duke@435 | 75 | } |
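// A sketch of the computation above in plain C, assuming all sizes are in
// words (illustrative only, not part of the original source):
//
//   intptr_t delta = locals_size - args_size;  // non-argument locals
//   if (delta < 0) delta = 0;                  // the annulled br/mov pair above
//   delta = (delta + 1) & ~1;                  // round_to(delta, WordsPerLong)
//   delta <<= LogBytesPerWord;                 // words -> bytes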
duke@435 | 76 | |
duke@435 | 77 | #ifndef CC_INTERP |
duke@435 | 78 | |
duke@435 | 79 | // Dispatch code executed in the prolog of a bytecode which does not do its |
duke@435 | 80 | // own dispatch. The dispatch address is computed and placed in IdispatchAddress |
duke@435 | 81 | void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) { |
duke@435 | 82 | assert_not_delayed(); |
duke@435 | 83 | #ifdef FAST_DISPATCH |
duke@435 | 84 | // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since |
duke@435 | 85 | // they both use I2. |
duke@435 | 86 | assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive"); |
duke@435 | 87 | ldub(Lbcp, bcp_incr, Lbyte_code); // load next bytecode |
duke@435 | 88 | add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code); |
duke@435 | 89 | // add offset to correct dispatch table |
duke@435 | 90 | sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize |
duke@435 | 91 | ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr |
duke@435 | 92 | #else |
twisti@1162 | 93 | ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode |
duke@435 | 94 | // dispatch table to use |
twisti@1162 | 95 | AddressLiteral tbl(Interpreter::dispatch_table(state)); |
twisti@1162 | 96 | sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize |
twisti@1162 | 97 | set(tbl, G3_scratch); // compute addr of table |
twisti@1162 | 98 | ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress); // get entry addr |
duke@435 | 99 | #endif |
duke@435 | 100 | } |
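// In effect, the prolog performs (sketch):
//
//   unsigned char bc = Lbcp[bcp_incr];              // next bytecode
//   IdispatchAddress = dispatch_table(state)[bc];   // entry, jumped to later
//
// With FAST_DISPATCH the table base is kept in the IdispatchTables register
// and only a per-TosState offset is added, avoiding the set() of an
// absolute table address.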
duke@435 | 101 | |
duke@435 | 102 | |
duke@435 | 103 | // Dispatch code executed in the epilog of a bytecode which does not do its |
duke@435 | 104 | // own dispatch. The dispatch address in IdispatchAddress is used for the |
duke@435 | 105 | // dispatch. |
duke@435 | 106 | void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) { |
duke@435 | 107 | assert_not_delayed(); |
duke@435 | 108 | verify_FPU(1, state); |
duke@435 | 109 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 110 | jmp( IdispatchAddress, 0 ); |
duke@435 | 111 | if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); |
duke@435 | 112 | else delayed()->nop(); |
duke@435 | 113 | } |
duke@435 | 114 | |
duke@435 | 115 | |
duke@435 | 116 | void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) { |
duke@435 | 117 | // %%%% consider branching to a single shared dispatch stub (for each bcp_incr) |
duke@435 | 118 | assert_not_delayed(); |
duke@435 | 119 | ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode |
duke@435 | 120 | dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr); |
duke@435 | 121 | } |
duke@435 | 122 | |
duke@435 | 123 | |
duke@435 | 124 | void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) { |
duke@435 | 125 | // %%%% consider branching to a single shared dispatch stub (for each bcp_incr) |
duke@435 | 126 | assert_not_delayed(); |
duke@435 | 127 | ldub( Lbcp, bcp_incr, Lbyte_code); // load next bytecode |
duke@435 | 128 | dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false); |
duke@435 | 129 | } |
duke@435 | 130 | |
duke@435 | 131 | |
duke@435 | 132 | void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) { |
duke@435 | 133 | // load current bytecode |
duke@435 | 134 | assert_not_delayed(); |
duke@435 | 135 | ldub( Lbcp, 0, Lbyte_code); // load next bytecode |
duke@435 | 136 | dispatch_base(state, table); |
duke@435 | 137 | } |
duke@435 | 138 | |
duke@435 | 139 | |
duke@435 | 140 | void InterpreterMacroAssembler::call_VM_leaf_base( |
duke@435 | 141 | Register java_thread, |
duke@435 | 142 | address entry_point, |
duke@435 | 143 | int number_of_arguments |
duke@435 | 144 | ) { |
duke@435 | 145 | if (!java_thread->is_valid()) |
duke@435 | 146 | java_thread = L7_thread_cache; |
duke@435 | 147 | // super call |
duke@435 | 148 | MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments); |
duke@435 | 149 | } |
duke@435 | 150 | |
duke@435 | 151 | |
duke@435 | 152 | void InterpreterMacroAssembler::call_VM_base( |
duke@435 | 153 | Register oop_result, |
duke@435 | 154 | Register java_thread, |
duke@435 | 155 | Register last_java_sp, |
duke@435 | 156 | address entry_point, |
duke@435 | 157 | int number_of_arguments, |
duke@435 | 158 | bool check_exception |
duke@435 | 159 | ) { |
duke@435 | 160 | if (!java_thread->is_valid()) |
duke@435 | 161 | java_thread = L7_thread_cache; |
duke@435 | 162 | // See class ThreadInVMfromInterpreter, which assumes that the interpreter |
duke@435 | 163 | // takes responsibility for setting its own thread-state on call-out. |
duke@435 | 164 | // However, ThreadInVMfromInterpreter resets the state to "in_Java". |
duke@435 | 165 | |
duke@435 | 166 | //save_bcp(); // save bcp |
duke@435 | 167 | MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception); |
duke@435 | 168 | //restore_bcp(); // restore bcp |
duke@435 | 169 | //restore_locals(); // restore locals pointer |
duke@435 | 170 | } |
duke@435 | 171 | |
duke@435 | 172 | |
duke@435 | 173 | void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) { |
duke@435 | 174 | if (JvmtiExport::can_pop_frame()) { |
duke@435 | 175 | Label L; |
duke@435 | 176 | |
duke@435 | 177 | // Check the "pending popframe condition" flag in the current thread |
twisti@1162 | 178 | ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg); |
duke@435 | 179 | |
duke@435 | 180 | // Initiate popframe handling only if it is not already being processed. If the flag |
duke@435 | 181 | // has the popframe_processing bit set, it means that this code is called *during* popframe |
duke@435 | 182 | // handling - we don't want to reenter. |
duke@435 | 183 | btst(JavaThread::popframe_pending_bit, scratch_reg); |
duke@435 | 184 | br(zero, false, pt, L); |
duke@435 | 185 | delayed()->nop(); |
duke@435 | 186 | btst(JavaThread::popframe_processing_bit, scratch_reg); |
duke@435 | 187 | br(notZero, false, pt, L); |
duke@435 | 188 | delayed()->nop(); |
duke@435 | 189 | |
duke@435 | 190 | // Call Interpreter::remove_activation_preserving_args_entry() to get the |
duke@435 | 191 | // address of the same-named entrypoint in the generated interpreter code. |
duke@435 | 192 | call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry)); |
duke@435 | 193 | |
duke@435 | 194 | // Jump to Interpreter::_remove_activation_preserving_args_entry |
duke@435 | 195 | jmpl(O0, G0, G0); |
duke@435 | 196 | delayed()->nop(); |
duke@435 | 197 | bind(L); |
duke@435 | 198 | } |
duke@435 | 199 | } |
duke@435 | 200 | |
duke@435 | 201 | |
duke@435 | 202 | void InterpreterMacroAssembler::load_earlyret_value(TosState state) { |
duke@435 | 203 | Register thr_state = G4_scratch; |
twisti@1162 | 204 | ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); |
twisti@1162 | 205 | const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset()); |
twisti@1162 | 206 | const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset()); |
twisti@1162 | 207 | const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset()); |
duke@435 | 208 | switch (state) { |
duke@435 | 209 | case ltos: ld_long(val_addr, Otos_l); break; |
duke@435 | 210 | case atos: ld_ptr(oop_addr, Otos_l); |
duke@435 | 211 | st_ptr(G0, oop_addr); break; |
duke@435 | 212 | case btos: // fall through |
duke@435 | 213 | case ctos: // fall through |
duke@435 | 214 | case stos: // fall through |
duke@435 | 215 | case itos: ld(val_addr, Otos_l1); break; |
duke@435 | 216 | case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break; |
duke@435 | 217 | case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break; |
duke@435 | 218 | case vtos: /* nothing to do */ break; |
duke@435 | 219 | default : ShouldNotReachHere(); |
duke@435 | 220 | } |
duke@435 | 221 | // Clean up tos value in the jvmti thread state |
duke@435 | 222 | or3(G0, ilgl, G3_scratch); |
duke@435 | 223 | stw(G3_scratch, tos_addr); |
duke@435 | 224 | st_long(G0, val_addr); |
duke@435 | 225 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 226 | } |
duke@435 | 227 | |
duke@435 | 228 | |
duke@435 | 229 | void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) { |
duke@435 | 230 | if (JvmtiExport::can_force_early_return()) { |
duke@435 | 231 | Label L; |
duke@435 | 232 | Register thr_state = G3_scratch; |
twisti@1162 | 233 | ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state); |
kvn@3037 | 234 | br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit; |
duke@435 | 235 | |
duke@435 | 236 | // Initiate earlyret handling only if it is not already being processed. |
duke@435 | 237 | // If the flag has the earlyret_processing bit set, it means that this code |
duke@435 | 238 | // is called *during* earlyret handling - we don't want to reenter. |
twisti@1162 | 239 | ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch); |
kvn@3037 | 240 | cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L); |
duke@435 | 241 | |
duke@435 | 242 | // Call Interpreter::remove_activation_early_entry() to get the address of the |
duke@435 | 243 | // same-named entrypoint in the generated interpreter code |
twisti@1162 | 244 | ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1); |
duke@435 | 245 | call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1); |
duke@435 | 246 | |
duke@435 | 247 | // Jump to Interpreter::_remove_activation_early_entry |
duke@435 | 248 | jmpl(O0, G0, G0); |
duke@435 | 249 | delayed()->nop(); |
duke@435 | 250 | bind(L); |
duke@435 | 251 | } |
duke@435 | 252 | } |
duke@435 | 253 | |
duke@435 | 254 | |
twisti@1730 | 255 | void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) { |
duke@435 | 256 | mov(arg_1, O0); |
twisti@1730 | 257 | mov(arg_2, O1); |
twisti@1730 | 258 | MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2); |
duke@435 | 259 | } |
duke@435 | 260 | #endif /* CC_INTERP */ |
duke@435 | 261 | |
duke@435 | 262 | |
duke@435 | 263 | #ifndef CC_INTERP |
duke@435 | 264 | |
duke@435 | 265 | void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) { |
duke@435 | 266 | assert_not_delayed(); |
duke@435 | 267 | dispatch_Lbyte_code(state, table); |
duke@435 | 268 | } |
duke@435 | 269 | |
duke@435 | 270 | |
duke@435 | 271 | void InterpreterMacroAssembler::dispatch_normal(TosState state) { |
duke@435 | 272 | dispatch_base(state, Interpreter::normal_table(state)); |
duke@435 | 273 | } |
duke@435 | 274 | |
duke@435 | 275 | |
duke@435 | 276 | void InterpreterMacroAssembler::dispatch_only(TosState state) { |
duke@435 | 277 | dispatch_base(state, Interpreter::dispatch_table(state)); |
duke@435 | 278 | } |
duke@435 | 279 | |
duke@435 | 280 | |
duke@435 | 281 | // common code to dispatch and dispatch_only |
duke@435 | 282 | // dispatch value in Lbyte_code and increment Lbcp |
duke@435 | 283 | |
duke@435 | 284 | void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) { |
duke@435 | 285 | verify_FPU(1, state); |
duke@435 | 286 | // %%%%% maybe implement +VerifyActivationFrameSize here |
duke@435 | 287 | //verify_thread(); //too slow; we will just verify on method entry & exit |
duke@435 | 288 | if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 289 | #ifdef FAST_DISPATCH |
duke@435 | 290 | if (table == Interpreter::dispatch_table(state)) { |
duke@435 | 291 | // use IdispatchTables |
duke@435 | 292 | add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code); |
duke@435 | 293 | // add offset to correct dispatch table |
duke@435 | 294 | sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize |
duke@435 | 295 | ld_ptr(IdispatchTables, Lbyte_code, G3_scratch); // get entry addr |
duke@435 | 296 | } else { |
duke@435 | 297 | #endif |
duke@435 | 298 | // dispatch table to use |
twisti@1162 | 299 | AddressLiteral tbl(table); |
duke@435 | 300 | sll(Lbyte_code, LogBytesPerWord, Lbyte_code); // multiply by wordSize |
twisti@1162 | 301 | set(tbl, G3_scratch); // compute addr of table |
duke@435 | 302 | ld_ptr(G3_scratch, Lbyte_code, G3_scratch); // get entry addr |
duke@435 | 303 | #ifdef FAST_DISPATCH |
duke@435 | 304 | } |
duke@435 | 305 | #endif |
duke@435 | 306 | jmp( G3_scratch, 0 ); |
duke@435 | 307 | if (bcp_incr != 0) delayed()->inc(Lbcp, bcp_incr); |
duke@435 | 308 | else delayed()->nop(); |
duke@435 | 309 | } |
duke@435 | 310 | |
duke@435 | 311 | |
duke@435 | 312 | // Helpers for expression stack |
duke@435 | 313 | |
duke@435 | 314 | // Longs and doubles are Category 2 computational types in the |
duke@435 | 315 | // JVM specification (section 3.11.1) and take 2 expression stack or |
duke@435 | 316 | // local slots. |
duke@435 | 317 | // Aligning them on 32-bit with tagged stacks is hard because the code generated |
duke@435 | 318 | // for the dup* bytecodes depends on what types are already on the stack. |
duke@435 | 319 | // If the types are split into the two stack/local slots, that is much easier |
duke@435 | 320 | // (and we can use 0 for non-reference tags). |
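// For example, with the split layout a long on the expression stack simply
// occupies two consecutive one-word slots (high word first, per the SPARC
// convention noted below), so the dup* templates can copy raw slots without
// knowing whether they hold two ints or the two halves of one long.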
duke@435 | 321 | |
duke@435 | 322 | // Known good alignment in _LP64 but unknown otherwise |
duke@435 | 323 | void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) { |
duke@435 | 324 | assert_not_delayed(); |
duke@435 | 325 | |
duke@435 | 326 | #ifdef _LP64 |
duke@435 | 327 | ldf(FloatRegisterImpl::D, r1, offset, d); |
duke@435 | 328 | #else |
duke@435 | 329 | ldf(FloatRegisterImpl::S, r1, offset, d); |
twisti@1861 | 330 | ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor()); |
duke@435 | 331 | #endif |
duke@435 | 332 | } |
duke@435 | 333 | |
duke@435 | 334 | // Known good alignment in _LP64 but unknown otherwise |
duke@435 | 335 | void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) { |
duke@435 | 336 | assert_not_delayed(); |
duke@435 | 337 | |
duke@435 | 338 | #ifdef _LP64 |
duke@435 | 339 | stf(FloatRegisterImpl::D, d, r1, offset); |
duke@435 | 340 | // store something more useful here |
twisti@1861 | 341 | debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) |
duke@435 | 342 | #else |
duke@435 | 343 | stf(FloatRegisterImpl::S, d, r1, offset); |
twisti@1861 | 344 | stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize); |
duke@435 | 345 | #endif |
duke@435 | 346 | } |
duke@435 | 347 | |
duke@435 | 348 | |
duke@435 | 349 | // Known good alignment in _LP64 but unknown otherwise |
duke@435 | 350 | void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) { |
duke@435 | 351 | assert_not_delayed(); |
duke@435 | 352 | #ifdef _LP64 |
duke@435 | 353 | ldx(r1, offset, rd); |
duke@435 | 354 | #else |
duke@435 | 355 | ld(r1, offset, rd); |
twisti@1861 | 356 | ld(r1, offset + Interpreter::stackElementSize, rd->successor()); |
duke@435 | 357 | #endif |
duke@435 | 358 | } |
duke@435 | 359 | |
duke@435 | 360 | // Known good alignment in _LP64 but unknown otherwise |
duke@435 | 361 | void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) { |
duke@435 | 362 | assert_not_delayed(); |
duke@435 | 363 | |
duke@435 | 364 | #ifdef _LP64 |
duke@435 | 365 | stx(l, r1, offset); |
duke@435 | 366 | // store something more useful here |
twisti@1861 | 367 | debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);) |
duke@435 | 368 | #else |
duke@435 | 369 | st(l, r1, offset); |
twisti@1861 | 370 | st(l->successor(), r1, offset + Interpreter::stackElementSize); |
duke@435 | 371 | #endif |
duke@435 | 372 | } |
duke@435 | 373 | |
duke@435 | 374 | void InterpreterMacroAssembler::pop_i(Register r) { |
duke@435 | 375 | assert_not_delayed(); |
duke@435 | 376 | ld(Lesp, Interpreter::expr_offset_in_bytes(0), r); |
twisti@1861 | 377 | inc(Lesp, Interpreter::stackElementSize); |
duke@435 | 378 | debug_only(verify_esp(Lesp)); |
duke@435 | 379 | } |
duke@435 | 380 | |
duke@435 | 381 | void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) { |
duke@435 | 382 | assert_not_delayed(); |
duke@435 | 383 | ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r); |
twisti@1861 | 384 | inc(Lesp, Interpreter::stackElementSize); |
duke@435 | 385 | debug_only(verify_esp(Lesp)); |
duke@435 | 386 | } |
duke@435 | 387 | |
duke@435 | 388 | void InterpreterMacroAssembler::pop_l(Register r) { |
duke@435 | 389 | assert_not_delayed(); |
duke@435 | 390 | load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r); |
twisti@1861 | 391 | inc(Lesp, 2*Interpreter::stackElementSize); |
duke@435 | 392 | debug_only(verify_esp(Lesp)); |
duke@435 | 393 | } |
duke@435 | 394 | |
duke@435 | 395 | |
duke@435 | 396 | void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) { |
duke@435 | 397 | assert_not_delayed(); |
duke@435 | 398 | ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f); |
twisti@1861 | 399 | inc(Lesp, Interpreter::stackElementSize); |
duke@435 | 400 | debug_only(verify_esp(Lesp)); |
duke@435 | 401 | } |
duke@435 | 402 | |
duke@435 | 403 | |
duke@435 | 404 | void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) { |
duke@435 | 405 | assert_not_delayed(); |
duke@435 | 406 | load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f); |
twisti@1861 | 407 | inc(Lesp, 2*Interpreter::stackElementSize); |
duke@435 | 408 | debug_only(verify_esp(Lesp)); |
duke@435 | 409 | } |
duke@435 | 410 | |
duke@435 | 411 | |
duke@435 | 412 | void InterpreterMacroAssembler::push_i(Register r) { |
duke@435 | 413 | assert_not_delayed(); |
duke@435 | 414 | debug_only(verify_esp(Lesp)); |
twisti@1861 | 415 | st(r, Lesp, 0); |
twisti@1861 | 416 | dec(Lesp, Interpreter::stackElementSize); |
duke@435 | 417 | } |
duke@435 | 418 | |
duke@435 | 419 | void InterpreterMacroAssembler::push_ptr(Register r) { |
duke@435 | 420 | assert_not_delayed(); |
twisti@1861 | 421 | st_ptr(r, Lesp, 0); |
twisti@1861 | 422 | dec(Lesp, Interpreter::stackElementSize); |
duke@435 | 423 | } |
duke@435 | 424 | |
duke@435 | 425 | // remember: our convention for longs on SPARC is: |
duke@435 | 426 | // O0 (Otos_l1) has high-order part in first word, |
duke@435 | 427 | // O1 (Otos_l2) has low-order part in second word |
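// Illustration (32-bit only): after pushing the long 0x0123456789ABCDEF,
// Otos_l1/O0 holds 0x01234567 and Otos_l2/O1 holds 0x89ABCDEF, and
// store_unaligned_long() writes the two words to the expression stack in
// that same big-endian, memory-correct order.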
duke@435 | 428 | |
duke@435 | 429 | void InterpreterMacroAssembler::push_l(Register r) { |
duke@435 | 430 | assert_not_delayed(); |
duke@435 | 431 | debug_only(verify_esp(Lesp)); |
twisti@1861 | 432 | // Longs are stored in memory-correct order, even if unaligned. |
twisti@1861 | 433 | int offset = -Interpreter::stackElementSize; |
duke@435 | 434 | store_unaligned_long(r, Lesp, offset); |
twisti@1861 | 435 | dec(Lesp, 2 * Interpreter::stackElementSize); |
duke@435 | 436 | } |
duke@435 | 437 | |
duke@435 | 438 | |
duke@435 | 439 | void InterpreterMacroAssembler::push_f(FloatRegister f) { |
duke@435 | 440 | assert_not_delayed(); |
duke@435 | 441 | debug_only(verify_esp(Lesp)); |
twisti@1861 | 442 | stf(FloatRegisterImpl::S, f, Lesp, 0); |
twisti@1861 | 443 | dec(Lesp, Interpreter::stackElementSize); |
duke@435 | 444 | } |
duke@435 | 445 | |
duke@435 | 446 | |
duke@435 | 447 | void InterpreterMacroAssembler::push_d(FloatRegister d) { |
duke@435 | 448 | assert_not_delayed(); |
duke@435 | 449 | debug_only(verify_esp(Lesp)); |
twisti@1861 | 450 | // Doubles, like longs, are stored in memory-correct order, even if unaligned. |
twisti@1861 | 451 | int offset = -Interpreter::stackElementSize; |
duke@435 | 452 | store_unaligned_double(d, Lesp, offset); |
twisti@1861 | 453 | dec(Lesp, 2 * Interpreter::stackElementSize); |
duke@435 | 454 | } |
duke@435 | 455 | |
duke@435 | 456 | |
duke@435 | 457 | void InterpreterMacroAssembler::push(TosState state) { |
duke@435 | 458 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 459 | switch (state) { |
duke@435 | 460 | case atos: push_ptr(); break; |
duke@435 | 461 | case btos: push_i(); break; |
duke@435 | 462 | case ctos: |
duke@435 | 463 | case stos: push_i(); break; |
duke@435 | 464 | case itos: push_i(); break; |
duke@435 | 465 | case ltos: push_l(); break; |
duke@435 | 466 | case ftos: push_f(); break; |
duke@435 | 467 | case dtos: push_d(); break; |
duke@435 | 468 | case vtos: /* nothing to do */ break; |
duke@435 | 469 | default : ShouldNotReachHere(); |
duke@435 | 470 | } |
duke@435 | 471 | } |
duke@435 | 472 | |
duke@435 | 473 | |
duke@435 | 474 | void InterpreterMacroAssembler::pop(TosState state) { |
duke@435 | 475 | switch (state) { |
duke@435 | 476 | case atos: pop_ptr(); break; |
duke@435 | 477 | case btos: pop_i(); break; |
duke@435 | 478 | case ctos: |
duke@435 | 479 | case stos: pop_i(); break; |
duke@435 | 480 | case itos: pop_i(); break; |
duke@435 | 481 | case ltos: pop_l(); break; |
duke@435 | 482 | case ftos: pop_f(); break; |
duke@435 | 483 | case dtos: pop_d(); break; |
duke@435 | 484 | case vtos: /* nothing to do */ break; |
duke@435 | 485 | default : ShouldNotReachHere(); |
duke@435 | 486 | } |
duke@435 | 487 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 488 | } |
duke@435 | 489 | |
duke@435 | 490 | |
twisti@1861 | 491 | // Helpers for swap and dup |
twisti@1861 | 492 | void InterpreterMacroAssembler::load_ptr(int n, Register val) { |
duke@435 | 493 | ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val); |
duke@435 | 494 | } |
twisti@1861 | 495 | void InterpreterMacroAssembler::store_ptr(int n, Register val) { |
duke@435 | 496 | st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n)); |
duke@435 | 497 | } |
duke@435 | 498 | |
duke@435 | 499 | |
duke@435 | 500 | void InterpreterMacroAssembler::load_receiver(Register param_count, |
duke@435 | 501 | Register recv) { |
twisti@1861 | 502 | sll(param_count, Interpreter::logStackElementSize, param_count); |
twisti@3969 | 503 | ld_ptr(Lesp, param_count, recv); // gets receiver oop |
duke@435 | 504 | } |
duke@435 | 505 | |
duke@435 | 506 | void InterpreterMacroAssembler::empty_expression_stack() { |
duke@435 | 507 | // Reset Lesp. |
duke@435 | 508 | sub( Lmonitors, wordSize, Lesp ); |
duke@435 | 509 | |
duke@435 | 510 | // Reset SP by subtracting more space from Lesp. |
duke@435 | 511 | Label done; |
twisti@1162 | 512 | assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!"); |
duke@435 | 513 | |
duke@435 | 514 | // A native method does not need to do this, since its callee does not change SP. |
coleenp@4037 | 515 | ld(Lmethod, Method::access_flags_offset(), Gframe_size); // Load access flags. |
duke@435 | 516 | btst(JVM_ACC_NATIVE, Gframe_size); |
duke@435 | 517 | br(Assembler::notZero, false, Assembler::pt, done); |
duke@435 | 518 | delayed()->nop(); |
duke@435 | 519 | |
duke@435 | 520 | // Compute max expression stack+register save area |
jiangli@4302 | 521 | ld_ptr(Lmethod, in_bytes(Method::const_offset()), Gframe_size); |
jiangli@4302 | 522 | lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size); // Load max stack. |
duke@435 | 523 | add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size ); |
duke@435 | 524 | |
duke@435 | 525 | // |
duke@435 | 526 | // now set up a stack frame with the size computed above |
duke@435 | 527 | // |
duke@435 | 528 | //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below |
duke@435 | 529 | sll( Gframe_size, LogBytesPerWord, Gframe_size ); |
duke@435 | 530 | sub( Lesp, Gframe_size, Gframe_size ); |
duke@435 | 531 | and3( Gframe_size, -(2 * wordSize), Gframe_size ); // align SP (downwards) to an 8/16-byte boundary |
duke@435 | 532 | debug_only(verify_sp(Gframe_size, G4_scratch)); |
duke@435 | 533 | #ifdef _LP64 |
duke@435 | 534 | sub(Gframe_size, STACK_BIAS, Gframe_size ); |
duke@435 | 535 | #endif |
duke@435 | 536 | mov(Gframe_size, SP); |
duke@435 | 537 | |
duke@435 | 538 | bind(done); |
duke@435 | 539 | } |
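// Sketch of the non-native path above, ignoring STACK_BIAS:
//
//   size_t words = max_stack + frame::memory_parameter_word_sp_offset;
//   SP = (Lesp - words * wordSize) & -(2 * wordSize);  // align down to 8/16 bytes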
duke@435 | 540 | |
duke@435 | 541 | |
duke@435 | 542 | #ifdef ASSERT |
duke@435 | 543 | void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) { |
duke@435 | 544 | Label Bad, OK; |
duke@435 | 545 | |
duke@435 | 546 | // Saved SP must be aligned. |
duke@435 | 547 | #ifdef _LP64 |
duke@435 | 548 | btst(2*BytesPerWord-1, Rsp); |
duke@435 | 549 | #else |
duke@435 | 550 | btst(LongAlignmentMask, Rsp); |
duke@435 | 551 | #endif |
duke@435 | 552 | br(Assembler::notZero, false, Assembler::pn, Bad); |
duke@435 | 553 | delayed()->nop(); |
duke@435 | 554 | |
duke@435 | 555 | // Saved SP, plus register window size, must not be above FP. |
duke@435 | 556 | add(Rsp, frame::register_save_words * wordSize, Rtemp); |
duke@435 | 557 | #ifdef _LP64 |
duke@435 | 558 | sub(Rtemp, STACK_BIAS, Rtemp); // Bias Rtemp before cmp to FP |
duke@435 | 559 | #endif |
kvn@3037 | 560 | cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad); |
duke@435 | 561 | |
duke@435 | 562 | // Saved SP must not be ridiculously below current SP. |
duke@435 | 563 | size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K); |
duke@435 | 564 | set(maxstack, Rtemp); |
duke@435 | 565 | sub(SP, Rtemp, Rtemp); |
duke@435 | 566 | #ifdef _LP64 |
duke@435 | 567 | add(Rtemp, STACK_BIAS, Rtemp); // Unbias Rtemp before cmp to Rsp |
duke@435 | 568 | #endif |
kvn@3037 | 569 | cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad); |
kvn@3037 | 570 | |
kvn@3037 | 571 | ba_short(OK); |
duke@435 | 572 | |
duke@435 | 573 | bind(Bad); |
duke@435 | 574 | stop("on return to interpreted call, restored SP is corrupted"); |
duke@435 | 575 | |
duke@435 | 576 | bind(OK); |
duke@435 | 577 | } |
duke@435 | 578 | |
duke@435 | 579 | |
duke@435 | 580 | void InterpreterMacroAssembler::verify_esp(Register Resp) { |
duke@435 | 581 | // about to read or write Resp[0] |
duke@435 | 582 | // make sure it is not in the monitors or the register save area |
duke@435 | 583 | Label OK1, OK2; |
duke@435 | 584 | |
duke@435 | 585 | cmp(Resp, Lmonitors); |
duke@435 | 586 | brx(Assembler::lessUnsigned, true, Assembler::pt, OK1); |
duke@435 | 587 | delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp); |
duke@435 | 588 | stop("too many pops: Lesp points into monitor area"); |
duke@435 | 589 | bind(OK1); |
duke@435 | 590 | #ifdef _LP64 |
duke@435 | 591 | sub(Resp, STACK_BIAS, Resp); |
duke@435 | 592 | #endif |
duke@435 | 593 | cmp(Resp, SP); |
duke@435 | 594 | brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2); |
duke@435 | 595 | delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp); |
duke@435 | 596 | stop("too many pushes: Lesp points into register window"); |
duke@435 | 597 | bind(OK2); |
duke@435 | 598 | } |
duke@435 | 599 | #endif // ASSERT |
duke@435 | 600 | |
duke@435 | 601 | // Load compiled (i2c) or interpreter entry when calling from interpreted and |
duke@435 | 602 | // do the call. Centralized so that all interpreter calls will do the same actions. |
duke@435 | 603 | // If jvmti single stepping is on for a thread we must not call compiled code. |
duke@435 | 604 | void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) { |
duke@435 | 605 | |
duke@435 | 606 | // Assume we want to go compiled if available |
duke@435 | 607 | |
coleenp@4037 | 608 | ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target); |
duke@435 | 609 | |
duke@435 | 610 | if (JvmtiExport::can_post_interpreter_events()) { |
duke@435 | 611 | // JVMTI events, such as single-stepping, are implemented partly by avoiding running |
duke@435 | 612 | // compiled code in threads for which the event is enabled. Check here for |
duke@435 | 613 | // interp_only_mode if these events CAN be enabled. |
duke@435 | 614 | verify_thread(); |
duke@435 | 615 | Label skip_compiled_code; |
duke@435 | 616 | |
twisti@1162 | 617 | const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
duke@435 | 618 | ld(interp_only, scratch); |
kvn@3037 | 619 | cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn); |
coleenp@4037 | 620 | delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target); |
duke@435 | 621 | bind(skip_compiled_code); |
duke@435 | 622 | } |
duke@435 | 623 | |
coleenp@4037 | 624 | // the i2c_adapters need Method* in G5_method (right? %%%) |
duke@435 | 625 | // do the call |
duke@435 | 626 | #ifdef ASSERT |
duke@435 | 627 | { |
duke@435 | 628 | Label ok; |
kvn@3037 | 629 | br_notnull_short(target, Assembler::pt, ok); |
duke@435 | 630 | stop("null entry point"); |
duke@435 | 631 | bind(ok); |
duke@435 | 632 | } |
duke@435 | 633 | #endif // ASSERT |
duke@435 | 634 | |
duke@435 | 635 | // Adjust Rret first so Llast_SP can be same as Rret |
duke@435 | 636 | add(Rret, -frame::pc_return_offset, O7); |
duke@435 | 637 | add(Lesp, BytesPerWord, Gargs); // setup parameter pointer |
duke@435 | 638 | // Record SP so we can remove any stack space allocated by adapter transition |
duke@435 | 639 | jmp(target, 0); |
duke@435 | 640 | delayed()->mov(SP, Llast_SP); |
duke@435 | 641 | } |
duke@435 | 642 | |
duke@435 | 643 | void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) { |
duke@435 | 644 | assert_not_delayed(); |
duke@435 | 645 | |
duke@435 | 646 | Label not_taken; |
duke@435 | 647 | if (ptr_compare) brx(cc, false, Assembler::pn, not_taken); |
duke@435 | 648 | else br (cc, false, Assembler::pn, not_taken); |
duke@435 | 649 | delayed()->nop(); |
duke@435 | 650 | |
duke@435 | 651 | TemplateTable::branch(false,false); |
duke@435 | 652 | |
duke@435 | 653 | bind(not_taken); |
duke@435 | 654 | |
duke@435 | 655 | profile_not_taken_branch(G3_scratch); |
duke@435 | 656 | } |
duke@435 | 657 | |
duke@435 | 658 | |
duke@435 | 659 | void InterpreterMacroAssembler::get_2_byte_integer_at_bcp( |
duke@435 | 660 | int bcp_offset, |
duke@435 | 661 | Register Rtmp, |
duke@435 | 662 | Register Rdst, |
duke@435 | 663 | signedOrNot is_signed, |
duke@435 | 664 | setCCOrNot should_set_CC ) { |
duke@435 | 665 | assert(Rtmp != Rdst, "need separate temp register"); |
duke@435 | 666 | assert_not_delayed(); |
duke@435 | 667 | switch (is_signed) { |
duke@435 | 668 | default: ShouldNotReachHere(); |
duke@435 | 669 | |
duke@435 | 670 | case Signed: ldsb( Lbcp, bcp_offset, Rdst ); break; // high byte |
duke@435 | 671 | case Unsigned: ldub( Lbcp, bcp_offset, Rdst ); break; // high byte |
duke@435 | 672 | } |
duke@435 | 673 | ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte |
duke@435 | 674 | sll( Rdst, BitsPerByte, Rdst); |
duke@435 | 675 | switch (should_set_CC ) { |
duke@435 | 676 | default: ShouldNotReachHere(); |
duke@435 | 677 | |
duke@435 | 678 | case set_CC: orcc( Rdst, Rtmp, Rdst ); break; |
duke@435 | 679 | case dont_set_CC: or3( Rdst, Rtmp, Rdst ); break; |
duke@435 | 680 | } |
duke@435 | 681 | } |
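// Equivalent C for the byte assembly above (bytecode operands are big-endian),
// with off == bcp_offset; orcc additionally sets the condition codes:
//
//   int hi = (is_signed == Signed) ? (jbyte)Lbcp[off] : (jubyte)Lbcp[off];
//   Rdst   = (hi << 8) | (jubyte)Lbcp[off + 1];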
duke@435 | 682 | |
duke@435 | 683 | |
duke@435 | 684 | void InterpreterMacroAssembler::get_4_byte_integer_at_bcp( |
duke@435 | 685 | int bcp_offset, |
duke@435 | 686 | Register Rtmp, |
duke@435 | 687 | Register Rdst, |
duke@435 | 688 | setCCOrNot should_set_CC ) { |
duke@435 | 689 | assert(Rtmp != Rdst, "need separate temp register"); |
duke@435 | 690 | assert_not_delayed(); |
duke@435 | 691 | add( Lbcp, bcp_offset, Rtmp); |
duke@435 | 692 | andcc( Rtmp, 3, G0); |
duke@435 | 693 | Label aligned; |
duke@435 | 694 | switch (should_set_CC ) { |
duke@435 | 695 | default: ShouldNotReachHere(); |
duke@435 | 696 | |
duke@435 | 697 | case set_CC: break; |
duke@435 | 698 | case dont_set_CC: break; |
duke@435 | 699 | } |
duke@435 | 700 | |
duke@435 | 701 | br(Assembler::zero, true, Assembler::pn, aligned); |
duke@435 | 702 | #ifdef _LP64 |
duke@435 | 703 | delayed()->ldsw(Rtmp, 0, Rdst); |
duke@435 | 704 | #else |
duke@435 | 705 | delayed()->ld(Rtmp, 0, Rdst); |
duke@435 | 706 | #endif |
duke@435 | 707 | |
duke@435 | 708 | ldub(Lbcp, bcp_offset + 3, Rdst); |
duke@435 | 709 | ldub(Lbcp, bcp_offset + 2, Rtmp); sll(Rtmp, 8, Rtmp); or3(Rtmp, Rdst, Rdst); |
duke@435 | 710 | ldub(Lbcp, bcp_offset + 1, Rtmp); sll(Rtmp, 16, Rtmp); or3(Rtmp, Rdst, Rdst); |
duke@435 | 711 | #ifdef _LP64 |
duke@435 | 712 | ldsb(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); |
duke@435 | 713 | #else |
duke@435 | 714 | // Unsigned load is faster than signed on some implementations |
duke@435 | 715 | ldub(Lbcp, bcp_offset + 0, Rtmp); sll(Rtmp, 24, Rtmp); |
duke@435 | 716 | #endif |
duke@435 | 717 | or3(Rtmp, Rdst, Rdst ); |
duke@435 | 718 | |
duke@435 | 719 | bind(aligned); |
duke@435 | 720 | if (should_set_CC == set_CC) tst(Rdst); |
duke@435 | 721 | } |
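// Sketch of the unaligned path above: the four operand bytes are loaded
// individually and merged big-endian,
//
//   Rdst = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;   // b0 = Lbcp[off]
//
// with b0 sign-extended on 64-bit so the result matches the aligned ldsw
// taken in the branch delay slot when (Lbcp + off) is 4-byte aligned.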
duke@435 | 722 | |
coleenp@4037 | 723 | void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index, |
jrose@1920 | 724 | int bcp_offset, size_t index_size) { |
twisti@1858 | 725 | assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
jrose@1920 | 726 | if (index_size == sizeof(u2)) { |
coleenp@4037 | 727 | get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned); |
jrose@1920 | 728 | } else if (index_size == sizeof(u4)) { |
twisti@2698 | 729 | assert(EnableInvokeDynamic, "giant index used only for JSR 292"); |
coleenp@4037 | 730 | get_4_byte_integer_at_bcp(bcp_offset, temp, index); |
coleenp@4037 | 731 | assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line"); |
coleenp@4037 | 732 | xor3(index, -1, index); // convert to plain index |
jrose@1920 | 733 | } else if (index_size == sizeof(u1)) { |
coleenp@4037 | 734 | ldub(Lbcp, bcp_offset, index); |
jrose@1920 | 735 | } else { |
jrose@1920 | 736 | ShouldNotReachHere(); |
twisti@1858 | 737 | } |
twisti@1858 | 738 | } |
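// Note on the u4 case: invokedynamic encodes its cache index bitwise
// complemented in the bytecode stream (the assert above checks that ~123
// decodes back to 123), and xor3(index, -1, index) computes exactly ~index.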
twisti@1858 | 739 | |
twisti@1858 | 740 | |
twisti@1858 | 741 | void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, |
jrose@1920 | 742 | int bcp_offset, size_t index_size) { |
duke@435 | 743 | assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
duke@435 | 744 | assert_different_registers(cache, tmp); |
duke@435 | 745 | assert_not_delayed(); |
jrose@1920 | 746 | get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size); |
twisti@1858 | 747 | // convert from field index to ConstantPoolCacheEntry index and from |
twisti@1858 | 748 | // word index to byte offset |
duke@435 | 749 | sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); |
duke@435 | 750 | add(LcpoolCache, tmp, cache); |
duke@435 | 751 | } |
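// i.e., as a sketch: cache = LcpoolCache + index * sizeof(ConstantPoolCacheEntry);
// the sll() implements the multiply since the entry size in bytes is a power
// of two. Callers fold in ConstantPoolCache::base_offset() themselves, cf.
// get_cache_and_index_and_bytecode_at_bcp below.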
duke@435 | 752 | |
duke@435 | 753 | |
twisti@3050 | 754 | void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache, |
twisti@3050 | 755 | Register temp, |
twisti@3050 | 756 | Register bytecode, |
twisti@3050 | 757 | int byte_no, |
twisti@3050 | 758 | int bcp_offset, |
twisti@3050 | 759 | size_t index_size) { |
twisti@3050 | 760 | get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size); |
coleenp@4037 | 761 | ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode); |
twisti@3050 | 762 | const int shift_count = (1 + byte_no) * BitsPerByte; |
twisti@3969 | 763 | assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) || |
twisti@3969 | 764 | (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift), |
twisti@3969 | 765 | "correct shift count"); |
twisti@3969 | 766 | srl(bytecode, shift_count, bytecode); |
twisti@3969 | 767 | assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask"); |
twisti@3969 | 768 | and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode); |
twisti@3050 | 769 | } |
twisti@3050 | 770 | |
twisti@3050 | 771 | |
twisti@1858 | 772 | void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, |
jrose@1920 | 773 | int bcp_offset, size_t index_size) { |
duke@435 | 774 | assert(bcp_offset > 0, "bcp is still pointing to start of bytecode"); |
duke@435 | 775 | assert_different_registers(cache, tmp); |
duke@435 | 776 | assert_not_delayed(); |
jrose@1920 | 777 | if (index_size == sizeof(u2)) { |
jrose@1920 | 778 | get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned); |
jrose@1920 | 779 | } else { |
jrose@1920 | 780 | ShouldNotReachHere(); // other sizes not supported here |
jrose@1920 | 781 | } |
duke@435 | 782 | // convert from field index to ConstantPoolCacheEntry index |
duke@435 | 783 | // and from word index to byte offset |
duke@435 | 784 | sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp); |
duke@435 | 785 | // skip past the header |
coleenp@4037 | 786 | add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp); |
duke@435 | 787 | // construct pointer to cache entry |
duke@435 | 788 | add(LcpoolCache, tmp, cache); |
duke@435 | 789 | } |
duke@435 | 790 | |
duke@435 | 791 | |
coleenp@4037 | 792 | // Load object from cpool->resolved_references(index) |
coleenp@4037 | 793 | void InterpreterMacroAssembler::load_resolved_reference_at_index( |
coleenp@4037 | 794 | Register result, Register index) { |
coleenp@4037 | 795 | assert_different_registers(result, index); |
coleenp@4037 | 796 | assert_not_delayed(); |
coleenp@4037 | 797 | // convert from field index to resolved_references() index and from |
coleenp@4037 | 798 | // word index to byte offset. Since this is a Java object, it can be compressed |
coleenp@4037 | 799 | Register tmp = index; // reuse |
coleenp@4037 | 800 | sll(index, LogBytesPerHeapOop, tmp); |
coleenp@4037 | 801 | get_constant_pool(result); |
coleenp@4037 | 802 | // load pointer for resolved_references[] objArray |
coleenp@4037 | 803 | ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result); |
coleenp@4037 | 804 | // JNIHandles::resolve(result) |
coleenp@4037 | 805 | ld_ptr(result, 0, result); |
coleenp@4037 | 806 | // Add in the index |
coleenp@4037 | 807 | add(result, tmp, result); |
coleenp@4037 | 808 | load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result); |
coleenp@4037 | 809 | } |
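// Roughly, in VM terms: result = cpool->resolved_references()->obj_at(index).
// The ld_ptr pair resolves the jobject handle to the backing objArray, and
// the final load_heap_oop reads base_offset + index * heapOopSize (hence
// LogBytesPerHeapOop above; the element may be a compressed oop).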
coleenp@4037 | 810 | |
coleenp@4037 | 811 | |
duke@435 | 812 | // Generate a subtype check: branch to ok_is_subtype if sub_klass is |
coleenp@548 | 813 | // a subtype of super_klass. Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2. |
duke@435 | 814 | void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, |
duke@435 | 815 | Register Rsuper_klass, |
duke@435 | 816 | Register Rtmp1, |
duke@435 | 817 | Register Rtmp2, |
duke@435 | 818 | Register Rtmp3, |
duke@435 | 819 | Label &ok_is_subtype ) { |
jrose@1079 | 820 | Label not_subtype; |
duke@435 | 821 | |
duke@435 | 822 | // Profile the not-null value's klass. |
duke@435 | 823 | profile_typecheck(Rsub_klass, Rtmp1); |
duke@435 | 824 | |
jrose@1079 | 825 | check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass, |
jrose@1079 | 826 | Rtmp1, Rtmp2, |
jrose@1079 | 827 | &ok_is_subtype, ¬_subtype, NULL); |
jrose@1079 | 828 | |
jrose@1079 | 829 | check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass, |
jrose@1079 | 830 | Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg, |
jrose@1079 | 831 | &ok_is_subtype, NULL); |
duke@435 | 832 | |
duke@435 | 833 | bind(not_subtype); |
duke@435 | 834 | profile_typecheck_failed(Rtmp1); |
duke@435 | 835 | } |
duke@435 | 836 | |
duke@435 | 837 | // Separate these two to allow for delay slot in middle |
duke@435 | 838 | // These are used to do a test and full jump to exception-throwing code. |
duke@435 | 839 | |
duke@435 | 840 | // %%%%% Could possibly reoptimize this by testing to see if could use |
duke@435 | 841 | // a single conditional branch (i.e. if span is small enough. |
duke@435 | 842 | // If you go that route, than get rid of the split and give up |
duke@435 | 843 | // on the delay-slot hack. |
duke@435 | 844 | |
duke@435 | 845 | void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition, |
duke@435 | 846 | Label& ok ) { |
duke@435 | 847 | assert_not_delayed(); |
duke@435 | 848 | br(ok_condition, true, pt, ok); |
duke@435 | 849 | // DELAY SLOT |
duke@435 | 850 | } |
duke@435 | 851 | |
duke@435 | 852 | void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition, |
duke@435 | 853 | Label& ok ) { |
duke@435 | 854 | assert_not_delayed(); |
duke@435 | 855 | bp( ok_condition, true, Assembler::xcc, pt, ok); |
duke@435 | 856 | // DELAY SLOT |
duke@435 | 857 | } |
duke@435 | 858 | |
duke@435 | 859 | void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition, |
duke@435 | 860 | Label& ok ) { |
duke@435 | 861 | assert_not_delayed(); |
duke@435 | 862 | brx(ok_condition, true, pt, ok); |
duke@435 | 863 | // DELAY SLOT |
duke@435 | 864 | } |
duke@435 | 865 | |
duke@435 | 866 | void InterpreterMacroAssembler::throw_if_not_2( address throw_entry_point, |
duke@435 | 867 | Register Rscratch, |
duke@435 | 868 | Label& ok ) { |
duke@435 | 869 | assert(throw_entry_point != NULL, "entry point must be generated by now"); |
twisti@1162 | 870 | AddressLiteral dest(throw_entry_point); |
twisti@1162 | 871 | jump_to(dest, Rscratch); |
duke@435 | 872 | delayed()->nop(); |
duke@435 | 873 | bind(ok); |
duke@435 | 874 | } |
duke@435 | 875 | |
duke@435 | 876 | |
duke@435 | 877 | // And if you cannot use the delay slot, here is a shorthand: |
duke@435 | 878 | |
duke@435 | 879 | void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition, |
duke@435 | 880 | address throw_entry_point, |
duke@435 | 881 | Register Rscratch ) { |
duke@435 | 882 | Label ok; |
duke@435 | 883 | if (ok_condition != never) { |
duke@435 | 884 | throw_if_not_1_icc( ok_condition, ok); |
duke@435 | 885 | delayed()->nop(); |
duke@435 | 886 | } |
duke@435 | 887 | throw_if_not_2( throw_entry_point, Rscratch, ok); |
duke@435 | 888 | } |
duke@435 | 889 | void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition, |
duke@435 | 890 | address throw_entry_point, |
duke@435 | 891 | Register Rscratch ) { |
duke@435 | 892 | Label ok; |
duke@435 | 893 | if (ok_condition != never) { |
duke@435 | 894 | throw_if_not_1_xcc( ok_condition, ok); |
duke@435 | 895 | delayed()->nop(); |
duke@435 | 896 | } |
duke@435 | 897 | throw_if_not_2( throw_entry_point, Rscratch, ok); |
duke@435 | 898 | } |
duke@435 | 899 | void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition, |
duke@435 | 900 | address throw_entry_point, |
duke@435 | 901 | Register Rscratch ) { |
duke@435 | 902 | Label ok; |
duke@435 | 903 | if (ok_condition != never) { |
duke@435 | 904 | throw_if_not_1_x( ok_condition, ok); |
duke@435 | 905 | delayed()->nop(); |
duke@435 | 906 | } |
duke@435 | 907 | throw_if_not_2( throw_entry_point, Rscratch, ok); |
duke@435 | 908 | } |
duke@435 | 909 | |
duke@435 | 910 | // Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res |
duke@435 | 911 | // Note: res is still short of the full address by the array's base offset into the object. |
duke@435 | 912 | |
duke@435 | 913 | void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) { |
duke@435 | 914 | assert_not_delayed(); |
duke@435 | 915 | |
duke@435 | 916 | verify_oop(array); |
duke@435 | 917 | #ifdef _LP64 |
duke@435 | 918 | // sign extend since tos (index) can be a 32-bit value |
duke@435 | 919 | sra(index, G0, index); |
duke@435 | 920 | #endif // _LP64 |
duke@435 | 921 | |
duke@435 | 922 | // check array |
duke@435 | 923 | Label ptr_ok; |
duke@435 | 924 | tst(array); |
duke@435 | 925 | throw_if_not_1_x( notZero, ptr_ok ); |
duke@435 | 926 | delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index |
duke@435 | 927 | throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok); |
duke@435 | 928 | |
duke@435 | 929 | Label index_ok; |
duke@435 | 930 | cmp(index, tmp); |
duke@435 | 931 | throw_if_not_1_icc( lessUnsigned, index_ok ); |
duke@435 | 932 | if (index_shift > 0) delayed()->sll(index, index_shift, index); |
duke@435 | 933 | else delayed()->add(array, index, res); // addr - const offset in index |
duke@435 | 934 | // convention: move aberrant index into G3_scratch for exception message |
duke@435 | 935 | mov(index, G3_scratch); |
duke@435 | 936 | throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok); |
duke@435 | 937 | |
duke@435 | 938 | // add offset if didn't do it in delay slot |
duke@435 | 939 | if (index_shift > 0) add(array, index, res); // addr - const offset in index |
duke@435 | 940 | } |
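// Sketch of the checks above, in Java-like terms:
//
//   if (array == null) throw NullPointerException;
//   if ((unsigned)index >= array.length) {       // lessUnsigned covers index < 0
//     G3_scratch = index;                        // for the exception message
//     throw ArrayIndexOutOfBoundsException;
//   }
//   res = array + (index << index_shift);        // base offset still missing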
duke@435 | 941 | |
duke@435 | 942 | |
duke@435 | 943 | void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) { |
duke@435 | 944 | assert_not_delayed(); |
duke@435 | 945 | |
duke@435 | 946 | // pop array |
duke@435 | 947 | pop_ptr(array); |
duke@435 | 948 | |
duke@435 | 949 | // check array |
duke@435 | 950 | index_check_without_pop(array, index, index_shift, tmp, res); |
duke@435 | 951 | } |
duke@435 | 952 | |
duke@435 | 953 | |
jiangli@3826 | 954 | void InterpreterMacroAssembler::get_const(Register Rdst) { |
coleenp@4037 | 955 | ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst); |
jiangli@3826 | 956 | } |
jiangli@3826 | 957 | |
jiangli@3826 | 958 | |
duke@435 | 959 | void InterpreterMacroAssembler::get_constant_pool(Register Rdst) { |
jiangli@3826 | 960 | get_const(Rdst); |
coleenp@4037 | 961 | ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst); |
duke@435 | 962 | } |
duke@435 | 963 | |
duke@435 | 964 | |
duke@435 | 965 | void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) { |
duke@435 | 966 | get_constant_pool(Rdst); |
coleenp@4037 | 967 | ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst); |
duke@435 | 968 | } |
duke@435 | 969 | |
duke@435 | 970 | |
duke@435 | 971 | void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) { |
duke@435 | 972 | get_constant_pool(Rcpool); |
coleenp@4037 | 973 | ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags); |
duke@435 | 974 | } |
duke@435 | 975 | |
duke@435 | 976 | |
duke@435 | 977 | // unlock if synchronized method |
duke@435 | 978 | // |
duke@435 | 979 | // Unlock the receiver if this is a synchronized method. |
duke@435 | 980 | // Unlock any Java monitors from synchronized blocks. |
duke@435 | 981 | // |
duke@435 | 982 | // If there are locked Java monitors |
duke@435 | 983 | // If throw_monitor_exception |
duke@435 | 984 | // throws IllegalMonitorStateException |
duke@435 | 985 | // Else if install_monitor_exception |
duke@435 | 986 | // installs IllegalMonitorStateException |
duke@435 | 987 | // Else |
duke@435 | 988 | // no error processing |
duke@435 | 989 | void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state, |
duke@435 | 990 | bool throw_monitor_exception, |
duke@435 | 991 | bool install_monitor_exception) { |
duke@435 | 992 | Label unlocked, unlock, no_unlock; |
duke@435 | 993 | |
duke@435 | 994 | // get the value of _do_not_unlock_if_synchronized into G1_scratch |
twisti@1162 | 995 | const Address do_not_unlock_if_synchronized(G2_thread, |
twisti@1162 | 996 | JavaThread::do_not_unlock_if_synchronized_offset()); |
duke@435 | 997 | ldbool(do_not_unlock_if_synchronized, G1_scratch); |
duke@435 | 998 | stbool(G0, do_not_unlock_if_synchronized); // reset the flag |
duke@435 | 999 | |
duke@435 | 1000 | // check if synchronized method |
coleenp@4037 | 1001 | const Address access_flags(Lmethod, Method::access_flags_offset()); |
duke@435 | 1002 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 1003 | push(state); // save tos |
twisti@1162 | 1004 | ld(access_flags, G3_scratch); // Load access flags. |
duke@435 | 1005 | btst(JVM_ACC_SYNCHRONIZED, G3_scratch); |
twisti@1162 | 1006 | br(zero, false, pt, unlocked); |
duke@435 | 1007 | delayed()->nop(); |
duke@435 | 1008 | |
duke@435 | 1009 | // Don't unlock anything if the _do_not_unlock_if_synchronized flag |
duke@435 | 1010 | // is set. |
kvn@3037 | 1011 | cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock); |
duke@435 | 1012 | delayed()->nop(); |
duke@435 | 1013 | |
duke@435 | 1014 | // BasicObjectLock will be first in list, since this is a synchronized method. However, need |
duke@435 | 1015 | // to check that the object has not been unlocked by an explicit monitorexit bytecode. |
duke@435 | 1016 | |
duke@435 | 1017 | //Intel: if (throw_monitor_exception) ... else ... |
duke@435 | 1018 | // Entry already unlocked, need to throw exception |
duke@435 | 1019 | //... |
duke@435 | 1020 | |
duke@435 | 1021 | // pass top-most monitor elem |
duke@435 | 1022 | add( top_most_monitor(), O1 ); |
duke@435 | 1023 | |
duke@435 | 1024 | ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch); |
kvn@3037 | 1025 | br_notnull_short(G3_scratch, pt, unlock); |
duke@435 | 1026 | |
duke@435 | 1027 | if (throw_monitor_exception) { |
duke@435 | 1028 | // Entry already unlocked need to throw an exception |
duke@435 | 1029 | MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); |
duke@435 | 1030 | should_not_reach_here(); |
duke@435 | 1031 | } else { |
duke@435 | 1032 | // Monitor already unlocked during a stack unroll. |
duke@435 | 1033 | // If requested, install an illegal_monitor_state_exception. |
duke@435 | 1034 | // Continue with stack unrolling. |
duke@435 | 1035 | if (install_monitor_exception) { |
duke@435 | 1036 | MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); |
duke@435 | 1037 | } |
kvn@3037 | 1038 | ba_short(unlocked); |
duke@435 | 1039 | } |
duke@435 | 1040 | |
duke@435 | 1041 | bind(unlock); |
duke@435 | 1042 | |
duke@435 | 1043 | unlock_object(O1); |
duke@435 | 1044 | |
duke@435 | 1045 | bind(unlocked); |
duke@435 | 1046 | |
duke@435 | 1047 | // I0, I1: Might contain return value |
duke@435 | 1048 | |
duke@435 | 1049 | // Check that all monitors are unlocked |
duke@435 | 1050 | { Label loop, exception, entry, restart; |
duke@435 | 1051 | |
duke@435 | 1052 | Register Rmptr = O0; |
duke@435 | 1053 | Register Rtemp = O1; |
duke@435 | 1054 | Register Rlimit = Lmonitors; |
duke@435 | 1055 | const jint delta = frame::interpreter_frame_monitor_size() * wordSize; |
duke@435 | 1056 | assert( (delta & LongAlignmentMask) == 0, |
duke@435 | 1057 | "sizeof BasicObjectLock must be even number of doublewords"); |
duke@435 | 1058 | |
duke@435 | 1059 | #ifdef ASSERT |
duke@435 | 1060 | add(top_most_monitor(), Rmptr, delta); |
duke@435 | 1061 | { Label L; |
duke@435 | 1062 | // ensure that Rmptr starts out above (or at) Rlimit |
kvn@3037 | 1063 | cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L); |
duke@435 | 1064 | stop("monitor stack has negative size"); |
duke@435 | 1065 | bind(L); |
duke@435 | 1066 | } |
duke@435 | 1067 | #endif |
duke@435 | 1068 | bind(restart); |
kvn@3037 | 1069 | ba(entry); |
duke@435 | 1070 | delayed()-> |
duke@435 | 1071 | add(top_most_monitor(), Rmptr, delta); // points to current entry, starting with bottom-most entry |
duke@435 | 1072 | |
duke@435 | 1073 | // Entry is still locked, need to throw exception |
duke@435 | 1074 | bind(exception); |
duke@435 | 1075 | if (throw_monitor_exception) { |
duke@435 | 1076 | MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception)); |
duke@435 | 1077 | should_not_reach_here(); |
duke@435 | 1078 | } else { |
duke@435 | 1079 | // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception. |
duke@435 | 1080 | // Unlock does not block, so don't have to worry about the frame |
duke@435 | 1081 | unlock_object(Rmptr); |
duke@435 | 1082 | if (install_monitor_exception) { |
duke@435 | 1083 | MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception)); |
duke@435 | 1084 | } |
kvn@3037 | 1085 | ba_short(restart); |
duke@435 | 1086 | } |
duke@435 | 1087 | |
duke@435 | 1088 | bind(loop); |
duke@435 | 1089 | cmp(Rtemp, G0); // check if current entry is used |
duke@435 | 1090 | brx(Assembler::notEqual, false, pn, exception); |
duke@435 | 1091 | delayed()-> |
duke@435 | 1092 | dec(Rmptr, delta); // otherwise advance to next entry |
duke@435 | 1093 | #ifdef ASSERT |
duke@435 | 1094 | { Label L; |
duke@435 | 1095 | // ensure that Rmptr has not somehow stepped below Rlimit |
kvn@3037 | 1096 | cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L); |
duke@435 | 1097 | stop("ran off the end of the monitor stack"); |
duke@435 | 1098 | bind(L); |
duke@435 | 1099 | } |
duke@435 | 1100 | #endif |
duke@435 | 1101 | bind(entry); |
duke@435 | 1102 | cmp(Rmptr, Rlimit); // check if bottom reached |
duke@435 | 1103 | brx(Assembler::notEqual, true, pn, loop); // if not at bottom then check this entry |
duke@435 | 1104 | delayed()-> |
duke@435 | 1105 | ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp); |
duke@435 | 1106 | } |
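// Editorial sketch of the scan above (delay slots elided), using the names
// from the code; this is a reading aid, not generated code:
//
//   for (Rmptr = top_most_monitor() + delta; Rmptr != Rlimit; Rmptr -= delta)
//     if (((BasicObjectLock*) (Rmptr - delta))->obj() != NULL)
//       goto exception;  // entry still owns an object => still locked
//
// The walk runs from the bottom-most entry toward Lmonitors; in the
// non-throwing case the exception path unlocks the offending entry and
// restarts the scan from the top.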
duke@435 | 1107 | |
duke@435 | 1108 | bind(no_unlock); |
duke@435 | 1109 | pop(state); |
duke@435 | 1110 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 1111 | } |
duke@435 | 1112 | |
duke@435 | 1113 | |
duke@435 | 1114 | // remove activation |
duke@435 | 1115 | // |
duke@435 | 1116 | // Unlock the receiver if this is a synchronized method. |
duke@435 | 1117 | // Unlock any Java monitors from synchronized blocks.
duke@435 | 1118 | // Remove the activation from the stack. |
duke@435 | 1119 | // |
duke@435 | 1120 | // If there are locked Java monitors |
duke@435 | 1121 | // If throw_monitor_exception |
duke@435 | 1122 | // throws IllegalMonitorStateException |
duke@435 | 1123 | // Else if install_monitor_exception |
duke@435 | 1124 | // installs IllegalMonitorStateException |
duke@435 | 1125 | // Else |
duke@435 | 1126 | // no error processing |
duke@435 | 1127 | void InterpreterMacroAssembler::remove_activation(TosState state, |
duke@435 | 1128 | bool throw_monitor_exception, |
duke@435 | 1129 | bool install_monitor_exception) { |
duke@435 | 1130 | |
duke@435 | 1131 | unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception); |
duke@435 | 1132 | |
duke@435 | 1133 | // save result (push state before jvmti call and pop it afterwards) and notify jvmti |
duke@435 | 1134 | notify_method_exit(false, state, NotifyJVMTI); |
duke@435 | 1135 | |
duke@435 | 1136 | interp_verify_oop(Otos_i, state, __FILE__, __LINE__); |
duke@435 | 1137 | verify_thread(); |
duke@435 | 1138 | |
duke@435 | 1139 | // return tos |
duke@435 | 1140 | assert(Otos_l1 == Otos_i, "adjust code below"); |
duke@435 | 1141 | switch (state) { |
duke@435 | 1142 | #ifdef _LP64 |
duke@435 | 1143 | case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0 |
duke@435 | 1144 | #else |
duke@435 | 1145 | case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through // O1 -> I1 |
duke@435 | 1146 | #endif |
duke@435 | 1147 | case btos: // fall through |
duke@435 | 1148 | case ctos: |
duke@435 | 1149 | case stos: // fall through |
duke@435 | 1150 | case atos: // fall through |
duke@435 | 1151 | case itos: mov(Otos_l1, Otos_l1->after_save()); break; // O0 -> I0 |
duke@435 | 1152 | case ftos: // fall through |
duke@435 | 1153 | case dtos: // fall through |
duke@435 | 1154 | case vtos: /* nothing to do */ break; |
duke@435 | 1155 | default : ShouldNotReachHere(); |
duke@435 | 1156 | } |
duke@435 | 1157 | |
duke@435 | 1158 | #if defined(COMPILER2) && !defined(_LP64) |
duke@435 | 1159 | if (state == ltos) { |
duke@435 | 1160 | // C2 expects long results in G1; we can't tell if we're returning to interpreted
duke@435 | 1161 | // or compiled code, so to be safe use both G1 and O0/O1
duke@435 | 1162 | |
duke@435 | 1163 | // Shift bits into high (msb) of G1 |
duke@435 | 1164 | sllx(Otos_l1->after_save(), 32, G1); |
duke@435 | 1165 | // Zero extend low bits |
duke@435 | 1166 | srl (Otos_l2->after_save(), 0, Otos_l2->after_save()); |
duke@435 | 1167 | or3 (Otos_l2->after_save(), G1, G1); |
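// Editorial worked example: with I0 (high word) = 0x00000001 and
// I1 (low word) = 0x80000000, the three instructions above leave
// G1 = 0x0000000180000000. The srl by 0 is what zero-extends the low
// word, so the or3 cannot smear a sign bit into the upper half.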
duke@435 | 1168 | } |
duke@435 | 1169 | #endif /* COMPILER2 */ |
duke@435 | 1170 | |
duke@435 | 1171 | } |
duke@435 | 1172 | #endif /* CC_INTERP */ |
duke@435 | 1173 | |
duke@435 | 1174 | |
duke@435 | 1175 | // Lock object |
duke@435 | 1176 | // |
duke@435 | 1177 | // Argument - lock_reg points to the BasicObjectLock to be used for locking;
duke@435 | 1178 | // it must be initialized with the object to lock
duke@435 | 1179 | void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) { |
duke@435 | 1180 | if (UseHeavyMonitors) { |
duke@435 | 1181 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); |
duke@435 | 1182 | } |
duke@435 | 1183 | else { |
duke@435 | 1184 | Register obj_reg = Object; |
duke@435 | 1185 | Register mark_reg = G4_scratch; |
duke@435 | 1186 | Register temp_reg = G1_scratch; |
twisti@1162 | 1187 | Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes()); |
twisti@1162 | 1188 | Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); |
duke@435 | 1189 | Label done; |
duke@435 | 1190 | |
duke@435 | 1191 | Label slow_case; |
duke@435 | 1192 | |
duke@435 | 1193 | assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg); |
duke@435 | 1194 | |
duke@435 | 1195 | // load markOop from object into mark_reg |
duke@435 | 1196 | ld_ptr(mark_addr, mark_reg); |
duke@435 | 1197 | |
duke@435 | 1198 | if (UseBiasedLocking) { |
duke@435 | 1199 | biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case); |
duke@435 | 1200 | } |
duke@435 | 1201 | |
duke@435 | 1202 | // get the address of basicLock on stack that will be stored in the object |
duke@435 | 1203 | // we need a temporary register here as we do not want to clobber lock_reg |
duke@435 | 1204 | // (cas clobbers the destination register) |
duke@435 | 1205 | mov(lock_reg, temp_reg); |
duke@435 | 1206 | // set mark reg to be (markOop of object | UNLOCK_VALUE) |
duke@435 | 1207 | or3(mark_reg, markOopDesc::unlocked_value, mark_reg); |
duke@435 | 1208 | // initialize the box (Must happen before we update the object mark!) |
duke@435 | 1209 | st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes()); |
duke@435 | 1210 | // compare and exchange object_addr, markOop | 1, stack address of basicLock |
duke@435 | 1211 | assert(mark_addr.disp() == 0, "cas must take a zero displacement"); |
duke@435 | 1212 | casx_under_lock(mark_addr.base(), mark_reg, temp_reg, |
duke@435 | 1213 | (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); |
duke@435 | 1214 | |
duke@435 | 1215 | // if the compare and exchange succeeded we are done (we saw an unlocked object) |
kvn@3037 | 1216 | cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done); |
duke@435 | 1217 | |
duke@435 | 1218 | // We did not see an unlocked object so try the fast recursive case |
duke@435 | 1219 | |
duke@435 | 1220 | // Check if owner is self by comparing the value in the markOop of object |
duke@435 | 1221 | // with the stack pointer |
duke@435 | 1222 | sub(temp_reg, SP, temp_reg); |
duke@435 | 1223 | #ifdef _LP64 |
duke@435 | 1224 | sub(temp_reg, STACK_BIAS, temp_reg); |
duke@435 | 1225 | #endif |
duke@435 | 1226 | assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); |
duke@435 | 1227 | |
duke@435 | 1228 | // Composite "andcc" test: |
duke@435 | 1229 | // (a) %sp -vs- markword proximity check, and, |
duke@435 | 1230 | // (b) verify mark word LSBs == 0 (Stack-locked). |
duke@435 | 1231 | // |
duke@435 | 1232 | // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size()) |
duke@435 | 1233 | // Note that the page size used for %sp proximity testing is arbitrary and is |
duke@435 | 1234 | // unrelated to the actual MMU page size. We use a 'logical' page size of |
duke@435 | 1235 | // 4096 bytes. F..FFF003 is designed to fit conveniently in the SIMM13 immediate |
duke@435 | 1236 | // field of the andcc instruction. |
duke@435 | 1237 | andcc (temp_reg, 0xFFFFF003, G0) ; |
duke@435 | 1238 | |
duke@435 | 1239 | // if the condition is true we can store 0 in the displaced header,
duke@435 | 1240 | // indicating a recursive lock, and we are done
duke@435 | 1241 | brx(Assembler::zero, true, Assembler::pt, done); |
duke@435 | 1242 | delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes()); |
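// Editorial worked example: 0xFFFFF003 is -4093, i.e. ~0xFFC sign-extended,
// so the andcc clears only bits [2..11]. If the failed CAS returned a mark
// that is a BasicLock address on our own stack, temp_reg now holds
// (mark - SP), e.g. 0x40, and 0x40 & ~0xFFC == 0: we take the recursive
// exit. An inflated monitor (low bits 10) or an address outside the 4K
// window leaves nonzero bits and falls through to the slow case.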
duke@435 | 1243 | |
duke@435 | 1244 | // none of the above fast optimizations worked so we have to get into the |
duke@435 | 1245 | // slow case of monitor enter |
duke@435 | 1246 | bind(slow_case); |
duke@435 | 1247 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg); |
duke@435 | 1248 | |
duke@435 | 1249 | bind(done); |
duke@435 | 1250 | } |
duke@435 | 1251 | } |
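// Editorial summary of the fast path above (biased locking elided), as
// C-like pseudocode with box = the BasicObjectLock passed in lock_reg:
//
//   box->lock.dhw = obj->mark | unlocked_value;
//   if (CAS(&obj->mark, /*expect*/ box->lock.dhw, /*new*/ box) succeeds)
//     goto done;                                  // saw an unlocked object
//   if (obj->mark is a stack lock on this thread's stack)
//     { box->lock.dhw = NULL; goto done; }        // recursive lock
//   InterpreterRuntime::monitorenter(box);        // slow path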
duke@435 | 1252 | |
duke@435 | 1253 | // Unlocks an object. Used in monitorexit bytecode and remove_activation. |
duke@435 | 1254 | // |
duke@435 | 1255 | // Argument - lock_reg points to the BasicObjectLock for lock |
duke@435 | 1256 | // Throw an IllegalMonitorStateException if the object is not locked by the current thread
duke@435 | 1257 | void InterpreterMacroAssembler::unlock_object(Register lock_reg) { |
duke@435 | 1258 | if (UseHeavyMonitors) { |
duke@435 | 1259 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); |
duke@435 | 1260 | } else { |
duke@435 | 1261 | Register obj_reg = G3_scratch; |
duke@435 | 1262 | Register mark_reg = G4_scratch; |
duke@435 | 1263 | Register displaced_header_reg = G1_scratch; |
twisti@1162 | 1264 | Address lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes()); |
twisti@1162 | 1265 | Address mark_addr(obj_reg, oopDesc::mark_offset_in_bytes()); |
duke@435 | 1266 | Label done; |
duke@435 | 1267 | |
duke@435 | 1268 | if (UseBiasedLocking) { |
duke@435 | 1269 | // load the object out of the BasicObjectLock |
duke@435 | 1270 | ld_ptr(lockobj_addr, obj_reg); |
duke@435 | 1271 | biased_locking_exit(mark_addr, mark_reg, done, true); |
duke@435 | 1272 | st_ptr(G0, lockobj_addr); // free entry |
duke@435 | 1273 | } |
duke@435 | 1274 | |
duke@435 | 1275 | // Test first if we are in the fast recursive case |
twisti@1162 | 1276 | Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes()); |
twisti@1162 | 1277 | ld_ptr(lock_addr, displaced_header_reg); |
duke@435 | 1278 | br_null(displaced_header_reg, true, Assembler::pn, done); |
duke@435 | 1279 | delayed()->st_ptr(G0, lockobj_addr); // free entry |
duke@435 | 1280 | |
duke@435 | 1281 | // See if it is still a lightweight lock; if so, we just unlock
duke@435 | 1282 | // the object and we are done
duke@435 | 1283 | |
duke@435 | 1284 | if (!UseBiasedLocking) { |
duke@435 | 1285 | // load the object out of the BasicObjectLock |
duke@435 | 1286 | ld_ptr(lockobj_addr, obj_reg); |
duke@435 | 1287 | } |
duke@435 | 1288 | |
duke@435 | 1289 | // we have the displaced header in displaced_header_reg
duke@435 | 1290 | // we expect to see the stack address of the basicLock (lock_reg) in the
duke@435 | 1291 | // mark word if the lock is still a lightweight lock
duke@435 | 1292 | assert(mark_addr.disp() == 0, "cas must take a zero displacement"); |
duke@435 | 1293 | casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg, |
duke@435 | 1294 | (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); |
duke@435 | 1295 | cmp(lock_reg, displaced_header_reg); |
duke@435 | 1296 | brx(Assembler::equal, true, Assembler::pn, done); |
duke@435 | 1297 | delayed()->st_ptr(G0, lockobj_addr); // free entry |
duke@435 | 1298 | |
duke@435 | 1299 | // The lock has been converted into a heavyweight lock and hence
duke@435 | 1300 | // we need to take the slow case
duke@435 | 1301 | |
duke@435 | 1302 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg); |
duke@435 | 1303 | |
duke@435 | 1304 | bind(done); |
duke@435 | 1305 | } |
duke@435 | 1306 | } |
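// Editorial summary of the fast path above (biased locking elided):
//
//   if (box->lock.dhw == NULL) { box->obj = NULL; goto done; }  // recursive
//   if (CAS(&obj->mark, /*expect*/ box, /*new*/ box->lock.dhw) succeeds)
//     { box->obj = NULL; goto done; }             // still a lightweight lock
//   InterpreterRuntime::monitorexit(box);         // inflated: slow path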
duke@435 | 1307 | |
duke@435 | 1308 | #ifndef CC_INTERP |
duke@435 | 1309 | |
coleenp@4037 | 1310 | // Get the method data pointer from the Method* and set
duke@435 | 1311 | // ImethodDataPtr to its value.
duke@435 | 1312 | |
iveresov@2438 | 1313 | void InterpreterMacroAssembler::set_method_data_pointer() { |
duke@435 | 1314 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1315 | Label get_continue; |
duke@435 | 1316 | |
coleenp@4037 | 1317 | ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr); |
duke@435 | 1318 | test_method_data_pointer(get_continue); |
coleenp@4037 | 1319 | add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr); |
duke@435 | 1320 | bind(get_continue); |
duke@435 | 1321 | } |
duke@435 | 1322 | |
duke@435 | 1323 | // Set the method data pointer for the current bcp. |
duke@435 | 1324 | |
duke@435 | 1325 | void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { |
duke@435 | 1326 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1327 | Label zero_continue; |
duke@435 | 1328 | |
duke@435 | 1329 | // Test MDO to avoid the call if it is NULL. |
coleenp@4037 | 1330 | ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr); |
duke@435 | 1331 | test_method_data_pointer(zero_continue); |
duke@435 | 1332 | call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp); |
coleenp@4037 | 1333 | add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr); |
iveresov@2438 | 1334 | add(ImethodDataPtr, O0, ImethodDataPtr); |
duke@435 | 1335 | bind(zero_continue); |
duke@435 | 1336 | } |
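// Net effect of the routine above (editorial sketch), when an MDO exists:
//   ImethodDataPtr = mdo + MethodData::data_offset()
//                        + bcp_to_di(Lmethod, Lbcp)   // di comes back in O0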
duke@435 | 1337 | |
duke@435 | 1338 | // Test ImethodDataPtr. If it is null, continue at the specified label |
duke@435 | 1339 | |
duke@435 | 1340 | void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) { |
duke@435 | 1341 | assert(ProfileInterpreter, "must be profiling interpreter"); |
kvn@3037 | 1342 | br_null_short(ImethodDataPtr, Assembler::pn, zero_continue); |
duke@435 | 1343 | } |
duke@435 | 1344 | |
duke@435 | 1345 | void InterpreterMacroAssembler::verify_method_data_pointer() { |
duke@435 | 1346 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1347 | #ifdef ASSERT |
duke@435 | 1348 | Label verify_continue; |
duke@435 | 1349 | test_method_data_pointer(verify_continue); |
duke@435 | 1350 | |
duke@435 | 1351 | // If the mdp is valid, it will point to a DataLayout header which is |
duke@435 | 1352 | // consistent with the bcp. The converse is highly probable also. |
duke@435 | 1353 | lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch); |
coleenp@4037 | 1354 | ld_ptr(Lmethod, Method::const_offset(), O5); |
coleenp@4037 | 1355 | add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch); |
duke@435 | 1356 | add(G3_scratch, O5, G3_scratch); |
duke@435 | 1357 | cmp(Lbcp, G3_scratch); |
duke@435 | 1358 | brx(Assembler::equal, false, Assembler::pt, verify_continue); |
duke@435 | 1359 | |
duke@435 | 1360 | Register temp_reg = O5; |
duke@435 | 1361 | delayed()->mov(ImethodDataPtr, temp_reg); |
duke@435 | 1362 | // %%% should use call_VM_leaf here? |
duke@435 | 1363 | //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr); |
duke@435 | 1364 | save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1); |
twisti@1162 | 1365 | Address d_save(FP, -sizeof(jdouble) + STACK_BIAS); |
duke@435 | 1366 | stf(FloatRegisterImpl::D, Ftos_d, d_save); |
duke@435 | 1367 | mov(temp_reg->after_save(), O2); |
duke@435 | 1368 | save_thread(L7_thread_cache); |
duke@435 | 1369 | call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none); |
duke@435 | 1370 | delayed()->nop(); |
duke@435 | 1371 | restore_thread(L7_thread_cache); |
duke@435 | 1372 | ldf(FloatRegisterImpl::D, d_save, Ftos_d); |
duke@435 | 1373 | restore(); |
duke@435 | 1374 | bind(verify_continue); |
duke@435 | 1375 | #endif // ASSERT |
duke@435 | 1376 | } |
duke@435 | 1377 | |
duke@435 | 1378 | void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count, |
duke@435 | 1379 | Register Rtmp, |
duke@435 | 1380 | Label &profile_continue) { |
duke@435 | 1381 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1382 | // Control will flow to "profile_continue" if the counter is less than the |
duke@435 | 1383 | // limit or if we call profile_method() |
duke@435 | 1384 | |
duke@435 | 1385 | Label done; |
duke@435 | 1386 | |
duke@435 | 1387 | // If no method data exists and the counter is high enough, make one
kvn@3037 | 1388 | br_notnull_short(ImethodDataPtr, Assembler::pn, done); |
duke@435 | 1389 | |
duke@435 | 1390 | // Test to see if we should create a method data oop |
twisti@1162 | 1391 | AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit); |
twisti@1162 | 1392 | sethi(profile_limit, Rtmp); |
twisti@1162 | 1393 | ld(Rtmp, profile_limit.low10(), Rtmp); |
kvn@4116 | 1394 | cmp(invocation_count, Rtmp); |
kvn@4116 | 1395 | // Use long branches because call_VM() code and following code generated by |
kvn@4116 | 1396 | // test_backedge_count_for_osr() is large in debug VM. |
kvn@4116 | 1397 | br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue); |
kvn@4116 | 1398 | delayed()->nop(); |
duke@435 | 1399 | |
duke@435 | 1400 | // Build it now. |
iveresov@2438 | 1401 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method)); |
iveresov@2438 | 1402 | set_method_data_pointer_for_bcp(); |
kvn@4116 | 1403 | ba(profile_continue); |
kvn@4116 | 1404 | delayed()->nop(); |
duke@435 | 1405 | bind(done); |
duke@435 | 1406 | } |
duke@435 | 1407 | |
duke@435 | 1408 | // Store a value at some constant offset from the method data pointer. |
duke@435 | 1409 | |
duke@435 | 1410 | void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) { |
duke@435 | 1411 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1412 | st_ptr(value, ImethodDataPtr, constant); |
duke@435 | 1413 | } |
duke@435 | 1414 | |
duke@435 | 1415 | void InterpreterMacroAssembler::increment_mdp_data_at(Address counter, |
duke@435 | 1416 | Register bumped_count, |
duke@435 | 1417 | bool decrement) { |
duke@435 | 1418 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1419 | |
duke@435 | 1420 | // Load the counter. |
duke@435 | 1421 | ld_ptr(counter, bumped_count); |
duke@435 | 1422 | |
duke@435 | 1423 | if (decrement) { |
duke@435 | 1424 | // Decrement the register. Set condition codes. |
duke@435 | 1425 | subcc(bumped_count, DataLayout::counter_increment, bumped_count); |
duke@435 | 1426 | |
duke@435 | 1427 | // If the decrement causes the counter to overflow, stay negative |
duke@435 | 1428 | Label L; |
duke@435 | 1429 | brx(Assembler::negative, true, Assembler::pn, L); |
duke@435 | 1430 | |
duke@435 | 1431 | // Store the decremented counter, if it is still negative. |
duke@435 | 1432 | delayed()->st_ptr(bumped_count, counter); |
duke@435 | 1433 | bind(L); |
duke@435 | 1434 | } else { |
duke@435 | 1435 | // Increment the register. Set carry flag. |
duke@435 | 1436 | addcc(bumped_count, DataLayout::counter_increment, bumped_count); |
duke@435 | 1437 | |
duke@435 | 1438 | // If the increment causes the counter to overflow, pull back by 1. |
duke@435 | 1439 | assert(DataLayout::counter_increment == 1, "subc works"); |
duke@435 | 1440 | subc(bumped_count, G0, bumped_count); |
duke@435 | 1441 | |
duke@435 | 1442 | // Store the incremented counter. |
duke@435 | 1443 | st_ptr(bumped_count, counter); |
duke@435 | 1444 | } |
duke@435 | 1445 | } |
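// Editorial note on the increment arithmetic above; counter_increment == 1:
//   addcc count, 1, count   ! carry is set only if count wrapped to 0
//   subc  count, 0, count   ! count = count - carry, undoing the wrap
// e.g. 0xFF..FF + 1 -> 0 with carry set, then 0 - 0 - 1 -> 0xFF..FF again,
// so the counter saturates at all-ones instead of wrapping around.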
duke@435 | 1446 | |
duke@435 | 1447 | // Increment the value at some constant offset from the method data pointer. |
duke@435 | 1448 | |
duke@435 | 1449 | void InterpreterMacroAssembler::increment_mdp_data_at(int constant, |
duke@435 | 1450 | Register bumped_count, |
duke@435 | 1451 | bool decrement) { |
duke@435 | 1452 | // Locate the counter at a fixed offset from the mdp: |
twisti@1162 | 1453 | Address counter(ImethodDataPtr, constant); |
duke@435 | 1454 | increment_mdp_data_at(counter, bumped_count, decrement); |
duke@435 | 1455 | } |
duke@435 | 1456 | |
duke@435 | 1457 | // Increment the value at some non-fixed (reg + constant) offset from |
duke@435 | 1458 | // the method data pointer. |
duke@435 | 1459 | |
duke@435 | 1460 | void InterpreterMacroAssembler::increment_mdp_data_at(Register reg, |
duke@435 | 1461 | int constant, |
duke@435 | 1462 | Register bumped_count, |
duke@435 | 1463 | Register scratch2, |
duke@435 | 1464 | bool decrement) { |
duke@435 | 1465 | // Add the constant to reg to get the offset. |
duke@435 | 1466 | add(ImethodDataPtr, reg, scratch2); |
twisti@1162 | 1467 | Address counter(scratch2, constant); |
duke@435 | 1468 | increment_mdp_data_at(counter, bumped_count, decrement); |
duke@435 | 1469 | } |
duke@435 | 1470 | |
duke@435 | 1471 | // Set a flag value at the current method data pointer position. |
duke@435 | 1472 | // Updates a single byte of the header, to avoid races with other header bits. |
duke@435 | 1473 | |
duke@435 | 1474 | void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant, |
duke@435 | 1475 | Register scratch) { |
duke@435 | 1476 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1477 | // Load the data header |
duke@435 | 1478 | ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch); |
duke@435 | 1479 | |
duke@435 | 1480 | // Set the flag |
duke@435 | 1481 | or3(scratch, flag_constant, scratch); |
duke@435 | 1482 | |
duke@435 | 1483 | // Store the modified header. |
duke@435 | 1484 | stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset())); |
duke@435 | 1485 | } |
duke@435 | 1486 | |
duke@435 | 1487 | // Test the location at some offset from the method data pointer. |
duke@435 | 1488 | // If it is not equal to value, branch to the not_equal_continue Label. |
duke@435 | 1489 | // Set condition codes to match the nullness of the loaded value. |
duke@435 | 1490 | |
duke@435 | 1491 | void InterpreterMacroAssembler::test_mdp_data_at(int offset, |
duke@435 | 1492 | Register value, |
duke@435 | 1493 | Label& not_equal_continue, |
duke@435 | 1494 | Register scratch) { |
duke@435 | 1495 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1496 | ld_ptr(ImethodDataPtr, offset, scratch); |
duke@435 | 1497 | cmp(value, scratch); |
duke@435 | 1498 | brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue); |
duke@435 | 1499 | delayed()->tst(scratch); |
duke@435 | 1500 | } |
duke@435 | 1501 | |
duke@435 | 1502 | // Update the method data pointer by the displacement located at some fixed |
duke@435 | 1503 | // offset from the method data pointer. |
duke@435 | 1504 | |
duke@435 | 1505 | void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp, |
duke@435 | 1506 | Register scratch) { |
duke@435 | 1507 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1508 | ld_ptr(ImethodDataPtr, offset_of_disp, scratch); |
duke@435 | 1509 | add(ImethodDataPtr, scratch, ImethodDataPtr); |
duke@435 | 1510 | } |
duke@435 | 1511 | |
duke@435 | 1512 | // Update the method data pointer by the displacement located at the |
duke@435 | 1513 | // offset (reg + offset_of_disp). |
duke@435 | 1514 | |
duke@435 | 1515 | void InterpreterMacroAssembler::update_mdp_by_offset(Register reg, |
duke@435 | 1516 | int offset_of_disp, |
duke@435 | 1517 | Register scratch) { |
duke@435 | 1518 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1519 | add(reg, offset_of_disp, scratch); |
duke@435 | 1520 | ld_ptr(ImethodDataPtr, scratch, scratch); |
duke@435 | 1521 | add(ImethodDataPtr, scratch, ImethodDataPtr); |
duke@435 | 1522 | } |
duke@435 | 1523 | |
duke@435 | 1524 | // Update the method data pointer by a simple constant displacement. |
duke@435 | 1525 | |
duke@435 | 1526 | void InterpreterMacroAssembler::update_mdp_by_constant(int constant) { |
duke@435 | 1527 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1528 | add(ImethodDataPtr, constant, ImethodDataPtr); |
duke@435 | 1529 | } |
duke@435 | 1530 | |
duke@435 | 1531 | // Update the method data pointer for a _ret bytecode whose target |
duke@435 | 1532 | // was not among our cached targets. |
duke@435 | 1533 | |
duke@435 | 1534 | void InterpreterMacroAssembler::update_mdp_for_ret(TosState state, |
duke@435 | 1535 | Register return_bci) { |
duke@435 | 1536 | assert(ProfileInterpreter, "must be profiling interpreter"); |
duke@435 | 1537 | push(state); |
duke@435 | 1538 | st_ptr(return_bci, l_tmp); // protect return_bci, in case it is volatile |
duke@435 | 1539 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci); |
duke@435 | 1540 | ld_ptr(l_tmp, return_bci); |
duke@435 | 1541 | pop(state); |
duke@435 | 1542 | } |
duke@435 | 1543 | |
duke@435 | 1544 | // Count a taken branch in the bytecodes. |
duke@435 | 1545 | |
duke@435 | 1546 | void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) { |
duke@435 | 1547 | if (ProfileInterpreter) { |
duke@435 | 1548 | Label profile_continue; |
duke@435 | 1549 | |
duke@435 | 1550 | // If no method data exists, go to profile_continue. |
duke@435 | 1551 | test_method_data_pointer(profile_continue); |
duke@435 | 1552 | |
duke@435 | 1553 | // We are taking a branch. Increment the taken count. |
duke@435 | 1554 | increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count); |
duke@435 | 1555 | |
duke@435 | 1556 | // The method data pointer needs to be updated to reflect the new target. |
duke@435 | 1557 | update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch); |
duke@435 | 1558 | bind (profile_continue); |
duke@435 | 1559 | } |
duke@435 | 1560 | } |
duke@435 | 1561 | |
duke@435 | 1562 | |
duke@435 | 1563 | // Count a not-taken branch in the bytecodes. |
duke@435 | 1564 | |
duke@435 | 1565 | void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) { |
duke@435 | 1566 | if (ProfileInterpreter) { |
duke@435 | 1567 | Label profile_continue; |
duke@435 | 1568 | |
duke@435 | 1569 | // If no method data exists, go to profile_continue. |
duke@435 | 1570 | test_method_data_pointer(profile_continue); |
duke@435 | 1571 | |
duke@435 | 1572 | // The branch was not taken. Increment the not-taken count.
duke@435 | 1573 | increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch); |
duke@435 | 1574 | |
duke@435 | 1575 | // The method data pointer needs to be updated to correspond to the |
duke@435 | 1576 | // next bytecode. |
duke@435 | 1577 | update_mdp_by_constant(in_bytes(BranchData::branch_data_size())); |
duke@435 | 1578 | bind (profile_continue); |
duke@435 | 1579 | } |
duke@435 | 1580 | } |
duke@435 | 1581 | |
duke@435 | 1582 | |
duke@435 | 1583 | // Count a non-virtual call in the bytecodes. |
duke@435 | 1584 | |
duke@435 | 1585 | void InterpreterMacroAssembler::profile_call(Register scratch) { |
duke@435 | 1586 | if (ProfileInterpreter) { |
duke@435 | 1587 | Label profile_continue; |
duke@435 | 1588 | |
duke@435 | 1589 | // If no method data exists, go to profile_continue. |
duke@435 | 1590 | test_method_data_pointer(profile_continue); |
duke@435 | 1591 | |
duke@435 | 1592 | // We are making a call. Increment the count. |
duke@435 | 1593 | increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
duke@435 | 1594 | |
duke@435 | 1595 | // The method data pointer needs to be updated to reflect the new target. |
duke@435 | 1596 | update_mdp_by_constant(in_bytes(CounterData::counter_data_size())); |
duke@435 | 1597 | bind (profile_continue); |
duke@435 | 1598 | } |
duke@435 | 1599 | } |
duke@435 | 1600 | |
duke@435 | 1601 | |
duke@435 | 1602 | // Count a final call in the bytecodes. |
duke@435 | 1603 | |
duke@435 | 1604 | void InterpreterMacroAssembler::profile_final_call(Register scratch) { |
duke@435 | 1605 | if (ProfileInterpreter) { |
duke@435 | 1606 | Label profile_continue; |
duke@435 | 1607 | |
duke@435 | 1608 | // If no method data exists, go to profile_continue. |
duke@435 | 1609 | test_method_data_pointer(profile_continue); |
duke@435 | 1610 | |
duke@435 | 1611 | // We are making a call. Increment the count. |
duke@435 | 1612 | increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
duke@435 | 1613 | |
duke@435 | 1614 | // The method data pointer needs to be updated to reflect the new target. |
duke@435 | 1615 | update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); |
duke@435 | 1616 | bind (profile_continue); |
duke@435 | 1617 | } |
duke@435 | 1618 | } |
duke@435 | 1619 | |
duke@435 | 1620 | |
duke@435 | 1621 | // Count a virtual call in the bytecodes. |
duke@435 | 1622 | |
duke@435 | 1623 | void InterpreterMacroAssembler::profile_virtual_call(Register receiver, |
twisti@1858 | 1624 | Register scratch, |
twisti@1858 | 1625 | bool receiver_can_be_null) { |
duke@435 | 1626 | if (ProfileInterpreter) { |
duke@435 | 1627 | Label profile_continue; |
duke@435 | 1628 | |
duke@435 | 1629 | // If no method data exists, go to profile_continue. |
duke@435 | 1630 | test_method_data_pointer(profile_continue); |
duke@435 | 1631 | |
twisti@1858 | 1632 | |
twisti@1858 | 1633 | Label skip_receiver_profile; |
twisti@1858 | 1634 | if (receiver_can_be_null) { |
twisti@1858 | 1635 | Label not_null; |
kvn@3037 | 1636 | br_notnull_short(receiver, Assembler::pt, not_null); |
twisti@1858 | 1637 | // We are making a call. Increment the count for null receiver. |
twisti@1858 | 1638 | increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
kvn@3037 | 1639 | ba_short(skip_receiver_profile); |
twisti@1858 | 1640 | bind(not_null); |
twisti@1858 | 1641 | } |
twisti@1858 | 1642 | |
duke@435 | 1643 | // Record the receiver type. |
kvn@1641 | 1644 | record_klass_in_profile(receiver, scratch, true); |
twisti@1858 | 1645 | bind(skip_receiver_profile); |
duke@435 | 1646 | |
duke@435 | 1647 | // The method data pointer needs to be updated to reflect the new target. |
duke@435 | 1648 | update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size())); |
duke@435 | 1649 | bind (profile_continue); |
duke@435 | 1650 | } |
duke@435 | 1651 | } |
duke@435 | 1652 | |
duke@435 | 1653 | void InterpreterMacroAssembler::record_klass_in_profile_helper( |
duke@435 | 1654 | Register receiver, Register scratch, |
kvn@1641 | 1655 | int start_row, Label& done, bool is_virtual_call) { |
kvn@1641 | 1656 | if (TypeProfileWidth == 0) { |
kvn@1641 | 1657 | if (is_virtual_call) { |
kvn@1641 | 1658 | increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
kvn@1641 | 1659 | } |
poonam@1402 | 1660 | return; |
kvn@1641 | 1661 | } |
poonam@1402 | 1662 | |
duke@435 | 1663 | int last_row = VirtualCallData::row_limit() - 1; |
duke@435 | 1664 | assert(start_row <= last_row, "must be work left to do"); |
duke@435 | 1665 | // Test this row for both the receiver and for null. |
duke@435 | 1666 | // Take any of three different outcomes: |
duke@435 | 1667 | // 1. found receiver => increment count and goto done |
duke@435 | 1668 | // 2. found null => keep looking for case 1, maybe allocate this cell |
duke@435 | 1669 | // 3. found something else => keep looking for cases 1 and 2 |
duke@435 | 1670 | // Case 3 is handled by a recursive call. |
duke@435 | 1671 | for (int row = start_row; row <= last_row; row++) { |
duke@435 | 1672 | Label next_test; |
duke@435 | 1673 | bool test_for_null_also = (row == start_row); |
duke@435 | 1674 | |
duke@435 | 1675 | // See if the receiver is receiver[n]. |
duke@435 | 1676 | int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row)); |
duke@435 | 1677 | test_mdp_data_at(recvr_offset, receiver, next_test, scratch); |
kvn@1641 | 1678 | // delayed()->tst(scratch); |
duke@435 | 1679 | |
duke@435 | 1680 | // The receiver is receiver[n]. Increment count[n]. |
duke@435 | 1681 | int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row)); |
duke@435 | 1682 | increment_mdp_data_at(count_offset, scratch); |
kvn@3037 | 1683 | ba_short(done); |
duke@435 | 1684 | bind(next_test); |
duke@435 | 1685 | |
duke@435 | 1686 | if (test_for_null_also) { |
kvn@1641 | 1687 | Label found_null; |
duke@435 | 1688 | // Failed the equality check on receiver[n]... Test for null. |
duke@435 | 1689 | if (start_row == last_row) { |
duke@435 | 1690 | // The only thing left to do is handle the null case. |
kvn@1641 | 1691 | if (is_virtual_call) { |
kvn@1641 | 1692 | brx(Assembler::zero, false, Assembler::pn, found_null); |
kvn@1641 | 1693 | delayed()->nop(); |
kvn@1641 | 1694 | // Receiver did not match any saved receiver and there is no empty row for it. |
kvn@1686 | 1695 | // Increment total counter to indicate polymorphic case. |
kvn@1641 | 1696 | increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
kvn@3037 | 1697 | ba_short(done); |
kvn@1641 | 1698 | bind(found_null); |
kvn@1641 | 1699 | } else { |
kvn@1641 | 1700 | brx(Assembler::notZero, false, Assembler::pt, done); |
kvn@1641 | 1701 | delayed()->nop(); |
kvn@1641 | 1702 | } |
duke@435 | 1703 | break; |
duke@435 | 1704 | } |
duke@435 | 1705 | // Since null is rare, make null the branch-taken case.
duke@435 | 1706 | brx(Assembler::zero, false, Assembler::pn, found_null); |
duke@435 | 1707 | delayed()->nop(); |
duke@435 | 1708 | |
duke@435 | 1709 | // Put all the "Case 3" tests here. |
kvn@1641 | 1710 | record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call); |
duke@435 | 1711 | |
duke@435 | 1712 | // Found a null. Keep searching for a matching receiver, |
duke@435 | 1713 | // but remember that this is an empty (unused) slot. |
duke@435 | 1714 | bind(found_null); |
duke@435 | 1715 | } |
duke@435 | 1716 | } |
duke@435 | 1717 | |
duke@435 | 1718 | // In the fall-through case, we found no matching receiver, but we |
duke@435 | 1719 | // observed that receiver[start_row] is NULL.
duke@435 | 1720 | |
duke@435 | 1721 | // Fill in the receiver field and increment the count. |
duke@435 | 1722 | int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row)); |
duke@435 | 1723 | set_mdp_data_at(recvr_offset, receiver); |
duke@435 | 1724 | int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row)); |
duke@435 | 1725 | mov(DataLayout::counter_increment, scratch); |
duke@435 | 1726 | set_mdp_data_at(count_offset, scratch); |
kvn@1641 | 1727 | if (start_row > 0) { |
kvn@3037 | 1728 | ba_short(done); |
kvn@1641 | 1729 | } |
duke@435 | 1730 | } |
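// Editorial sketch of the net behavior generated above (the recursion on
// start_row unrolls into straight-line code, one test chain per row):
//
//   for (row = 0; row < VirtualCallData::row_limit(); row++)
//     if (recv[row] == receiver) { count[row] += increment; goto done; }
//   if (some recv[row] == NULL)  { recv[row] = receiver; count[row] = increment; }
//   else if (is_virtual_call)    count += increment;  // polymorphic, no free row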
duke@435 | 1731 | |
duke@435 | 1732 | void InterpreterMacroAssembler::record_klass_in_profile(Register receiver, |
kvn@1641 | 1733 | Register scratch, bool is_virtual_call) { |
duke@435 | 1734 | assert(ProfileInterpreter, "must be profiling"); |
duke@435 | 1735 | Label done; |
duke@435 | 1736 | |
kvn@1641 | 1737 | record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call); |
duke@435 | 1738 | |
duke@435 | 1739 | bind (done); |
duke@435 | 1740 | } |
duke@435 | 1741 | |
duke@435 | 1742 | |
duke@435 | 1743 | // Count a ret in the bytecodes. |
duke@435 | 1744 | |
duke@435 | 1745 | void InterpreterMacroAssembler::profile_ret(TosState state, |
duke@435 | 1746 | Register return_bci, |
duke@435 | 1747 | Register scratch) { |
duke@435 | 1748 | if (ProfileInterpreter) { |
duke@435 | 1749 | Label profile_continue; |
duke@435 | 1750 | uint row; |
duke@435 | 1751 | |
duke@435 | 1752 | // If no method data exists, go to profile_continue. |
duke@435 | 1753 | test_method_data_pointer(profile_continue); |
duke@435 | 1754 | |
duke@435 | 1755 | // Update the total ret count. |
duke@435 | 1756 | increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch); |
duke@435 | 1757 | |
duke@435 | 1758 | for (row = 0; row < RetData::row_limit(); row++) { |
duke@435 | 1759 | Label next_test; |
duke@435 | 1760 | |
duke@435 | 1761 | // See if return_bci is equal to bci[n]: |
duke@435 | 1762 | test_mdp_data_at(in_bytes(RetData::bci_offset(row)), |
duke@435 | 1763 | return_bci, next_test, scratch); |
duke@435 | 1764 | |
duke@435 | 1765 | // return_bci is equal to bci[n]. Increment the count. |
duke@435 | 1766 | increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch); |
duke@435 | 1767 | |
duke@435 | 1768 | // The method data pointer needs to be updated to reflect the new target. |
duke@435 | 1769 | update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch); |
kvn@3037 | 1770 | ba_short(profile_continue); |
duke@435 | 1771 | bind(next_test); |
duke@435 | 1772 | } |
duke@435 | 1773 | |
duke@435 | 1774 | update_mdp_for_ret(state, return_bci); |
duke@435 | 1775 | |
duke@435 | 1776 | bind (profile_continue); |
duke@435 | 1777 | } |
duke@435 | 1778 | } |
duke@435 | 1779 | |
duke@435 | 1780 | // Profile an unexpected null in the bytecodes. |
duke@435 | 1781 | void InterpreterMacroAssembler::profile_null_seen(Register scratch) { |
duke@435 | 1782 | if (ProfileInterpreter) { |
duke@435 | 1783 | Label profile_continue; |
duke@435 | 1784 | |
duke@435 | 1785 | // If no method data exists, go to profile_continue. |
duke@435 | 1786 | test_method_data_pointer(profile_continue); |
duke@435 | 1787 | |
duke@435 | 1788 | set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch); |
duke@435 | 1789 | |
duke@435 | 1790 | // The method data pointer needs to be updated. |
duke@435 | 1791 | int mdp_delta = in_bytes(BitData::bit_data_size()); |
duke@435 | 1792 | if (TypeProfileCasts) { |
duke@435 | 1793 | mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); |
duke@435 | 1794 | } |
duke@435 | 1795 | update_mdp_by_constant(mdp_delta); |
duke@435 | 1796 | |
duke@435 | 1797 | bind (profile_continue); |
duke@435 | 1798 | } |
duke@435 | 1799 | } |
duke@435 | 1800 | |
duke@435 | 1801 | void InterpreterMacroAssembler::profile_typecheck(Register klass, |
duke@435 | 1802 | Register scratch) { |
duke@435 | 1803 | if (ProfileInterpreter) { |
duke@435 | 1804 | Label profile_continue; |
duke@435 | 1805 | |
duke@435 | 1806 | // If no method data exists, go to profile_continue. |
duke@435 | 1807 | test_method_data_pointer(profile_continue); |
duke@435 | 1808 | |
duke@435 | 1809 | int mdp_delta = in_bytes(BitData::bit_data_size()); |
duke@435 | 1810 | if (TypeProfileCasts) { |
duke@435 | 1811 | mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size()); |
duke@435 | 1812 | |
duke@435 | 1813 | // Record the object type. |
kvn@1641 | 1814 | record_klass_in_profile(klass, scratch, false); |
duke@435 | 1815 | } |
duke@435 | 1816 | |
duke@435 | 1817 | // The method data pointer needs to be updated. |
duke@435 | 1818 | update_mdp_by_constant(mdp_delta); |
duke@435 | 1819 | |
duke@435 | 1820 | bind (profile_continue); |
duke@435 | 1821 | } |
duke@435 | 1822 | } |
duke@435 | 1823 | |
duke@435 | 1824 | void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) { |
duke@435 | 1825 | if (ProfileInterpreter && TypeProfileCasts) { |
duke@435 | 1826 | Label profile_continue; |
duke@435 | 1827 | |
duke@435 | 1828 | // If no method data exists, go to profile_continue. |
duke@435 | 1829 | test_method_data_pointer(profile_continue); |
duke@435 | 1830 | |
duke@435 | 1831 | int count_offset = in_bytes(CounterData::count_offset()); |
duke@435 | 1832 | // Back up the address, since we have already bumped the mdp. |
duke@435 | 1833 | count_offset -= in_bytes(VirtualCallData::virtual_call_data_size()); |
duke@435 | 1834 | |
duke@435 | 1835 | // *Decrement* the counter. We expect to see zero or small negatives. |
duke@435 | 1836 | increment_mdp_data_at(count_offset, scratch, true); |
duke@435 | 1837 | |
duke@435 | 1838 | bind (profile_continue); |
duke@435 | 1839 | } |
duke@435 | 1840 | } |
duke@435 | 1841 | |
duke@435 | 1842 | // Count the default case of a switch construct. |
duke@435 | 1843 | |
duke@435 | 1844 | void InterpreterMacroAssembler::profile_switch_default(Register scratch) { |
duke@435 | 1845 | if (ProfileInterpreter) { |
duke@435 | 1846 | Label profile_continue; |
duke@435 | 1847 | |
duke@435 | 1848 | // If no method data exists, go to profile_continue. |
duke@435 | 1849 | test_method_data_pointer(profile_continue); |
duke@435 | 1850 | |
duke@435 | 1851 | // Update the default case count |
duke@435 | 1852 | increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()), |
duke@435 | 1853 | scratch); |
duke@435 | 1854 | |
duke@435 | 1855 | // The method data pointer needs to be updated. |
duke@435 | 1856 | update_mdp_by_offset( |
duke@435 | 1857 | in_bytes(MultiBranchData::default_displacement_offset()), |
duke@435 | 1858 | scratch); |
duke@435 | 1859 | |
duke@435 | 1860 | bind (profile_continue); |
duke@435 | 1861 | } |
duke@435 | 1862 | } |
duke@435 | 1863 | |
duke@435 | 1864 | // Count the index'th case of a switch construct. |
duke@435 | 1865 | |
duke@435 | 1866 | void InterpreterMacroAssembler::profile_switch_case(Register index, |
duke@435 | 1867 | Register scratch, |
duke@435 | 1868 | Register scratch2, |
duke@435 | 1869 | Register scratch3) { |
duke@435 | 1870 | if (ProfileInterpreter) { |
duke@435 | 1871 | Label profile_continue; |
duke@435 | 1872 | |
duke@435 | 1873 | // If no method data exists, go to profile_continue. |
duke@435 | 1874 | test_method_data_pointer(profile_continue); |
duke@435 | 1875 | |
duke@435 | 1876 | // Build the base (index * per_case_size()) + case_array_offset()
duke@435 | 1877 | set(in_bytes(MultiBranchData::per_case_size()), scratch); |
duke@435 | 1878 | smul(index, scratch, scratch); |
duke@435 | 1879 | add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch); |
duke@435 | 1880 | |
duke@435 | 1881 | // Update the case count |
duke@435 | 1882 | increment_mdp_data_at(scratch, |
duke@435 | 1883 | in_bytes(MultiBranchData::relative_count_offset()), |
duke@435 | 1884 | scratch2, |
duke@435 | 1885 | scratch3); |
duke@435 | 1886 | |
duke@435 | 1887 | // The method data pointer needs to be updated. |
duke@435 | 1888 | update_mdp_by_offset(scratch, |
duke@435 | 1889 | in_bytes(MultiBranchData::relative_displacement_offset()), |
duke@435 | 1890 | scratch2); |
duke@435 | 1891 | |
duke@435 | 1892 | bind (profile_continue); |
duke@435 | 1893 | } |
duke@435 | 1894 | } |
duke@435 | 1895 | |
duke@435 | 1896 | // add an InterpMonitorElem to the stack (see frame_sparc.hpp)
duke@435 | 1897 | |
duke@435 | 1898 | void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty, |
duke@435 | 1899 | Register Rtemp, |
duke@435 | 1900 | Register Rtemp2 ) { |
duke@435 | 1901 | |
duke@435 | 1902 | Register Rlimit = Lmonitors; |
duke@435 | 1903 | const jint delta = frame::interpreter_frame_monitor_size() * wordSize; |
duke@435 | 1904 | assert( (delta & LongAlignmentMask) == 0, |
duke@435 | 1905 | "sizeof BasicObjectLock must be even number of doublewords"); |
duke@435 | 1906 | |
duke@435 | 1907 | sub( SP, delta, SP); |
duke@435 | 1908 | sub( Lesp, delta, Lesp); |
duke@435 | 1909 | sub( Lmonitors, delta, Lmonitors); |
duke@435 | 1910 | |
duke@435 | 1911 | if (!stack_is_empty) { |
duke@435 | 1912 | |
duke@435 | 1913 | // must copy stack contents down |
duke@435 | 1914 | |
duke@435 | 1915 | Label start_copying, next; |
duke@435 | 1916 | |
duke@435 | 1917 | // untested("monitor stack expansion"); |
duke@435 | 1918 | compute_stack_base(Rtemp); |
kvn@3037 | 1919 | ba(start_copying); |
kvn@3037 | 1920 | delayed()->cmp(Rtemp, Rlimit); // done? duplicated below |
duke@435 | 1921 | |
duke@435 | 1922 | // note: must copy from low memory upwards |
duke@435 | 1923 | // On entry to loop, |
duke@435 | 1924 | // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS) |
duke@435 | 1925 | // Loop mutates Rtemp |
duke@435 | 1926 | |
duke@435 | 1927 | bind( next); |
duke@435 | 1928 | |
duke@435 | 1929 | st_ptr(Rtemp2, Rtemp, 0); |
duke@435 | 1930 | inc(Rtemp, wordSize); |
duke@435 | 1931 | cmp(Rtemp, Rlimit); // are we done? (duplicated above) |
duke@435 | 1932 | |
duke@435 | 1933 | bind( start_copying ); |
duke@435 | 1934 | |
duke@435 | 1935 | brx( notEqual, true, pn, next ); |
duke@435 | 1936 | delayed()->ld_ptr( Rtemp, delta, Rtemp2 ); |
duke@435 | 1937 | |
duke@435 | 1938 | // done copying stack |
duke@435 | 1939 | } |
duke@435 | 1940 | } |
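// Editorial sketch of the copy loop above (delay slots elided). After SP,
// Lesp and Lmonitors have each moved down by delta:
//
//   for (Rtemp = Lesp + wordSize; Rtemp != Rlimit; Rtemp += wordSize)
//     *(intptr_t*) Rtemp = *(intptr_t*) (Rtemp + delta);
//
// Copying from low memory upwards makes the overlapping move safe: each
// source word at (Rtemp + delta) is read before that address is written.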
duke@435 | 1941 | |
duke@435 | 1942 | // Locals |
duke@435 | 1943 | void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) { |
duke@435 | 1944 | assert_not_delayed(); |
twisti@1861 | 1945 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 1946 | sub(Llocals, index, index); |
twisti@1861 | 1947 | ld_ptr(index, 0, dst); |
duke@435 | 1948 | // Note: index must hold the effective address--the iinc template uses it |
duke@435 | 1949 | } |
duke@435 | 1950 | |
duke@435 | 1951 | // Just like access_local_ptr but the tag is a returnAddress |
duke@435 | 1952 | void InterpreterMacroAssembler::access_local_returnAddress(Register index, |
duke@435 | 1953 | Register dst ) { |
duke@435 | 1954 | assert_not_delayed(); |
twisti@1861 | 1955 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 1956 | sub(Llocals, index, index); |
twisti@1861 | 1957 | ld_ptr(index, 0, dst); |
duke@435 | 1958 | } |
duke@435 | 1959 | |
duke@435 | 1960 | void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) { |
duke@435 | 1961 | assert_not_delayed(); |
twisti@1861 | 1962 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 1963 | sub(Llocals, index, index); |
twisti@1861 | 1964 | ld(index, 0, dst); |
duke@435 | 1965 | // Note: index must hold the effective address--the iinc template uses it |
duke@435 | 1966 | } |
duke@435 | 1967 | |
duke@435 | 1968 | |
duke@435 | 1969 | void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) { |
duke@435 | 1970 | assert_not_delayed(); |
twisti@1861 | 1971 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 1972 | sub(Llocals, index, index); |
duke@435 | 1973 | // First half stored at index n+1 (which grows down from Llocals[n]) |
duke@435 | 1974 | load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst); |
duke@435 | 1975 | } |
duke@435 | 1976 | |
duke@435 | 1977 | |
duke@435 | 1978 | void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) { |
duke@435 | 1979 | assert_not_delayed(); |
twisti@1861 | 1980 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 1981 | sub(Llocals, index, index); |
twisti@1861 | 1982 | ldf(FloatRegisterImpl::S, index, 0, dst); |
duke@435 | 1983 | } |
duke@435 | 1984 | |
duke@435 | 1985 | |
duke@435 | 1986 | void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) { |
duke@435 | 1987 | assert_not_delayed(); |
twisti@1861 | 1988 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 1989 | sub(Llocals, index, index); |
duke@435 | 1990 | load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst); |
duke@435 | 1991 | } |
duke@435 | 1992 | |
duke@435 | 1993 | |
duke@435 | 1994 | #ifdef ASSERT |
duke@435 | 1995 | void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) { |
duke@435 | 1996 | Label L; |
duke@435 | 1997 | |
duke@435 | 1998 | assert(Rindex != Rscratch, "Registers cannot be same"); |
duke@435 | 1999 | assert(Rindex != Rscratch1, "Registers cannot be same"); |
duke@435 | 2000 | assert(Rlimit != Rscratch, "Registers cannot be same"); |
duke@435 | 2001 | assert(Rlimit != Rscratch1, "Registers cannot be same"); |
duke@435 | 2002 | assert(Rscratch1 != Rscratch, "Registers cannot be same"); |
duke@435 | 2003 | |
duke@435 | 2004 | // untested("reg area corruption"); |
duke@435 | 2005 | add(Rindex, offset, Rscratch); |
duke@435 | 2006 | add(Rlimit, 64 + STACK_BIAS, Rscratch1); |
kvn@3037 | 2007 | cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L); |
duke@435 | 2008 | stop("regsave area is being clobbered"); |
duke@435 | 2009 | bind(L); |
duke@435 | 2010 | } |
duke@435 | 2011 | #endif // ASSERT |
duke@435 | 2012 | |
duke@435 | 2013 | |
duke@435 | 2014 | void InterpreterMacroAssembler::store_local_int( Register index, Register src ) { |
duke@435 | 2015 | assert_not_delayed(); |
twisti@1861 | 2016 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 2017 | sub(Llocals, index, index); |
twisti@1861 | 2018 | debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);) |
twisti@1861 | 2019 | st(src, index, 0); |
duke@435 | 2020 | } |
duke@435 | 2021 | |
twisti@1861 | 2022 | void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) { |
duke@435 | 2023 | assert_not_delayed(); |
twisti@1861 | 2024 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 2025 | sub(Llocals, index, index); |
twisti@1861 | 2026 | #ifdef ASSERT |
twisti@1861 | 2027 | check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch); |
twisti@1861 | 2028 | #endif |
twisti@1861 | 2029 | st_ptr(src, index, 0); |
duke@435 | 2030 | } |
duke@435 | 2031 | |
duke@435 | 2032 | |
duke@435 | 2033 | |
twisti@1861 | 2034 | void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) { |
twisti@1861 | 2035 | st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n)); |
duke@435 | 2036 | } |
duke@435 | 2037 | |
duke@435 | 2038 | void InterpreterMacroAssembler::store_local_long( Register index, Register src ) { |
duke@435 | 2039 | assert_not_delayed(); |
twisti@1861 | 2040 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 2041 | sub(Llocals, index, index); |
twisti@1861 | 2042 | #ifdef ASSERT |
duke@435 | 2043 | check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); |
twisti@1861 | 2044 | #endif |
duke@435 | 2045 | store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1 |
duke@435 | 2046 | } |
duke@435 | 2047 | |
duke@435 | 2048 | |
duke@435 | 2049 | void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) { |
duke@435 | 2050 | assert_not_delayed(); |
twisti@1861 | 2051 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 2052 | sub(Llocals, index, index); |
twisti@1861 | 2053 | #ifdef ASSERT |
twisti@1861 | 2054 | check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch); |
twisti@1861 | 2055 | #endif |
twisti@1861 | 2056 | stf(FloatRegisterImpl::S, src, index, 0); |
duke@435 | 2057 | } |
duke@435 | 2058 | |
duke@435 | 2059 | |
duke@435 | 2060 | void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) { |
duke@435 | 2061 | assert_not_delayed(); |
twisti@1861 | 2062 | sll(index, Interpreter::logStackElementSize, index); |
duke@435 | 2063 | sub(Llocals, index, index); |
twisti@1861 | 2064 | #ifdef ASSERT |
duke@435 | 2065 | check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch); |
twisti@1861 | 2066 | #endif |
duke@435 | 2067 | store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1)); |
duke@435 | 2068 | } |
duke@435 | 2069 | |
duke@435 | 2070 | |
duke@435 | 2071 | int InterpreterMacroAssembler::top_most_monitor_byte_offset() { |
duke@435 | 2072 | const jint delta = frame::interpreter_frame_monitor_size() * wordSize; |
duke@435 | 2073 | int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong); |
duke@435 | 2074 | return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS; |
duke@435 | 2075 | } |
duke@435 | 2076 | |
duke@435 | 2077 | |
duke@435 | 2078 | Address InterpreterMacroAssembler::top_most_monitor() { |
twisti@1162 | 2079 | return Address(FP, top_most_monitor_byte_offset()); |
duke@435 | 2080 | } |
duke@435 | 2081 | |
duke@435 | 2082 | |
duke@435 | 2083 | void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) { |
duke@435 | 2084 | add( Lesp, wordSize, Rdest ); |
duke@435 | 2085 | } |
duke@435 | 2086 | |
duke@435 | 2087 | #endif /* CC_INTERP */ |
duke@435 | 2088 | |
duke@435 | 2089 | void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) { |
duke@435 | 2090 | assert(UseCompiler, "incrementing must be useful"); |
duke@435 | 2091 | #ifdef CC_INTERP |
coleenp@4037 | 2092 | Address inv_counter(G5_method, Method::invocation_counter_offset() + |
twisti@1162 | 2093 | InvocationCounter::counter_offset()); |
coleenp@4037 | 2094 | Address be_counter (G5_method, Method::backedge_counter_offset() + |
twisti@1162 | 2095 | InvocationCounter::counter_offset()); |
duke@435 | 2096 | #else |
coleenp@4037 | 2097 | Address inv_counter(Lmethod, Method::invocation_counter_offset() + |
twisti@1162 | 2098 | InvocationCounter::counter_offset()); |
coleenp@4037 | 2099 | Address be_counter (Lmethod, Method::backedge_counter_offset() + |
twisti@1162 | 2100 | InvocationCounter::counter_offset()); |
duke@435 | 2101 | #endif /* CC_INTERP */ |
duke@435 | 2102 | int delta = InvocationCounter::count_increment; |
duke@435 | 2103 | |
duke@435 | 2104 | // Load each counter in a register |
duke@435 | 2105 | ld( inv_counter, Rtmp ); |
duke@435 | 2106 | ld( be_counter, Rtmp2 ); |
duke@435 | 2107 | |
duke@435 | 2108 | assert( is_simm13( delta ), " delta too large."); |
duke@435 | 2109 | |
duke@435 | 2110 | // Add the delta to the invocation counter and store the result |
duke@435 | 2111 | add( Rtmp, delta, Rtmp ); |
duke@435 | 2112 | |
duke@435 | 2113 | // Mask the backedge counter |
duke@435 | 2114 | and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); |
duke@435 | 2115 | |
duke@435 | 2116 | // Store value |
duke@435 | 2117 | st( Rtmp, inv_counter); |
duke@435 | 2118 | |
duke@435 | 2119 | // Add invocation counter + backedge counter |
duke@435 | 2120 | add( Rtmp, Rtmp2, Rtmp); |
duke@435 | 2121 | |
duke@435 | 2122 | // Note that this macro must leave the backedge_count + invocation_count in Rtmp! |
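// More precisely (editorial sketch):
//   inv_counter += count_increment;
//   Rtmp         = inv_counter + (be_counter & count_mask_value);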
duke@435 | 2123 | } |
duke@435 | 2124 | |
duke@435 | 2125 | void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) { |
duke@435 | 2126 | assert(UseCompiler, "incrementing must be useful"); |
duke@435 | 2127 | #ifdef CC_INTERP |
coleenp@4037 | 2128 | Address be_counter (G5_method, Method::backedge_counter_offset() + |
twisti@1162 | 2129 | InvocationCounter::counter_offset()); |
coleenp@4037 | 2130 | Address inv_counter(G5_method, Method::invocation_counter_offset() + |
twisti@1162 | 2131 | InvocationCounter::counter_offset()); |
duke@435 | 2132 | #else |
coleenp@4037 | 2133 | Address be_counter (Lmethod, Method::backedge_counter_offset() + |
twisti@1162 | 2134 | InvocationCounter::counter_offset()); |
coleenp@4037 | 2135 | Address inv_counter(Lmethod, Method::invocation_counter_offset() + |
twisti@1162 | 2136 | InvocationCounter::counter_offset()); |
duke@435 | 2137 | #endif /* CC_INTERP */ |
duke@435 | 2138 | int delta = InvocationCounter::count_increment; |
duke@435 | 2139 | // Load each counter in a register |
duke@435 | 2140 | ld( be_counter, Rtmp ); |
duke@435 | 2141 | ld( inv_counter, Rtmp2 ); |
duke@435 | 2142 | |
duke@435 | 2143 | // Add the delta to the backedge counter |
duke@435 | 2144 | add( Rtmp, delta, Rtmp ); |
duke@435 | 2145 | |
duke@435 | 2146 | // Mask the invocation counter (it is added to the backedge counter below)
duke@435 | 2147 | and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 ); |
duke@435 | 2148 | |
duke@435 | 2149 | // and store the incremented backedge counter back to memory
duke@435 | 2150 | st( Rtmp, be_counter ); |
duke@435 | 2151 | |
duke@435 | 2152 | // Add backedge + invocation counter |
duke@435 | 2153 | add( Rtmp, Rtmp2, Rtmp ); |
duke@435 | 2154 | |
duke@435 | 2155 | // Note that this macro must leave backedge_count + invocation_count in Rtmp! |
duke@435 | 2156 | } |
duke@435 | 2157 | |
duke@435 | 2158 | #ifndef CC_INTERP |
duke@435 | 2159 | void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count, |
duke@435 | 2160 | Register branch_bcp, |
duke@435 | 2161 | Register Rtmp ) { |
duke@435 | 2162 | Label did_not_overflow; |
duke@435 | 2163 | Label overflow_with_error; |
duke@435 | 2164 | assert_different_registers(backedge_count, Rtmp, branch_bcp); |
duke@435 | 2165 | assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr"); |
duke@435 | 2166 | |
twisti@1162 | 2167 | AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit); |
duke@435 | 2168 | load_contents(limit, Rtmp); |
kvn@3037 | 2169 | cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow); |
duke@435 | 2170 | |
duke@435 | 2171 | // When ProfileInterpreter is on, the backedge_count comes from the |
coleenp@4037 | 2172 | // MethodData*, whose value does not get reset on the call to
duke@435 | 2173 | // frequency_counter_overflow(). To avoid excessive calls to the overflow |
duke@435 | 2174 | // routine while the method is being compiled, add a second test to make sure |
duke@435 | 2175 | // the overflow function is called only once every overflow_frequency. |
duke@435 | 2176 | if (ProfileInterpreter) { |
duke@435 | 2177 | const int overflow_frequency = 1024; |
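// Since overflow_frequency is a power of two, and-ing with
// (overflow_frequency - 1) computes backedge_count % 1024; the overflow
// routine is re-entered only when the low ten bits are all zero, i.e. once
// every 1024 backedges.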
duke@435 | 2178 | andcc(backedge_count, overflow_frequency-1, Rtmp); |
duke@435 | 2179 | brx(Assembler::notZero, false, Assembler::pt, did_not_overflow); |
duke@435 | 2180 | delayed()->nop(); |
duke@435 | 2181 | } |
duke@435 | 2182 | |
duke@435 | 2183 | // overflow in loop, pass branch bytecode |
duke@435 | 2184 | set(6, Rtmp); |
duke@435 | 2185 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp); |
duke@435 | 2186 | |
duke@435 | 2187 | // Was an OSR adapter generated? |
duke@435 | 2188 | // O0 = osr nmethod |
kvn@3037 | 2189 | br_null_short(O0, Assembler::pn, overflow_with_error); |
duke@435 | 2190 | |
duke@435 | 2191 | // Has the nmethod been invalidated already? |
duke@435 | 2192 | ld(O0, nmethod::entry_bci_offset(), O2); |
kvn@3037 | 2193 | cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error); |
duke@435 | 2194 | |
duke@435 | 2195 | // migrate the interpreter frame off of the stack |
duke@435 | 2196 | |
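// Park the thread pointer in a local (window) register across the leaf call,
// presumably because global registers such as G2 are not guaranteed to
// survive a call into C code, while window locals are.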
duke@435 | 2197 | mov(G2_thread, L7); |
duke@435 | 2198 | // save nmethod |
duke@435 | 2199 | mov(O0, L6); |
duke@435 | 2200 | set_last_Java_frame(SP, noreg); |
duke@435 | 2201 | call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7); |
duke@435 | 2202 | reset_last_Java_frame(); |
duke@435 | 2203 | mov(L7, G2_thread); |
duke@435 | 2204 | |
duke@435 | 2205 | // move OSR nmethod to I1 |
duke@435 | 2206 | mov(L6, I1); |
duke@435 | 2207 | |
duke@435 | 2208 | // OSR buffer to I0 |
duke@435 | 2209 | mov(O0, I0); |
duke@435 | 2210 | |
duke@435 | 2211 | // remove the interpreter frame |
duke@435 | 2212 | restore(I5_savedSP, 0, SP); |
duke@435 | 2213 | |
duke@435 | 2214 | // Jump to the osr code. |
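// Note: the restore above shifted the register window, so values placed in
// I0/I1 before it are now visible as O0/O1; the nmethod saved in I1 is
// therefore addressed as O1 below.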
duke@435 | 2215 | ld_ptr(O1, nmethod::osr_entry_point_offset(), O2); |
duke@435 | 2216 | jmp(O2, G0); |
duke@435 | 2217 | delayed()->nop(); |
duke@435 | 2218 | |
duke@435 | 2219 | bind(overflow_with_error); |
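// Both labels currently fall through: on error (no nmethod, or an already
// invalidated one) execution simply continues in the interpreter without
// performing OSR.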
duke@435 | 2220 | |
duke@435 | 2221 | bind(did_not_overflow); |
duke@435 | 2222 | } |
duke@435 | 2223 | |
duke@435 | 2224 | |
duke@435 | 2225 | |
duke@435 | 2226 | void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) { |
duke@435 | 2227 | if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); } |
duke@435 | 2228 | } |
duke@435 | 2229 | |
duke@435 | 2230 | |
duke@435 | 2231 | // local helper function for the verify_oop_or_return_address macro |
coleenp@4037 | 2232 | static bool verify_return_address(Method* m, int bci) { |
duke@435 | 2233 | #ifndef PRODUCT |
duke@435 | 2234 | address pc = (address)(m->constMethod()) |
coleenp@4037 | 2235 | + in_bytes(ConstMethod::codes_offset()) + bci; |
duke@435 | 2236 | // assume it is a valid return address if it is inside m and is preceded by a jsr |
duke@435 | 2237 | if (!m->contains(pc)) return false; |
duke@435 | 2238 | address jsr_pc; |
duke@435 | 2239 | jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr); |
duke@435 | 2240 | if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr)   return true; |
duke@435 | 2241 | jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w); |
duke@435 | 2242 | if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr_w) return true; |
duke@435 | 2243 | #endif // PRODUCT |
duke@435 | 2244 | return false; |
duke@435 | 2245 | } |
duke@435 | 2246 | |
duke@435 | 2247 | |
duke@435 | 2248 | void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) { |
duke@435 | 2249 | if (!VerifyOops) return; |
duke@435 | 2250 | // the VM documentation for the astore[_wide] bytecode allows |
duke@435 | 2251 | // the TOS to be not only an oop but also a return address |
duke@435 | 2252 | Label test; |
duke@435 | 2253 | Label skip; |
duke@435 | 2254 | // See if it is an address (in the current method): |
duke@435 | 2255 | |
duke@435 | 2256 | mov(reg, Rtmp); |
duke@435 | 2257 | const int log2_bytecode_size_limit = 16; |
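// Heuristic: a return address on the TOS is a bytecode index, and a method
// body is limited to 64K bytecodes, so a valid BCI fits in 16 bits. Shifting
// right by 16 leaves zero for a plausible BCI and non-zero for an oop-sized
// value, which is routed to the ordinary oop check at 'test'.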
duke@435 | 2258 | srl(Rtmp, log2_bytecode_size_limit, Rtmp); |
kvn@3037 | 2259 | br_notnull_short( Rtmp, pt, test ); |
duke@435 | 2260 | |
duke@435 | 2261 | // %%% should use call_VM_leaf here? |
duke@435 | 2262 | save_frame_and_mov(0, Lmethod, O0, reg, O1); |
duke@435 | 2263 | save_thread(L7_thread_cache); |
duke@435 | 2264 | call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none); |
duke@435 | 2265 | delayed()->nop(); |
duke@435 | 2266 | restore_thread(L7_thread_cache); |
duke@435 | 2267 | br_notnull( O0, false, pt, skip ); |
duke@435 | 2268 | delayed()->restore(); |
duke@435 | 2269 | |
duke@435 | 2270 | // Perform a more elaborate out-of-line call |
duke@435 | 2271 | // Not an address; verify it: |
duke@435 | 2272 | bind(test); |
duke@435 | 2273 | verify_oop(reg); |
duke@435 | 2274 | bind(skip); |
duke@435 | 2275 | } |
duke@435 | 2276 | |
duke@435 | 2277 | |
duke@435 | 2278 | void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { |
duke@435 | 2279 | if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth); |
duke@435 | 2280 | } |
duke@435 | 2281 | #endif /* CC_INTERP */ |
duke@435 | 2282 | |
duke@435 | 2283 | // Inline assembly for: |
duke@435 | 2284 | // |
duke@435 | 2285 | // if (thread is in interp_only_mode) { |
duke@435 | 2286 | // InterpreterRuntime::post_method_entry(); |
duke@435 | 2287 | // } |
duke@435 | 2288 | // if (DTraceMethodProbes) { |
twisti@1040 | 2289 | // SharedRuntime::dtrace_method_entry(method, receiver); |
duke@435 | 2290 | // } |
dcubed@1045 | 2291 | // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
dcubed@1045 | 2292 | // SharedRuntime::rc_trace_method_entry(method, receiver); |
coleenp@857 | 2293 | // } |
duke@435 | 2294 | |
duke@435 | 2295 | void InterpreterMacroAssembler::notify_method_entry() { |
duke@435 | 2296 | |
duke@435 | 2297 | // The C++ interpreter uses this only for native methods. |
duke@435 | 2298 | |
duke@435 | 2299 | // Whenever JVMTI puts a thread in interp_only_mode, method |
duke@435 | 2300 | // entry/exit events are sent for that thread to track stack |
duke@435 | 2301 | // depth. If it is possible to enter interp_only_mode, we add |
duke@435 | 2302 | // code to check whether the event should be sent. |
duke@435 | 2303 | if (JvmtiExport::can_post_interpreter_events()) { |
duke@435 | 2304 | Label L; |
duke@435 | 2305 | Register temp_reg = O5; |
twisti@1162 | 2306 | const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
duke@435 | 2307 | ld(interp_only, temp_reg); |
kvn@3037 | 2308 | cmp_and_br_short(temp_reg, 0, equal, pt, L); |
duke@435 | 2309 | call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry)); |
duke@435 | 2310 | bind(L); |
duke@435 | 2311 | } |
duke@435 | 2312 | |
duke@435 | 2313 | { |
duke@435 | 2314 | Register temp_reg = O5; |
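// SkipIfEqual emits a test of the DTraceMethodProbes flag byte and branches
// around the scope below when the flag is zero, so an inactive probe costs
// roughly a flag load plus a conditional branch.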
duke@435 | 2315 | SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); |
duke@435 | 2316 | call_VM_leaf(noreg, |
duke@435 | 2317 | CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), |
duke@435 | 2318 | G2_thread, Lmethod); |
duke@435 | 2319 | } |
dcubed@1045 | 2320 | |
dcubed@1045 | 2321 | // RedefineClasses() tracing support for obsolete method entry |
dcubed@1045 | 2322 | if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) { |
dcubed@1045 | 2323 | call_VM_leaf(noreg, |
dcubed@1045 | 2324 | CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry), |
dcubed@1045 | 2325 | G2_thread, Lmethod); |
dcubed@1045 | 2326 | } |
duke@435 | 2327 | } |
duke@435 | 2328 | |
duke@435 | 2329 | |
duke@435 | 2330 | // Inline assembly for: |
duke@435 | 2331 | // |
duke@435 | 2332 | // if (thread is in interp_only_mode) { |
duke@435 | 2333 | // // save result |
duke@435 | 2334 | // InterpreterRuntime::post_method_exit(); |
duke@435 | 2335 | // // restore result |
duke@435 | 2336 | // } |
duke@435 | 2337 | // if (DTraceMethodProbes) { |
duke@435 | 2338 | // SharedRuntime::dtrace_method_exit(thread, method); |
duke@435 | 2339 | // } |
duke@435 | 2340 | // |
duke@435 | 2341 | // Native methods have their result stored in d_tmp and l_tmp; |
duke@435 | 2342 | // Java methods have their result stored on the expression stack. |
duke@435 | 2343 | |
duke@435 | 2344 | void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, |
duke@435 | 2345 | TosState state, |
duke@435 | 2346 | NotifyMethodExitMode mode) { |
duke@435 | 2347 | // The C++ interpreter uses this only for native methods. |
duke@435 | 2348 | |
duke@435 | 2349 | // Whenever JVMTI puts a thread in interp_only_mode, method |
duke@435 | 2350 | // entry/exit events are sent for that thread to track stack |
duke@435 | 2351 | // depth. If it is possible to enter interp_only_mode, we add |
duke@435 | 2352 | // code to check whether the event should be sent. |
duke@435 | 2353 | if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) { |
duke@435 | 2354 | Label L; |
duke@435 | 2355 | Register temp_reg = O5; |
twisti@1162 | 2356 | const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset()); |
duke@435 | 2357 | ld(interp_only, temp_reg); |
kvn@3037 | 2358 | cmp_and_br_short(temp_reg, 0, equal, pt, L); |
duke@435 | 2359 | |
duke@435 | 2360 | // Note: frame::interpreter_frame_result has a dependency on how the |
duke@435 | 2361 | // method result is saved across the call to post_method_exit. For |
duke@435 | 2362 | // native methods it assumes the result registers are saved to |
duke@435 | 2363 | // l_scratch and d_scratch. If this changes then the interpreter_frame_result |
duke@435 | 2364 | // implementation will need to be updated too. |
duke@435 | 2365 | |
duke@435 | 2366 | save_return_value(state, is_native_method); |
duke@435 | 2367 | call_VM(noreg, |
duke@435 | 2368 | CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit)); |
duke@435 | 2369 | restore_return_value(state, is_native_method); |
duke@435 | 2370 | bind(L); |
duke@435 | 2371 | } |
duke@435 | 2372 | |
duke@435 | 2373 | { |
duke@435 | 2374 | Register temp_reg = O5; |
duke@435 | 2375 | // Dtrace notification |
duke@435 | 2376 | SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero); |
duke@435 | 2377 | save_return_value(state, is_native_method); |
duke@435 | 2378 | call_VM_leaf( |
duke@435 | 2379 | noreg, |
duke@435 | 2380 | CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), |
duke@435 | 2381 | G2_thread, Lmethod); |
duke@435 | 2382 | restore_return_value(state, is_native_method); |
duke@435 | 2383 | } |
duke@435 | 2384 | } |
duke@435 | 2385 | |
duke@435 | 2386 | void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) { |
duke@435 | 2387 | #ifdef CC_INTERP |
duke@435 | 2388 | // result potentially in O0/O1: save it across calls |
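// On _LP64 the entire 64-bit result lives in O0 and stx suffices; on 32-bit
// SPARC a long result spans the even/odd pair O0/O1, which std stores as a
// single doubleword.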
duke@435 | 2389 | stf(FloatRegisterImpl::D, F0, STATE(_native_fresult)); |
duke@435 | 2390 | #ifdef _LP64 |
duke@435 | 2391 | stx(O0, STATE(_native_lresult)); |
duke@435 | 2392 | #else |
duke@435 | 2393 | std(O0, STATE(_native_lresult)); |
duke@435 | 2394 | #endif |
duke@435 | 2395 | #else // CC_INTERP |
duke@435 | 2396 | if (is_native_call) { |
duke@435 | 2397 | stf(FloatRegisterImpl::D, F0, d_tmp); |
duke@435 | 2398 | #ifdef _LP64 |
duke@435 | 2399 | stx(O0, l_tmp); |
duke@435 | 2400 | #else |
duke@435 | 2401 | std(O0, l_tmp); |
duke@435 | 2402 | #endif |
duke@435 | 2403 | } else { |
duke@435 | 2404 | push(state); |
duke@435 | 2405 | } |
duke@435 | 2406 | #endif // CC_INTERP |
duke@435 | 2407 | } |
duke@435 | 2408 | |
duke@435 | 2409 | void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) { |
duke@435 | 2410 | #ifdef CC_INTERP |
duke@435 | 2411 | ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0); |
duke@435 | 2412 | #ifdef _LP64 |
duke@435 | 2413 | ldx(STATE(_native_lresult), O0); |
duke@435 | 2414 | #else |
duke@435 | 2415 | ldd(STATE(_native_lresult), O0); |
duke@435 | 2416 | #endif |
duke@435 | 2417 | #else // CC_INTERP |
duke@435 | 2418 | if (is_native_call) { |
duke@435 | 2419 | ldf(FloatRegisterImpl::D, d_tmp, F0); |
duke@435 | 2420 | #ifdef _LP64 |
duke@435 | 2421 | ldx(l_tmp, O0); |
duke@435 | 2422 | #else |
duke@435 | 2423 | ldd(l_tmp, O0); |
duke@435 | 2424 | #endif |
duke@435 | 2425 | } else { |
duke@435 | 2426 | pop(state); |
duke@435 | 2427 | } |
duke@435 | 2428 | #endif // CC_INTERP |
duke@435 | 2429 | } |
iveresov@2138 | 2430 | |
iveresov@2138 | 2431 | // Jump if ((*counter_addr += increment) & mask) satisfies the condition. |
iveresov@2138 | 2432 | void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr, |
iveresov@2138 | 2433 | int increment, int mask, |
iveresov@2138 | 2434 | Register scratch1, Register scratch2, |
iveresov@2138 | 2435 | Condition cond, Label *where) { |
iveresov@2138 | 2436 | ld(counter_addr, scratch1); |
iveresov@2138 | 2437 | add(scratch1, increment, scratch1); |
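// SPARC immediate operands are signed 13-bit, so a mask that does not fit in
// simm13 must first be materialized in scratch2 with set().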
iveresov@2138 | 2438 | if (is_simm13(mask)) { |
iveresov@2138 | 2439 | andcc(scratch1, mask, G0); |
iveresov@2138 | 2440 | } else { |
iveresov@2138 | 2441 | set(mask, scratch2); |
iveresov@2138 | 2442 | andcc(scratch1, scratch2, G0); |
iveresov@2138 | 2443 | } |
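// The store of the updated counter sits in the (non-annulled) branch delay
// slot, so it executes whether or not the branch is taken; the counter is
// always written back exactly once.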
iveresov@2138 | 2444 | br(cond, false, Assembler::pn, *where); |
iveresov@2138 | 2445 | delayed()->st(scratch1, counter_addr); |
iveresov@2138 | 2446 | } |