/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val
// index can be noreg
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
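  // The barrier code below follows the collector in use: G1 needs a SATB
  // pre-barrier (log the value about to be overwritten) and a post-barrier
  // (log the new cross-region reference), while the card-table schemes only
  // dirty the card covering the updated word after the store.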
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, val, tmp);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}


//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}

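// patch_bytecode rewrites the bytecode at Lbcp into its quickened _fast_
// variant once any required constant-pool resolution has been done, so
// later executions dispatch directly to the specialized template.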
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr(Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
    case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb(at_bcp(1), Otos_i);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
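  // Dispatch on the constant tag: unresolved entries call into the VM
  // (call_ldc) to resolve first; resolved Class/String/Object entries push
  // atos, Integer pushes itos, and Float falls through to push ftos.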
  // unresolved string? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedString, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::equal, true, Assembler::pt, isString);
  __ delayed()->cmp(O2, JVM_CONSTANT_Object);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;

  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));

  __ verify_oop(Otos_i);
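  // The resolved constant may actually be a sentinel: an exception raised
  // during resolution comes back wrapped in a systemObjArray. The checks
  // below detect that wrapper, unwrap the exception, and rethrow it.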
  Label L_done;
  const Register Rcon_klass = G3_scratch;    // same as Rcache
  const Register Rarray_klass = G4_scratch;  // same as Rscratch
  __ load_klass(Otos_i, Rcon_klass);
  AddressLiteral array_klass_addr((address)Universe::systemObjArrayKlassObj_addr());
  __ load_contents(array_klass_addr, Rarray_klass);
  __ cmp_and_brx_short(Rarray_klass, Rcon_klass, Assembler::notEqual, Assembler::pt, L_done);
  __ ld(Address(Otos_i, arrayOopDesc::length_offset_in_bytes()), Rcon_klass);
  __ tst(Rcon_klass);
  __ brx(Assembler::zero, true, Assembler::pt, L_done);
  __ delayed()->clr(Otos_i);    // executed only if branch is taken

  // Load the exception from the system-array which wraps it:
  __ load_heap_oop(Otos_i, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);

  __ bind(L_done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
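  // On LP64 every constant-pool slot is 8 bytes, so the entry is naturally
  // aligned and a single 8-byte load suffices; on 32-bit the entry may be
  // only word-aligned, hence the two 4-byte loads.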
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub(at_bcp(offset), reg);
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload or a caload, and therefore
    // an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}
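// fast_iload2 is the quickened form of an iload,iload pair: it pushes the
// first local and leaves the second in tos, saving one template dispatch.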
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}
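// aaload scales the index by the heap-oop size: 4 bytes (shift of 2) with
// compressed oops, a full word otherwise; index_check also performs the
// bounds check against the array length.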
void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}
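// The (int n) variants below implement iload_0..3, lload_0..3, and so on:
// the local index is fixed when the template is generated, so no operand
// byte needs to be fetched at run time.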
void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f);
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}
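// astore (and wide_astore below) pops with load_ptr plus a manual Lesp bump
// rather than pop_ptr: the value may be a returnAddress pushed by jsr, so
// only verify_oop_or_return_address is safe on it.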
void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}
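// aastore keeps value, index and array on the expression stack (vtos) until
// the store check and write barriers are done, then pops all three at once
// by bumping Lesp; do_oop_store supplies the GC barriers.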
void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short(Otos_i, Assembler::pn, is_null);

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i: value
  // O1: addr - offset
  // O2: index
  // O3: array
  // O4: array element klass
  // O5: value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check(O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch);

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize);  // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}
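// Category-2 values (long, double) occupy two local slots and are addressed
// via their higher-numbered slot (n+1); the unaligned load/store helpers are
// used because locals are only guaranteed word alignment.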
void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}
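// The dup/swap templates shuffle raw stack slots with load_ptr/store_ptr;
// slot indices are relative to the top of the expression stack, with 0 the
// topmost slot, so no type information is needed.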
// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (long and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}
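// idiv must special-case min_int / -1: the JVM spec defines the result of
// that one overflowing combination as min_int, which the hardware divide
// does not deliver, so the pair bypasses sdiv entirely.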
void TemplateTable::idiv() {
  // %%%%% Later: For SPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);  // note: irem uses O1 after this instruction!
  __ bind(done);
}


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}
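// For the long shifts only the low six bits of the count are significant
// (the V9 sllx/srax/srlx instructions mask the count to 6 bits), matching
// the Java rule that long shift distances are taken mod 64.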
void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}
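// frem and drem go out of line to SharedRuntime: SPARC has no floating-
// point remainder instruction, and Java's % on floats and doubles follows
// fmod-style semantics rather than the IEEE remainder operation.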
void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld(__ d_tmp, O1);
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert(Ftos_f == F0, "fix this code");
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
     __ pop_d(F0);
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd(__ d_tmp, O2);
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd(__ d_tmp, O0);
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert(Ftos_d == F0, "fix this code");
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp(4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra(Otos_i, 0, Otos_l);
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;
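   // i2b/i2c/i2s below normalize by shifting left and back right: the
   // arithmetic right shift re-sign-extends (byte, short) while the logical
   // right shift zero-extends (char).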
   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d(Ftos_d);
#else
    __ pop_i(O0);
    __ pop_i(O1);
#endif
    __ call_VM_leaf(Lscratch,
                    bytecode() == Bytecodes::_d2i
                      ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                      : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;
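   // The float/double-to-integral conversions must return 0 for NaN and
   // saturate at the target type's bounds; the inline f2i path handles NaN
   // itself, while the SharedRuntime routines cover the remaining cases.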
CAST_FROM_FN_PTR(address, SharedRuntime::d2i) duke@435: : CAST_FROM_FN_PTR(address, SharedRuntime::d2l)); duke@435: break; duke@435: duke@435: case Bytecodes::_d2f: duke@435: if (VM_Version::v9_instructions_work()) { duke@435: __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f); duke@435: } duke@435: else { duke@435: // must uncache tos duke@435: __ push_d(); duke@435: __ pop_i(O0); duke@435: __ pop_i(O1); duke@435: __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f)); duke@435: } duke@435: break; duke@435: duke@435: default: ShouldNotReachHere(); duke@435: } duke@435: __ bind(done); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::lcmp() { duke@435: transition(ltos, itos); duke@435: duke@435: #ifdef _LP64 duke@435: __ pop_l(O1); // pop off value 1, value 2 is in O0 duke@435: __ lcmp( O1, Otos_l, Otos_i ); duke@435: #else duke@435: __ pop_l(O2); // cmp O2,3 to O0,1 duke@435: __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i ); duke@435: #endif duke@435: } duke@435: duke@435: duke@435: void TemplateTable::float_cmp(bool is_float, int unordered_result) { duke@435: duke@435: if (is_float) __ pop_f(F2); duke@435: else __ pop_d(F2); duke@435: duke@435: assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:"); duke@435: duke@435: __ float_cmp( is_float, unordered_result, F2, F0, Otos_i ); duke@435: } duke@435: duke@435: void TemplateTable::branch(bool is_jsr, bool is_wide) { duke@435: // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also. duke@435: __ verify_oop(Lmethod); duke@435: __ verify_thread(); duke@435: duke@435: const Register O2_bumped_count = O2; duke@435: __ profile_taken_branch(G3_scratch, O2_bumped_count); duke@435: duke@435: // get (wide) offset to O1_disp duke@435: const Register O1_disp = O1; duke@435: if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC); duke@435: else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC); duke@435: duke@435: // Handle all the JSR stuff here, then exit. duke@435: // It's much shorter and cleaner than intermingling with the twisti@1040: // non-JSR normal-branch stuff occurring below. duke@435: if( is_jsr ) { duke@435: // compute return address as bci in Otos_i twisti@1162: __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch); duke@435: __ sub(Lbcp, G3_scratch, G3_scratch); duke@435: __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i); duke@435: duke@435: // Bump Lbcp to target of JSR duke@435: __ add(Lbcp, O1_disp, Lbcp); duke@435: // Push returnAddress for "ret" on stack twisti@1861: __ push_ptr(Otos_i); duke@435: // And away we go! 
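// The value pushed above is a bci, not a machine address.  In C++ terms the
// computation amounts to (illustrative sketch):
//
//   int return_bci = jsr_bci + (is_wide ? 5 : 3);  // jsr_w occupies 5 bytecode bytes, jsr 3
//
// "ret" converts this bci back into a bcp when it is consumed.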
duke@435: __ dispatch_next(vtos); duke@435: return; duke@435: } duke@435: duke@435: // Normal (non-jsr) branch handling duke@435: duke@435: // Save the current Lbcp duke@435: const Register O0_cur_bcp = O0; duke@435: __ mov( Lbcp, O0_cur_bcp ); duke@435: iveresov@2138: duke@435: bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter; duke@435: if ( increment_invocation_counter_for_backward_branches ) { duke@435: Label Lforward; duke@435: // check branch direction duke@435: __ br( Assembler::positive, false, Assembler::pn, Lforward ); duke@435: // Bump bytecode pointer by displacement (take the branch) duke@435: __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr duke@435: iveresov@2138: if (TieredCompilation) { iveresov@2138: Label Lno_mdo, Loverflow; iveresov@2138: int increment = InvocationCounter::count_increment; iveresov@2138: int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift; iveresov@2138: if (ProfileInterpreter) { iveresov@2138: // If no method data exists, go to profile_continue. iveresov@2138: __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch); kvn@3037: __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo); iveresov@2138: iveresov@2138: // Increment backedge counter in the MDO iveresov@2138: Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) + iveresov@2138: in_bytes(InvocationCounter::counter_offset())); iveresov@2138: __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch, iveresov@2138: Assembler::notZero, &Lforward); kvn@3037: __ ba_short(Loverflow); duke@435: } iveresov@2138: iveresov@2138: // If there's no MDO, increment counter in methodOop iveresov@2138: __ bind(Lno_mdo); iveresov@2138: Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) + iveresov@2138: in_bytes(InvocationCounter::counter_offset())); iveresov@2138: __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch, iveresov@2138: Assembler::notZero, &Lforward); iveresov@2138: __ bind(Loverflow); iveresov@2138: iveresov@2138: // notify point for loop, pass branch bytecode iveresov@2138: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp); iveresov@2138: iveresov@2138: // Was an OSR adapter generated? iveresov@2138: // O0 = osr nmethod kvn@3037: __ br_null_short(O0, Assembler::pn, Lforward); iveresov@2138: iveresov@2138: // Has the nmethod been invalidated already? iveresov@2138: __ ld(O0, nmethod::entry_bci_offset(), O2); kvn@3037: __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward); iveresov@2138: iveresov@2138: // migrate the interpreter frame off of the stack iveresov@2138: iveresov@2138: __ mov(G2_thread, L7); iveresov@2138: // save nmethod iveresov@2138: __ mov(O0, L6); iveresov@2138: __ set_last_Java_frame(SP, noreg); iveresov@2138: __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7); iveresov@2138: __ reset_last_Java_frame(); iveresov@2138: __ mov(L7, G2_thread); iveresov@2138: iveresov@2138: // move OSR nmethod to I1 iveresov@2138: __ mov(L6, I1); iveresov@2138: iveresov@2138: // OSR buffer to I0 iveresov@2138: __ mov(O0, I0); iveresov@2138: iveresov@2138: // remove the interpreter frame iveresov@2138: __ restore(I5_savedSP, 0, SP); iveresov@2138: iveresov@2138: // Jump to the osr code. 
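// (The restore above popped the interpreter's register window, so the
// nmethod saved in I1 is now addressable as O1 and the OSR buffer in I0
// as O0 -- hence the load below goes through O1.)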
iveresov@2138: __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2); iveresov@2138: __ jmp(O2, G0); iveresov@2138: __ delayed()->nop(); iveresov@2138: duke@435: } else { iveresov@2138: // Update Backedge branch separately from invocations iveresov@2138: const Register G4_invoke_ctr = G4; iveresov@2138: __ increment_backedge_counter(G4_invoke_ctr, G1_scratch); iveresov@2138: if (ProfileInterpreter) { iveresov@2438: __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward); iveresov@2138: if (UseOnStackReplacement) { iveresov@2138: __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch); iveresov@2138: } iveresov@2138: } else { iveresov@2138: if (UseOnStackReplacement) { iveresov@2138: __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch); iveresov@2138: } duke@435: } duke@435: } duke@435: duke@435: __ bind(Lforward); duke@435: } else duke@435: // Bump bytecode pointer by displacement (take the branch) duke@435: __ add( O1_disp, Lbcp, Lbcp );// add to bc addr duke@435: duke@435: // continue with bytecode @ target duke@435: // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above, duke@435: // %%%%% and changing dispatch_next to dispatch_only duke@435: __ dispatch_next(vtos); duke@435: } duke@435: duke@435: duke@435: // Note Condition in argument is TemplateTable::Condition duke@435: // arg scope is within class scope duke@435: duke@435: void TemplateTable::if_0cmp(Condition cc) { duke@435: // no pointers, integer only! duke@435: transition(itos, vtos); duke@435: // assume branch is more often taken than not (loops use backward branches) duke@435: __ cmp( Otos_i, 0); duke@435: __ if_cmp(ccNot(cc), false); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::if_icmp(Condition cc) { duke@435: transition(itos, vtos); duke@435: __ pop_i(O1); duke@435: __ cmp(O1, Otos_i); duke@435: __ if_cmp(ccNot(cc), false); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::if_nullcmp(Condition cc) { duke@435: transition(atos, vtos); duke@435: __ tst(Otos_i); duke@435: __ if_cmp(ccNot(cc), true); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::if_acmp(Condition cc) { duke@435: transition(atos, vtos); duke@435: __ pop_ptr(O1); duke@435: __ verify_oop(O1); duke@435: __ verify_oop(Otos_i); duke@435: __ cmp(O1, Otos_i); duke@435: __ if_cmp(ccNot(cc), true); duke@435: } duke@435: duke@435: duke@435: duke@435: void TemplateTable::ret() { duke@435: transition(vtos, vtos); duke@435: locals_index(G3_scratch); duke@435: __ access_local_returnAddress(G3_scratch, Otos_i); duke@435: // Otos_i contains the bci, compute the bcp from that duke@435: duke@435: #ifdef _LP64 duke@435: #ifdef ASSERT duke@435: // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC duke@435: // the result. The return address (really a BCI) was stored with an duke@435: // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in duke@435: // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit duke@435: // loaded value. 
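// Sketch of the guard below and of the bcp reconstruction that follows it
// (illustrative only; names as used in this file):
//
//   guarantee((uintptr_t) bci <= 65536, "BCI is in the wrong register half?");
//   Lbcp = method->constMethod() + in_bytes(constMethodOopDesc::codes_offset()) + bci;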
duke@435:   { Label zzz ;
duke@435:     __ set (65536, G3_scratch) ;
duke@435:     __ cmp (Otos_i, G3_scratch) ;
duke@435:     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
duke@435:     __ delayed()->nop();
duke@435:     __ stop("BCI is in the wrong register half?");
duke@435:     __ bind (zzz) ;
duke@435:   }
duke@435:   #endif
duke@435:   #endif
duke@435:
duke@435:   __ profile_ret(vtos, Otos_i, G4_scratch);
duke@435:
twisti@1162:  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
duke@435:   __ add(G3_scratch, Otos_i, G3_scratch);
duke@435:   __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
duke@435:   __ dispatch_next(vtos);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::wide_ret() {
duke@435:   transition(vtos, vtos);
duke@435:   locals_index_wide(G3_scratch);
duke@435:   __ access_local_returnAddress(G3_scratch, Otos_i);
duke@435:   // Otos_i contains the bci, compute the bcp from that
duke@435:
duke@435:   __ profile_ret(vtos, Otos_i, G4_scratch);
duke@435:
twisti@1162:  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
duke@435:   __ add(G3_scratch, Otos_i, G3_scratch);
duke@435:   __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
duke@435:   __ dispatch_next(vtos);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::tableswitch() {
duke@435:   transition(itos, vtos);
duke@435:   Label default_case, continue_execution;
duke@435:
duke@435:   // align bcp
duke@435:   __ add(Lbcp, BytesPerInt, O1);
duke@435:   __ and3(O1, -BytesPerInt, O1);
duke@435:   // load lo, hi
duke@435:   __ ld(O1, 1 * BytesPerInt, O2);       // low bound
duke@435:   __ ld(O1, 2 * BytesPerInt, O3);       // high bound
duke@435:   #ifdef _LP64
duke@435:   // Sign extend the 32 bits
duke@435:   __ sra ( Otos_i, 0, Otos_i );
duke@435:   #endif /* _LP64 */
duke@435:
duke@435:   // check against lo & hi
duke@435:   __ cmp( Otos_i, O2);
duke@435:   __ br( Assembler::less, false, Assembler::pn, default_case);
duke@435:   __ delayed()->cmp( Otos_i, O3 );
duke@435:   __ br( Assembler::greater, false, Assembler::pn, default_case);
duke@435:   // lookup dispatch offset
duke@435:   __ delayed()->sub(Otos_i, O2, O2);
duke@435:   __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
duke@435:   __ sll(O2, LogBytesPerInt, O2);
duke@435:   __ add(O2, 3 * BytesPerInt, O2);
kvn@3037:    __ ba(continue_execution);
duke@435:   __ delayed()->ld(O1, O2, O2);
duke@435:   // handle default
duke@435:   __ bind(default_case);
duke@435:   __ profile_switch_default(O3);
duke@435:   __ ld(O1, 0, O2); // get default offset
duke@435:   // continue execution
duke@435:   __ bind(continue_execution);
duke@435:   __ add(Lbcp, O2, Lbcp);
duke@435:   __ dispatch_next(vtos);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::lookupswitch() {
duke@435:   transition(itos, itos);
duke@435:   __ stop("lookupswitch bytecode should have been rewritten");
duke@435: }
duke@435:
duke@435: void TemplateTable::fast_linearswitch() {
duke@435:   transition(itos, vtos);
duke@435:   Label loop_entry, loop, found, continue_execution;
duke@435:   // align bcp
duke@435:   __ add(Lbcp, BytesPerInt, O1);
duke@435:   __ and3(O1, -BytesPerInt, O1);
duke@435:   // set counter
duke@435:   __ ld(O1, BytesPerInt, O2);
duke@435:   __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
duke@435:   __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
kvn@3037:    __ ba(loop_entry);
duke@435:   __ delayed()->add(O3, O2, O2); // counter now points past last pair
duke@435:
duke@435:   // table search
duke@435:   __ bind(loop);
duke@435:   __ cmp(O4, Otos_i);
duke@435:   __ br(Assembler::equal, true,
                          Assembler::pn, found);
duke@435:   __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
duke@435:   __ inc(O3, 2 * BytesPerInt);
duke@435:
duke@435:   __ bind(loop_entry);
duke@435:   __ cmp(O2, O3);
duke@435:   __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
duke@435:   __ delayed()->ld(O3, 0, O4);
duke@435:
duke@435:   // default case
duke@435:   __ ld(O1, 0, O4); // get default offset
duke@435:   if (ProfileInterpreter) {
duke@435:     __ profile_switch_default(O3);
kvn@3037:      __ ba_short(continue_execution);
duke@435:   }
duke@435:
duke@435:   // entry found -> get offset
duke@435:   __ bind(found);
duke@435:   if (ProfileInterpreter) {
duke@435:     __ sub(O3, O1, O3);
duke@435:     __ sub(O3, 2*BytesPerInt, O3);
duke@435:     __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
duke@435:     __ profile_switch_case(O3, O1, O2, G3_scratch);
duke@435:
duke@435:     __ bind(continue_execution);
duke@435:   }
duke@435:   __ add(Lbcp, O4, Lbcp);
duke@435:   __ dispatch_next(vtos);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::fast_binaryswitch() {
duke@435:   transition(itos, vtos);
duke@435:   // Implementation using the following core algorithm: (copied from Intel)
duke@435:   //
duke@435:   // int binary_search(int key, LookupswitchPair* array, int n) {
duke@435:   //   // Binary search according to "Methodik des Programmierens" by
duke@435:   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
duke@435:   //   int i = 0;
duke@435:   //   int j = n;
duke@435:   //   while (i+1 < j) {
duke@435:   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
duke@435:   //     // with      Q: for all i: 0 <= i < n: key < a[i]
duke@435:   //     // where a stands for the array and assuming that the (nonexistent)
duke@435:   //     // element a[n] is infinitely big.
duke@435:   //     int h = (i + j) >> 1;
duke@435:   //     // i < h < j
duke@435:   //     if (key < array[h].fast_match()) {
duke@435:   //       j = h;
duke@435:   //     } else {
duke@435:   //       i = h;
duke@435:   //     }
duke@435:   //   }
duke@435:   //   // R: a[i] <= key < a[i+1] or Q
duke@435:   //   // (i.e., if key is within array, i is the correct index)
duke@435:   //   return i;
duke@435:   // }
duke@435:
duke@435:   // register allocation
duke@435:   assert(Otos_i == O0, "alias checking");
duke@435:   const Register Rkey     = Otos_i; // already set (tosca)
duke@435:   const Register Rarray   = O1;
duke@435:   const Register Ri       = O2;
duke@435:   const Register Rj       = O3;
duke@435:   const Register Rh       = O4;
duke@435:   const Register Rscratch = O5;
duke@435:
duke@435:   const int log_entry_size = 3;
duke@435:   const int entry_size = 1 << log_entry_size;
duke@435:
duke@435:   Label found;
duke@435:   // Find Array start
duke@435:   __ add(Lbcp, 3 * BytesPerInt, Rarray);
duke@435:   __ and3(Rarray, -BytesPerInt, Rarray);
duke@435:   // initialize i & j (in delay slot)
duke@435:   __ clr( Ri );
duke@435:
duke@435:   // and start
duke@435:   Label entry;
kvn@3037:    __ ba(entry);
duke@435:   __ delayed()->ld( Rarray, -BytesPerInt, Rj);
duke@435:   // (Rj is already in the native byte-ordering.)
duke@435:
duke@435:   // binary search loop
duke@435:   { Label loop;
duke@435:     __ bind( loop );
duke@435:     // int h = (i + j) >> 1;
duke@435:     __ sra( Rh, 1, Rh );
duke@435:     // if (key < array[h].fast_match()) {
duke@435:     //   j = h;
duke@435:     // } else {
duke@435:     //   i = h;
duke@435:     // }
duke@435:     __ sll( Rh, log_entry_size, Rscratch );
duke@435:     __ ld( Rarray, Rscratch, Rscratch );
duke@435:     // (Rscratch is already in the native byte-ordering.)
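// Rscratch now holds array[h].fast_match(); on v9 the compare plus the two
// conditional moves below form a branchless select (sketch):
//
//   if (key < array[h].fast_match()) j = h;   // movcc(less,         Rh, Rj)
//   else                             i = h;   // movcc(greaterEqual,  Rh, Ri)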
duke@435:     __ cmp( Rkey, Rscratch );
duke@435:     if ( VM_Version::v9_instructions_work() ) {
duke@435:       __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
duke@435:       __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
duke@435:     }
duke@435:     else {
duke@435:       Label end_of_if;
duke@435:       __ br( Assembler::less, true, Assembler::pt, end_of_if );
duke@435:       __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
duke@435:       __ mov( Rh, Ri );            // else i = h
duke@435:       __ bind(end_of_if);          // }
duke@435:     }
duke@435:
duke@435:     // while (i+1 < j)
duke@435:     __ bind( entry );
duke@435:     __ add( Ri, 1, Rscratch );
duke@435:     __ cmp(Rscratch, Rj);
duke@435:     __ br( Assembler::less, true, Assembler::pt, loop );
duke@435:     __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1;
duke@435:   }
duke@435:
duke@435:   // end of binary search, result index is i (must check again!)
duke@435:   Label default_case;
duke@435:   Label continue_execution;
duke@435:   if (ProfileInterpreter) {
duke@435:     __ mov( Ri, Rh ); // Save index in i for profiling
duke@435:   }
duke@435:   __ sll( Ri, log_entry_size, Ri );
duke@435:   __ ld( Rarray, Ri, Rscratch );
duke@435:   // (Rscratch is already in the native byte-ordering.)
duke@435:   __ cmp( Rkey, Rscratch );
duke@435:   __ br( Assembler::notEqual, true, Assembler::pn, default_case );
duke@435:   __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
duke@435:
duke@435:   // entry found -> j = offset
duke@435:   __ inc( Ri, BytesPerInt );
duke@435:   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
duke@435:   __ ld( Rarray, Ri, Rj );
duke@435:   // (Rj is already in the native byte-ordering.)
duke@435:
duke@435:   if (ProfileInterpreter) {
kvn@3037:      __ ba_short(continue_execution);
duke@435:   }
duke@435:
duke@435:   __ bind(default_case); // fall through (if not profiling)
duke@435:   __ profile_switch_default(Ri);
duke@435:
duke@435:   __ bind(continue_execution);
duke@435:   __ add( Lbcp, Rj, Lbcp );
duke@435:   __ dispatch_next( vtos );
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::_return(TosState state) {
duke@435:   transition(state, state);
duke@435:   assert(_desc->calls_vm(), "inconsistent calls_vm information");
duke@435:
duke@435:   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
duke@435:     assert(state == vtos, "only valid state");
duke@435:     __ mov(G0, G3_scratch);
duke@435:     __ access_local_ptr(G3_scratch, Otos_i);
coleenp@548:   __ load_klass(Otos_i, O2);
duke@435:     __ set(JVM_ACC_HAS_FINALIZER, G3);
duke@435:     __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
duke@435:     __ andcc(G3, O2, G0);
duke@435:     Label skip_register_finalizer;
duke@435:     __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
duke@435:     __ delayed()->nop();
duke@435:
duke@435:     // Call out to do finalizer registration
duke@435:     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
duke@435:
duke@435:     __ bind(skip_register_finalizer);
duke@435:   }
duke@435:
duke@435:   __ remove_activation(state, /* throw_monitor_exception */ true);
duke@435:
duke@435:   // The caller's SP was adjusted upon method entry to accommodate
duke@435:   // the callee's non-argument locals. Undo that adjustment.
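// (Sketch: the ret below returns to the caller while its delay slot
// executes the restore, which both pops the register window and resets
// SP from I5_savedSP -- i.e. pc = I7 + 8; SP = I5_savedSP.)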
duke@435:   __ ret();                           // return to caller
duke@435:   __ delayed()->restore(I5_savedSP, G0, SP);
duke@435: }
duke@435:
duke@435:
duke@435: // ----------------------------------------------------------------------------
duke@435: // Volatile variables demand their effects be made known to all CPUs in
duke@435: // order.  Store buffers on most chips allow reads & writes to reorder; the
duke@435: // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
duke@435: // memory barrier (i.e., it's not sufficient that the interpreter does not
duke@435: // reorder volatile references, the hardware also must not reorder them).
duke@435: //
duke@435: // According to the new Java Memory Model (JMM):
duke@435: // (1) All volatiles are serialized wrt each other.
duke@435: // ALSO reads & writes act as acquire & release, so:
duke@435: // (2) A read cannot let unrelated NON-volatile memory refs that happen after
duke@435: // the read float up to before the read.  It's OK for non-volatile memory refs
duke@435: // that happen before the volatile read to float down below it.
duke@435: // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
duke@435: // that happen BEFORE the write float down to after the write.  It's OK for
duke@435: // non-volatile memory refs that happen after the volatile write to float up
duke@435: // before it.
duke@435: //
duke@435: // We only put in barriers around volatile refs (they are expensive), not
duke@435: // _between_ memory refs (that would require us to track the flavor of the
duke@435: // previous memory refs).  Requirements (2) and (3) require some barriers
duke@435: // before volatile stores and after volatile loads.  These nearly cover
duke@435: // requirement (1) but miss the volatile-store-volatile-load case.  This final
duke@435: // case is placed after volatile-stores although it could just as well go
duke@435: // before volatile-loads.
duke@435: void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
duke@435:   // Helper function to insert an is-volatile test and memory barrier
duke@435:   // All current sparc implementations run in TSO, needing only StoreLoad
duke@435:   if ((order_constraint & Assembler::StoreLoad) == 0) return;
duke@435:   __ membar( order_constraint );
duke@435: }
duke@435:
duke@435: // ----------------------------------------------------------------------------
jrose@1920: void TemplateTable::resolve_cache_and_index(int byte_no,
jrose@1920:                                             Register result,
jrose@1920:                                             Register Rcache,
jrose@1920:                                             Register index,
jrose@1920:                                             size_t index_size) {
duke@435:   // Depends on cpCacheOop layout!
duke@435:   Label resolved;
duke@435:
jrose@1920:   if (byte_no == f1_oop) {
jrose@1920:     // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
jrose@1920:     // This kind of CP cache entry does not need to match the flags byte, because
jrose@1920:     // there is a 1-1 relation between bytecode type and CP entry type.
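// In C++ terms the fast path tested below is simply (illustrative sketch):
//
//   if (cache_entry->f1() != NULL) goto resolved;   // already resolved
//
// so the call into InterpreterRuntime is taken only on the first execution.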
jrose@1920: assert_different_registers(result, Rcache); twisti@3050: __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); twisti@1858: __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + jrose@1920: ConstantPoolCacheEntry::f1_offset(), result); jrose@1920: __ tst(result); twisti@1858: __ br(Assembler::notEqual, false, Assembler::pt, resolved); twisti@1858: __ delayed()->set((int)bytecode(), O1); twisti@1858: } else { jrose@1920: assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); jrose@1920: assert(result == noreg, ""); //else change code for setting result twisti@3050: __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size); twisti@3050: __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode? twisti@3050: __ br(Assembler::equal, false, Assembler::pt, resolved); twisti@1858: __ delayed()->set((int)bytecode(), O1); twisti@1858: } duke@435: duke@435: address entry; duke@435: switch (bytecode()) { duke@435: case Bytecodes::_getstatic : // fall through duke@435: case Bytecodes::_putstatic : // fall through duke@435: case Bytecodes::_getfield : // fall through duke@435: case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; duke@435: case Bytecodes::_invokevirtual : // fall through duke@435: case Bytecodes::_invokespecial : // fall through duke@435: case Bytecodes::_invokestatic : // fall through duke@435: case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; twisti@1858: case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; jrose@1957: case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; jrose@1957: case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; duke@435: default : ShouldNotReachHere(); break; duke@435: } duke@435: // first time invocation - must resolve first duke@435: __ call_VM(noreg, entry, O1); duke@435: // Update registers with resolved info jrose@1920: __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size); jrose@1920: if (result != noreg) jrose@1920: __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + jrose@1920: ConstantPoolCacheEntry::f1_offset(), result); duke@435: __ bind(resolved); duke@435: } duke@435: duke@435: void TemplateTable::load_invoke_cp_cache_entry(int byte_no, duke@435: Register Rmethod, duke@435: Register Ritable_index, duke@435: Register Rflags, duke@435: bool is_invokevirtual, jrose@1920: bool is_invokevfinal, jrose@1920: bool is_invokedynamic) { duke@435: // Uses both G3_scratch and G4_scratch duke@435: Register Rcache = G3_scratch; duke@435: Register Rscratch = G4_scratch; duke@435: assert_different_registers(Rcache, Rmethod, Ritable_index); duke@435: duke@435: ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); duke@435: duke@435: // determine constant pool cache field offsets duke@435: const int method_offset = in_bytes( duke@435: cp_base_offset + duke@435: (is_invokevirtual duke@435: ? 
ConstantPoolCacheEntry::f2_offset()
duke@435:        : ConstantPoolCacheEntry::f1_offset()
duke@435:       )
duke@435:     );
duke@435:   const int flags_offset = in_bytes(cp_base_offset +
duke@435:                                     ConstantPoolCacheEntry::flags_offset());
duke@435:   // access constant pool cache fields
duke@435:   const int index_offset = in_bytes(cp_base_offset +
duke@435:                                     ConstantPoolCacheEntry::f2_offset());
duke@435:
duke@435:   if (is_invokevfinal) {
duke@435:     __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
jrose@1920:     __ ld_ptr(Rcache, method_offset, Rmethod);
jrose@1920:   } else if (byte_no == f1_oop) {
jrose@1920:     // Resolved f1_oop goes directly into 'method' register.
jrose@1920:     resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
duke@435:   } else {
jrose@1920:     resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
jrose@1920:     __ ld_ptr(Rcache, method_offset, Rmethod);
duke@435:   }
duke@435:
duke@435:   if (Ritable_index != noreg) {
twisti@1162:    __ ld_ptr(Rcache, index_offset, Ritable_index);
duke@435:   }
twisti@1162:  __ ld_ptr(Rcache, flags_offset, Rflags);
duke@435: }
duke@435:
duke@435: // The Rcache register must be set before the call
duke@435: void TemplateTable::load_field_cp_cache_entry(Register Robj,
duke@435:                                               Register Rcache,
duke@435:                                               Register index,
duke@435:                                               Register Roffset,
duke@435:                                               Register Rflags,
duke@435:                                               bool is_static) {
duke@435:   assert_different_registers(Rcache, Rflags, Roffset);
duke@435:
duke@435:   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
duke@435:
twisti@1162:  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
twisti@1162:  __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
duke@435:   if (is_static) {
twisti@1162:    __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
duke@435:   }
duke@435: }
duke@435:
duke@435: // The registers Rcache and index are expected to be set before the call.
duke@435: // Correct values of the Rcache and index registers are preserved.
duke@435: void TemplateTable::jvmti_post_field_access(Register Rcache,
duke@435:                                             Register index,
duke@435:                                             bool is_static,
duke@435:                                             bool has_tos) {
duke@435:   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
duke@435:
duke@435:   if (JvmtiExport::can_post_field_access()) {
duke@435:     // Check to see if a field access watch has been set before we take
duke@435:     // the time to call into the VM.
duke@435:     Label Label1;
duke@435:     assert_different_registers(Rcache, index, G1_scratch);
twisti@1162:    AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
duke@435:     __ load_contents(get_field_access_count_addr, G1_scratch);
kvn@3037:      __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
duke@435:
duke@435:     __ add(Rcache, in_bytes(cp_base_offset), Rcache);
duke@435:
duke@435:     if (is_static) {
duke@435:       __ clr(Otos_i);
duke@435:     } else {
duke@435:       if (has_tos) {
duke@435:         // save object pointer before call_VM() clobbers it
coleenp@885:        __ push_ptr(Otos_i);  // put object on tos where GC wants it.
duke@435: } else { duke@435: // Load top of stack (do not pop the value off the stack); duke@435: __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i); duke@435: } duke@435: __ verify_oop(Otos_i); duke@435: } duke@435: // Otos_i: object pointer or NULL if static duke@435: // Rcache: cache entry pointer duke@435: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), duke@435: Otos_i, Rcache); duke@435: if (!is_static && has_tos) { coleenp@885: __ pop_ptr(Otos_i); // restore object pointer duke@435: __ verify_oop(Otos_i); duke@435: } duke@435: __ get_cache_and_index_at_bcp(Rcache, index, 1); duke@435: __ bind(Label1); duke@435: } duke@435: } duke@435: duke@435: void TemplateTable::getfield_or_static(int byte_no, bool is_static) { duke@435: transition(vtos, vtos); duke@435: duke@435: Register Rcache = G3_scratch; duke@435: Register index = G4_scratch; duke@435: Register Rclass = Rcache; duke@435: Register Roffset= G4_scratch; duke@435: Register Rflags = G1_scratch; duke@435: ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); duke@435: jrose@1920: resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2)); duke@435: jvmti_post_field_access(Rcache, index, is_static, false); duke@435: load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); duke@435: duke@435: if (!is_static) { duke@435: pop_and_check_object(Rclass); duke@435: } else { duke@435: __ verify_oop(Rclass); duke@435: } duke@435: duke@435: Label exit; duke@435: duke@435: Assembler::Membar_mask_bits membar_bits = duke@435: Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); duke@435: duke@435: if (__ membar_has_effect(membar_bits)) { duke@435: // Get volatile flag duke@435: __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); duke@435: __ and3(Rflags, Lscratch, Lscratch); duke@435: } duke@435: duke@435: Label checkVolatile; duke@435: duke@435: // compute field type duke@435: Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj; duke@435: __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); duke@435: // Make sure we don't need to mask Rflags for tosBits after the above shift duke@435: ConstantPoolCacheEntry::verify_tosBits(); duke@435: duke@435: // Check atos before itos for getstatic, more likely (in Queens at least) duke@435: __ cmp(Rflags, atos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notObj); duke@435: __ delayed() ->cmp(Rflags, itos); duke@435: duke@435: // atos coleenp@548: __ load_heap_oop(Rclass, Roffset, Otos_i); duke@435: __ verify_oop(Otos_i); duke@435: __ push(atos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notObj); duke@435: duke@435: // cmp(Rflags, itos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notInt); duke@435: __ delayed() ->cmp(Rflags, ltos); duke@435: duke@435: // itos duke@435: __ ld(Rclass, Roffset, Otos_i); duke@435: __ push(itos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notInt); duke@435: duke@435: // cmp(Rflags, ltos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notLong); duke@435: __ delayed() ->cmp(Rflags, btos); duke@435: duke@435: // ltos duke@435: // load must be atomic 
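// (The 64-bit load below must not tear: ld_long is a single ldd on 32-bit
// SPARC and a single ldx on 64-bit, which satisfies the atomicity the JLS
// requires for volatile longs -- a note on the constraint, not extra code.)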
duke@435: __ ld_long(Rclass, Roffset, Otos_l); duke@435: __ push(ltos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notLong); duke@435: duke@435: // cmp(Rflags, btos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notByte); duke@435: __ delayed() ->cmp(Rflags, ctos); duke@435: duke@435: // btos duke@435: __ ldsb(Rclass, Roffset, Otos_i); duke@435: __ push(itos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notByte); duke@435: duke@435: // cmp(Rflags, ctos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notChar); duke@435: __ delayed() ->cmp(Rflags, stos); duke@435: duke@435: // ctos duke@435: __ lduh(Rclass, Roffset, Otos_i); duke@435: __ push(itos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notChar); duke@435: duke@435: // cmp(Rflags, stos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notShort); duke@435: __ delayed() ->cmp(Rflags, ftos); duke@435: duke@435: // stos duke@435: __ ldsh(Rclass, Roffset, Otos_i); duke@435: __ push(itos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notShort); duke@435: duke@435: duke@435: // cmp(Rflags, ftos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notFloat); duke@435: __ delayed() ->tst(Lscratch); duke@435: duke@435: // ftos duke@435: __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f); duke@435: __ push(ftos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch); duke@435: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: duke@435: __ bind(notFloat); duke@435: duke@435: duke@435: // dtos duke@435: __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d); duke@435: __ push(dtos); duke@435: if (!is_static) { duke@435: patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch); duke@435: } duke@435: duke@435: __ bind(checkVolatile); duke@435: if (__ membar_has_effect(membar_bits)) { duke@435: // __ tst(Lscratch); executed in delay slot duke@435: __ br(Assembler::zero, false, Assembler::pt, exit); duke@435: __ delayed()->nop(); duke@435: volatile_barrier(membar_bits); duke@435: } duke@435: duke@435: __ bind(exit); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::getfield(int byte_no) { duke@435: getfield_or_static(byte_no, false); duke@435: } duke@435: duke@435: void TemplateTable::getstatic(int byte_no) { duke@435: getfield_or_static(byte_no, true); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::fast_accessfield(TosState state) { duke@435: transition(atos, state); duke@435: Register Rcache = G3_scratch; duke@435: Register index = G4_scratch; duke@435: Register Roffset = G4_scratch; duke@435: Register Rflags = Rcache; duke@435: ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); duke@435: duke@435: __ get_cache_and_index_at_bcp(Rcache, index, 1); duke@435: jvmti_post_field_access(Rcache, 
index, /*is_static*/false, /*has_tos*/true); duke@435: twisti@1162: __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); duke@435: duke@435: __ null_check(Otos_i); duke@435: __ verify_oop(Otos_i); duke@435: duke@435: Label exit; duke@435: duke@435: Assembler::Membar_mask_bits membar_bits = duke@435: Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); duke@435: if (__ membar_has_effect(membar_bits)) { duke@435: // Get volatile flag twisti@1162: __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags); duke@435: __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); duke@435: } duke@435: duke@435: switch (bytecode()) { duke@435: case Bytecodes::_fast_bgetfield: duke@435: __ ldsb(Otos_i, Roffset, Otos_i); duke@435: break; duke@435: case Bytecodes::_fast_cgetfield: duke@435: __ lduh(Otos_i, Roffset, Otos_i); duke@435: break; duke@435: case Bytecodes::_fast_sgetfield: duke@435: __ ldsh(Otos_i, Roffset, Otos_i); duke@435: break; duke@435: case Bytecodes::_fast_igetfield: duke@435: __ ld(Otos_i, Roffset, Otos_i); duke@435: break; duke@435: case Bytecodes::_fast_lgetfield: duke@435: __ ld_long(Otos_i, Roffset, Otos_l); duke@435: break; duke@435: case Bytecodes::_fast_fgetfield: duke@435: __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f); duke@435: break; duke@435: case Bytecodes::_fast_dgetfield: duke@435: __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d); duke@435: break; duke@435: case Bytecodes::_fast_agetfield: coleenp@548: __ load_heap_oop(Otos_i, Roffset, Otos_i); duke@435: break; duke@435: default: duke@435: ShouldNotReachHere(); duke@435: } duke@435: duke@435: if (__ membar_has_effect(membar_bits)) { duke@435: __ btst(Lscratch, Rflags); duke@435: __ br(Assembler::zero, false, Assembler::pt, exit); duke@435: __ delayed()->nop(); duke@435: volatile_barrier(membar_bits); duke@435: __ bind(exit); duke@435: } duke@435: duke@435: if (state == atos) { duke@435: __ verify_oop(Otos_i); // does not blow flags! duke@435: } duke@435: } duke@435: duke@435: void TemplateTable::jvmti_post_fast_field_mod() { duke@435: if (JvmtiExport::can_post_field_modification()) { duke@435: // Check to see if a field modification watch has been set before we take duke@435: // the time to call into the VM. duke@435: Label done; twisti@1162: AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr()); duke@435: __ load_contents(get_field_modification_count_addr, G4_scratch); kvn@3037: __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done); duke@435: __ pop_ptr(G4_scratch); // copy the object pointer from tos duke@435: __ verify_oop(G4_scratch); duke@435: __ push_ptr(G4_scratch); // put the object pointer back on tos duke@435: __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1); duke@435: // Save tos values before call_VM() clobbers them. Since we have duke@435: // to do it for every data type, we use the saved values as the duke@435: // jvalue object. 
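// Illustrative sketch: the pushes below lay the top-of-stack value out in
// memory so its address can be passed as the jvalue* argument, roughly
//
//   jvalue v;              // the JNI union { jobject l; jint i; jlong j; ... }
//   v.<tag> = tos_value;   // one case per fast bytecode below
//   InterpreterRuntime::post_field_modification(obj, cache_entry, &v);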
duke@435:     switch (bytecode()) {  // save tos values before call_VM() clobbers them
duke@435:     case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
duke@435:     case Bytecodes::_fast_bputfield: // fall through
duke@435:     case Bytecodes::_fast_sputfield: // fall through
duke@435:     case Bytecodes::_fast_cputfield: // fall through
duke@435:     case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
duke@435:     case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
duke@435:     case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
duke@435:     // get words in right order for use as jvalue object
duke@435:     case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
duke@435:     }
duke@435:     // setup pointer to jvalue object
duke@435:     __ mov(Lesp, G3_scratch);  __ inc(G3_scratch, wordSize);
duke@435:     // G4_scratch:  object pointer
duke@435:     // G1_scratch: cache entry pointer
duke@435:     // G3_scratch: jvalue object on the stack
duke@435:     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
duke@435:     switch (bytecode()) {  // restore tos values
duke@435:     case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
duke@435:     case Bytecodes::_fast_bputfield: // fall through
duke@435:     case Bytecodes::_fast_sputfield: // fall through
duke@435:     case Bytecodes::_fast_cputfield: // fall through
duke@435:     case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
duke@435:     case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
duke@435:     case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
duke@435:     case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
duke@435:     }
duke@435:     __ bind(done);
duke@435:   }
duke@435: }
duke@435:
duke@435: // The registers Rcache and index are expected to be set before the call.
duke@435: // The function may destroy various registers, just not the Rcache and index registers.
duke@435: void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
duke@435:   ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
duke@435:
duke@435:   if (JvmtiExport::can_post_field_modification()) {
duke@435:     // Check to see if a field modification watch has been set before we take
duke@435:     // the time to call into the VM.
duke@435:     Label Label1;
duke@435:     assert_different_registers(Rcache, index, G1_scratch);
twisti@1162:    AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
duke@435:     __ load_contents(get_field_modification_count_addr, G1_scratch);
kvn@3037:      __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
duke@435:
duke@435:     // The Rcache and index registers have been already set.
duke@435:     // This allows us to eliminate this call but the Rcache and index
duke@435:     // registers must be correspondingly used after this line.
duke@435:     __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
duke@435:
duke@435:     __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
duke@435:     if (is_static) {
duke@435:       // Life is simple.  Null out the object pointer.
duke@435:       __ clr(G4_scratch);
duke@435:     } else {
duke@435:       Register Rflags = G1_scratch;
duke@435:       // Life is harder.  The stack holds the value on top, followed by the
duke@435:       // object.  We don't know the size of the value, though; it could be
duke@435:       // one or two words depending on its type.  As a result, we must find
duke@435:       // the type to determine where the object is.
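// Sketch of the address computation that follows: the object sits under the
// value on the expression stack, so
//
//   obj_slot = Lesp + Interpreter::expr_offset_in_bytes(is_two_word ? 2 : 1);
//
// where is_two_word holds exactly for ltos and dtos.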
duke@435: duke@435: Label two_word, valsizeknown; twisti@1162: __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); duke@435: __ mov(Lesp, G4_scratch); duke@435: __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); duke@435: // Make sure we don't need to mask Rflags for tosBits after the above shift duke@435: ConstantPoolCacheEntry::verify_tosBits(); duke@435: __ cmp(Rflags, ltos); duke@435: __ br(Assembler::equal, false, Assembler::pt, two_word); duke@435: __ delayed()->cmp(Rflags, dtos); duke@435: __ br(Assembler::equal, false, Assembler::pt, two_word); duke@435: __ delayed()->nop(); duke@435: __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1)); kvn@3037: __ ba_short(valsizeknown); duke@435: __ bind(two_word); duke@435: duke@435: __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2)); duke@435: duke@435: __ bind(valsizeknown); duke@435: // setup object pointer duke@435: __ ld_ptr(G4_scratch, 0, G4_scratch); duke@435: __ verify_oop(G4_scratch); duke@435: } duke@435: // setup pointer to jvalue object duke@435: __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize); duke@435: // G4_scratch: object pointer or NULL if static duke@435: // G3_scratch: cache entry pointer duke@435: // G1_scratch: jvalue object on the stack duke@435: __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), duke@435: G4_scratch, G3_scratch, G1_scratch); duke@435: __ get_cache_and_index_at_bcp(Rcache, index, 1); duke@435: __ bind(Label1); duke@435: } duke@435: } duke@435: duke@435: void TemplateTable::pop_and_check_object(Register r) { duke@435: __ pop_ptr(r); duke@435: __ null_check(r); // for field access must check obj. duke@435: __ verify_oop(r); duke@435: } duke@435: duke@435: void TemplateTable::putfield_or_static(int byte_no, bool is_static) { duke@435: transition(vtos, vtos); duke@435: Register Rcache = G3_scratch; duke@435: Register index = G4_scratch; duke@435: Register Rclass = Rcache; duke@435: Register Roffset= G4_scratch; duke@435: Register Rflags = G1_scratch; duke@435: ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); duke@435: jrose@1920: resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2)); duke@435: jvmti_post_field_mod(Rcache, index, is_static); duke@435: load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static); duke@435: duke@435: Assembler::Membar_mask_bits read_bits = duke@435: Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); duke@435: Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; duke@435: duke@435: Label notVolatile, checkVolatile, exit; duke@435: if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { duke@435: __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); duke@435: __ and3(Rflags, Lscratch, Lscratch); duke@435: duke@435: if (__ membar_has_effect(read_bits)) { kvn@3037: __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile); duke@435: volatile_barrier(read_bits); duke@435: __ bind(notVolatile); duke@435: } duke@435: } duke@435: duke@435: __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags); duke@435: // Make sure we don't need to mask Rflags for tosBits after the above shift duke@435: ConstantPoolCacheEntry::verify_tosBits(); duke@435: duke@435: // compute field type duke@435: Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat; duke@435: duke@435: if (is_static) { duke@435: // putstatic with object type most likely, check that first 
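// The cmp/br chain below is an unrolled switch on the field type taken from
// tosBits, ordered by expected frequency; in C++ terms (sketch):
//
//   if      (type == atos) { /* store oop */ }
//   else if (type == itos) { /* store int */ }
//   else    /* btos, ltos, ctos, stos, ftos, dtos in turn */ ;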
twisti@3050: __ cmp(Rflags, atos); duke@435: __ br(Assembler::notEqual, false, Assembler::pt, notObj); twisti@3050: __ delayed()->cmp(Rflags, itos); duke@435: duke@435: // atos twisti@3050: { twisti@3050: __ pop_ptr(); twisti@3050: __ verify_oop(Otos_i); twisti@3050: do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); twisti@3050: __ ba(checkVolatile); twisti@3050: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notObj); twisti@3050: // cmp(Rflags, itos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notInt); twisti@3050: __ delayed()->cmp(Rflags, btos); twisti@3050: twisti@3050: // itos twisti@3050: { twisti@3050: __ pop_i(); twisti@3050: __ st(Otos_i, Rclass, Roffset); twisti@3050: __ ba(checkVolatile); twisti@3050: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notInt); twisti@3050: } else { twisti@3050: // putfield with int type most likely, check that first twisti@3050: __ cmp(Rflags, itos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notInt); twisti@3050: __ delayed()->cmp(Rflags, atos); twisti@3050: twisti@3050: // itos twisti@3050: { twisti@3050: __ pop_i(); twisti@3050: pop_and_check_object(Rclass); twisti@3050: __ st(Otos_i, Rclass, Roffset); twisti@3050: patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: __ ba(checkVolatile); twisti@3050: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notInt); twisti@3050: // cmp(Rflags, atos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notObj); twisti@3050: __ delayed()->cmp(Rflags, btos); twisti@3050: twisti@3050: // atos twisti@3050: { twisti@3050: __ pop_ptr(); twisti@3050: pop_and_check_object(Rclass); twisti@3050: __ verify_oop(Otos_i); twisti@3050: do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); twisti@3050: patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: __ ba(checkVolatile); twisti@3050: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notObj); twisti@3050: } twisti@3050: twisti@3050: // cmp(Rflags, btos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notByte); twisti@3050: __ delayed()->cmp(Rflags, ltos); twisti@3050: twisti@3050: // btos twisti@3050: { twisti@3050: __ pop_i(); twisti@3050: if (!is_static) pop_and_check_object(Rclass); twisti@3050: __ stb(Otos_i, Rclass, Roffset); twisti@3050: if (!is_static) { twisti@3050: patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notByte); twisti@3050: // cmp(Rflags, ltos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notLong); twisti@3050: __ delayed()->cmp(Rflags, ctos); twisti@3050: twisti@3050: // ltos twisti@3050: { twisti@3050: __ pop_l(); twisti@3050: if (!is_static) pop_and_check_object(Rclass); twisti@3050: __ st_long(Otos_l, Rclass, Roffset); twisti@3050: if (!is_static) { twisti@3050: patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notLong); twisti@3050: // cmp(Rflags, ctos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notChar); twisti@3050: __ delayed()->cmp(Rflags, stos); 
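// Note: the ctos and stos cases below both store with sth -- a Java char and
// short are each 16 bits wide; only the load paths differ (lduh zero-extends,
// ldsh sign-extends).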
twisti@3050: twisti@3050: // ctos (char) twisti@3050: { duke@435: __ pop_i(); twisti@3050: if (!is_static) pop_and_check_object(Rclass); twisti@3050: __ sth(Otos_i, Rclass, Roffset); twisti@3050: if (!is_static) { twisti@3050: patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); twisti@3050: } twisti@3050: twisti@3050: __ bind(notChar); twisti@3050: // cmp(Rflags, stos); twisti@3050: __ br(Assembler::notEqual, false, Assembler::pt, notShort); twisti@3050: __ delayed()->cmp(Rflags, ftos); twisti@3050: twisti@3050: // stos (short) twisti@3050: { twisti@3050: __ pop_i(); twisti@3050: if (!is_static) pop_and_check_object(Rclass); twisti@3050: __ sth(Otos_i, Rclass, Roffset); twisti@3050: if (!is_static) { twisti@3050: patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: } kvn@3037: __ ba(checkVolatile); duke@435: __ delayed()->tst(Lscratch); duke@435: } duke@435: duke@435: __ bind(notShort); twisti@3050: // cmp(Rflags, ftos); duke@435: __ br(Assembler::notZero, false, Assembler::pt, notFloat); duke@435: __ delayed()->nop(); duke@435: duke@435: // ftos twisti@3050: { twisti@3050: __ pop_f(); twisti@3050: if (!is_static) pop_and_check_object(Rclass); twisti@3050: __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); twisti@3050: if (!is_static) { twisti@3050: patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: } twisti@3050: __ ba(checkVolatile); twisti@3050: __ delayed()->tst(Lscratch); duke@435: } duke@435: duke@435: __ bind(notFloat); duke@435: duke@435: // dtos twisti@3050: { twisti@3050: __ pop_d(); twisti@3050: if (!is_static) pop_and_check_object(Rclass); twisti@3050: __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); twisti@3050: if (!is_static) { twisti@3050: patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no); twisti@3050: } duke@435: } duke@435: duke@435: __ bind(checkVolatile); duke@435: __ tst(Lscratch); duke@435: duke@435: if (__ membar_has_effect(write_bits)) { duke@435: // __ tst(Lscratch); in delay slot duke@435: __ br(Assembler::zero, false, Assembler::pt, exit); duke@435: __ delayed()->nop(); duke@435: volatile_barrier(Assembler::StoreLoad); duke@435: __ bind(exit); duke@435: } duke@435: } duke@435: duke@435: void TemplateTable::fast_storefield(TosState state) { duke@435: transition(state, vtos); duke@435: Register Rcache = G3_scratch; duke@435: Register Rclass = Rcache; duke@435: Register Roffset= G4_scratch; duke@435: Register Rflags = G1_scratch; duke@435: ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset(); duke@435: duke@435: jvmti_post_fast_field_mod(); duke@435: duke@435: __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1); duke@435: duke@435: Assembler::Membar_mask_bits read_bits = duke@435: Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore); duke@435: Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad; duke@435: duke@435: Label notVolatile, checkVolatile, exit; duke@435: if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) { twisti@1162: __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags); duke@435: __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); duke@435: __ and3(Rflags, Lscratch, Lscratch); duke@435: if (__ membar_has_effect(read_bits)) { kvn@3037: __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, 
notVolatile); duke@435: volatile_barrier(read_bits); duke@435: __ bind(notVolatile); duke@435: } duke@435: } duke@435: twisti@1162: __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset); duke@435: pop_and_check_object(Rclass); duke@435: duke@435: switch (bytecode()) { duke@435: case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break; duke@435: case Bytecodes::_fast_cputfield: /* fall through */ duke@435: case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break; duke@435: case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break; duke@435: case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break; duke@435: case Bytecodes::_fast_fputfield: duke@435: __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset); duke@435: break; duke@435: case Bytecodes::_fast_dputfield: duke@435: __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset); duke@435: break; duke@435: case Bytecodes::_fast_aputfield: ysr@777: do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false); duke@435: break; duke@435: default: duke@435: ShouldNotReachHere(); duke@435: } duke@435: duke@435: if (__ membar_has_effect(write_bits)) { kvn@3037: __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit); duke@435: volatile_barrier(Assembler::StoreLoad); duke@435: __ bind(exit); duke@435: } duke@435: } duke@435: duke@435: duke@435: void TemplateTable::putfield(int byte_no) { duke@435: putfield_or_static(byte_no, false); duke@435: } duke@435: duke@435: void TemplateTable::putstatic(int byte_no) { duke@435: putfield_or_static(byte_no, true); duke@435: } duke@435: duke@435: duke@435: void TemplateTable::fast_xaccess(TosState state) { duke@435: transition(vtos, state); duke@435: Register Rcache = G3_scratch; duke@435: Register Roffset = G4_scratch; duke@435: Register Rflags = G4_scratch; duke@435: Register Rreceiver = Lscratch; duke@435: twisti@1861: __ ld_ptr(Llocals, 0, Rreceiver); duke@435: duke@435: // access constant pool cache (is resolved) duke@435: __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2); twisti@1162: __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset); duke@435: __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp duke@435: duke@435: __ verify_oop(Rreceiver); duke@435: __ null_check(Rreceiver); duke@435: if (state == atos) { coleenp@548: __ load_heap_oop(Rreceiver, Roffset, Otos_i); duke@435: } else if (state == itos) { duke@435: __ ld (Rreceiver, Roffset, Otos_i) ; duke@435: } else if (state == ftos) { duke@435: __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f); duke@435: } else { duke@435: ShouldNotReachHere(); duke@435: } duke@435: duke@435: Assembler::Membar_mask_bits membar_bits = duke@435: Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore); duke@435: if (__ membar_has_effect(membar_bits)) { duke@435: duke@435: // Get is_volatile value in Rflags and check if membar is needed twisti@1162: __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags); duke@435: duke@435: // Test volatile duke@435: Label notVolatile; duke@435: __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch); duke@435: __ btst(Rflags, Lscratch); duke@435: __ br(Assembler::zero, false, Assembler::pt, notVolatile); duke@435: __ delayed()->nop(); duke@435: volatile_barrier(membar_bits); duke@435: __ bind(notVolatile); duke@435: } duke@435: duke@435: __ 
interp_verify_oop(Otos_i, state, __FILE__, __LINE__); duke@435: __ sub(Lbcp, 1, Lbcp); duke@435: } duke@435: duke@435: //---------------------------------------------------------------------------------------------------- duke@435: // Calls duke@435: duke@435: void TemplateTable::count_calls(Register method, Register temp) { duke@435: // implemented elsewhere duke@435: ShouldNotReachHere(); duke@435: } duke@435: duke@435: void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) { duke@435: Register Rtemp = G4_scratch; duke@435: Register Rcall = Rindex; duke@435: assert_different_registers(Rcall, G5_method, Gargs, Rret); duke@435: duke@435: // get target methodOop & entry point duke@435: const int base = instanceKlass::vtable_start_offset() * wordSize; duke@435: if (vtableEntry::size() % 3 == 0) { duke@435: // scale the vtable index by 12: duke@435: int one_third = vtableEntry::size() / 3; duke@435: __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp); duke@435: __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex); duke@435: __ add(Rindex, Rtemp, Rindex); duke@435: } else { duke@435: // scale the vtable index by 8: duke@435: __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex); duke@435: } duke@435: duke@435: __ add(Rrecv, Rindex, Rrecv); duke@435: __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method); duke@435: duke@435: __ call_from_interpreter(Rcall, Gargs, Rret); duke@435: } duke@435: duke@435: void TemplateTable::invokevirtual(int byte_no) { duke@435: transition(vtos, vtos); jrose@1920: assert(byte_no == f2_byte, "use this argument"); duke@435: duke@435: Register Rscratch = G3_scratch; duke@435: Register Rtemp = G4_scratch; duke@435: Register Rret = Lscratch; duke@435: Register Rrecv = G5_method; duke@435: Label notFinal; duke@435: jrose@1920: load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false); duke@435: __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore duke@435: duke@435: // Check for vfinal duke@435: __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch); duke@435: __ btst(Rret, G4_scratch); duke@435: __ br(Assembler::zero, false, Assembler::pt, notFinal); duke@435: __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters duke@435: duke@435: patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp); duke@435: duke@435: invokevfinal_helper(Rscratch, Rret); duke@435: duke@435: __ bind(notFinal); duke@435: duke@435: __ mov(G5_method, Rscratch); // better scratch register duke@435: __ load_receiver(G4_scratch, O0); // gets receiverOop duke@435: // receiver is in O0 duke@435: __ verify_oop(O0); duke@435: duke@435: // get return address twisti@1162: AddressLiteral table(Interpreter::return_3_addrs_by_index_table()); twisti@1162: __ set(table, Rtemp); duke@435: __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type duke@435: // Make sure we don't need to mask Rret for tosBits after the above shift duke@435: ConstantPoolCacheEntry::verify_tosBits(); duke@435: __ sll(Rret, LogBytesPerWord, Rret); duke@435: __ ld_ptr(Rtemp, Rret, Rret); // get return address duke@435: duke@435: // get receiver klass duke@435: __ null_check(O0, oopDesc::klass_offset_in_bytes()); coleenp@548: __ load_klass(O0, Rrecv); duke@435: __ verify_oop(Rrecv); duke@435: duke@435: __ profile_virtual_call(Rrecv, O4); duke@435: duke@435: generate_vtable_call(Rrecv, Rscratch, Rret); duke@435: } duke@435: duke@435: void 
duke@435:   transition(vtos, vtos);
jrose@1920:   assert(byte_no == f2_byte, "use this argument");
duke@435:
duke@435:   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
jrose@1920:                              /*is_invokevfinal*/true, false);
duke@435:   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
duke@435:   invokevfinal_helper(G3_scratch, Lscratch);
duke@435: }
duke@435:
duke@435: void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
duke@435:   Register Rtemp = G4_scratch;
duke@435:
duke@435:   __ verify_oop(G5_method);
duke@435:
duke@435:   // Load receiver from stack slot
twisti@1162:   __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
duke@435:   __ load_receiver(G4_scratch, O0);
duke@435:
duke@435:   // receiver NULL check
duke@435:   __ null_check(O0);
duke@435:
duke@435:   __ profile_final_call(O4);
duke@435:
duke@435:   // get return address
twisti@1162:   AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
twisti@1162:   __ set(table, Rtemp);
duke@435:   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
duke@435:   // Make sure we don't need to mask Rret for tosBits after the above shift
duke@435:   ConstantPoolCacheEntry::verify_tosBits();
duke@435:   __ sll(Rret, LogBytesPerWord, Rret);
duke@435:   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
duke@435:
duke@435:
duke@435:   // do the call
duke@435:   __ call_from_interpreter(Rscratch, Gargs, Rret);
duke@435: }
duke@435:
duke@435: void TemplateTable::invokespecial(int byte_no) {
duke@435:   transition(vtos, vtos);
jrose@1920:   assert(byte_no == f1_byte, "use this argument");
duke@435:
duke@435:   Register Rscratch = G3_scratch;
duke@435:   Register Rtemp = G4_scratch;
duke@435:   Register Rret = Lscratch;
duke@435:
jrose@1920:   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
duke@435:   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
duke@435:
duke@435:   __ verify_oop(G5_method);
duke@435:
twisti@1162:   __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
duke@435:   __ load_receiver(G4_scratch, O0);
duke@435:
duke@435:   // receiver NULL check
duke@435:   __ null_check(O0);
duke@435:
duke@435:   __ profile_call(O4);
duke@435:
duke@435:   // get return address
twisti@1162:   AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
twisti@1162:   __ set(table, Rtemp);
duke@435:   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
duke@435:   // Make sure we don't need to mask Rret for tosBits after the above shift
duke@435:   ConstantPoolCacheEntry::verify_tosBits();
duke@435:   __ sll(Rret, LogBytesPerWord, Rret);
duke@435:   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
duke@435:
duke@435:   // do the call
duke@435:   __ call_from_interpreter(Rscratch, Gargs, Rret);
duke@435: }
duke@435:
duke@435: void TemplateTable::invokestatic(int byte_no) {
duke@435:   transition(vtos, vtos);
jrose@1920:   assert(byte_no == f1_byte, "use this argument");
duke@435:
duke@435:   Register Rscratch = G3_scratch;
duke@435:   Register Rtemp = G4_scratch;
duke@435:   Register Rret = Lscratch;
duke@435:
jrose@1920:   load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
duke@435:   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
duke@435:
duke@435:   __ verify_oop(G5_method);
duke@435:
duke@435:   __ profile_call(O4);
duke@435:
duke@435:   // get return address
twisti@1162:   AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
twisti@1162:   __ set(table, Rtemp);
duke@435:   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
duke@435:   // Make sure we don't need to mask Rret for tosBits after the above shift
duke@435:   ConstantPoolCacheEntry::verify_tosBits();
duke@435:   __ sll(Rret, LogBytesPerWord, Rret);
duke@435:   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
duke@435:
duke@435:   // do the call
duke@435:   __ call_from_interpreter(Rscratch, Gargs, Rret);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::invokeinterface_object_method(Register RklassOop,
duke@435:                                                   Register Rcall,
duke@435:                                                   Register Rret,
duke@435:                                                   Register Rflags) {
duke@435:   Register Rscratch = G4_scratch;
duke@435:   Register Rindex = Lscratch;
duke@435:
duke@435:   assert_different_registers(Rscratch, Rindex, Rret);
duke@435:
duke@435:   Label notFinal;
duke@435:
duke@435:   // Check for vfinal
duke@435:   __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
duke@435:   __ btst(Rflags, Rscratch);
duke@435:   __ br(Assembler::zero, false, Assembler::pt, notFinal);
duke@435:   __ delayed()->nop();
duke@435:
duke@435:   __ profile_final_call(O4);
duke@435:
duke@435:   // do the call - the index (f2) contains the methodOop
duke@435:   assert_different_registers(G5_method, Gargs, Rcall);
duke@435:   __ mov(Rindex, G5_method);
duke@435:   __ call_from_interpreter(Rcall, Gargs, Rret);
duke@435:   __ bind(notFinal);
duke@435:
duke@435:   __ profile_virtual_call(RklassOop, O4);
duke@435:   generate_vtable_call(RklassOop, Rindex, Rret);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::invokeinterface(int byte_no) {
duke@435:   transition(vtos, vtos);
jrose@1920:   assert(byte_no == f1_byte, "use this argument");
duke@435:
duke@435:   Register Rscratch = G4_scratch;
duke@435:   Register Rret = G3_scratch;
duke@435:   Register Rindex = Lscratch;
duke@435:   Register Rinterface = G1_scratch;
duke@435:   Register RklassOop = G5_method;
duke@435:   Register Rflags = O1;
duke@435:   assert_different_registers(Rscratch, G5_method);
duke@435:
jrose@1920:   load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
duke@435:   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
duke@435:
duke@435:   // get receiver
duke@435:   __ and3(Rflags, 0xFF, Rscratch);  // gets number of parameters
duke@435:   __ load_receiver(Rscratch, O0);
duke@435:   __ verify_oop(O0);
duke@435:
duke@435:   __ mov(Rflags, Rret);
duke@435:
duke@435:   // get return address
twisti@1162:   AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
twisti@1162:   __ set(table, Rscratch);
duke@435:   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
duke@435:   // Make sure we don't need to mask Rret for tosBits after the above shift
duke@435:   ConstantPoolCacheEntry::verify_tosBits();
duke@435:   __ sll(Rret, LogBytesPerWord, Rret);
duke@435:   __ ld_ptr(Rscratch, Rret, Rret);  // get return address
duke@435:
duke@435:   // get receiver klass
duke@435:   __ null_check(O0, oopDesc::klass_offset_in_bytes());
coleenp@548:   __ load_klass(O0, RklassOop);
duke@435:   __ verify_oop(RklassOop);
duke@435:
duke@435:   // Special case of invokeinterface called for virtual method of
duke@435:   // java.lang.Object. See cpCacheOop.cpp for details.
duke@435:   // This code isn't produced by javac, but could be produced by
duke@435:   // another compliant java compiler.
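duke@435:   // For illustration (hypothetical Java source, not part of this file):
duke@435:   //
duke@435:   //   Comparable c = ...;
duke@435:   //   int h = c.hashCode();  // hashCode() is declared by java.lang.Object
duke@435:   //
duke@435:   // A compiler may legally emit invokeinterface for such a call; the cp cache
duke@435:   // entry is then flagged as methodInterface and dispatched below as an
duke@435:   // ordinary virtual call.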
duke@435:   Label notMethod;
duke@435:   __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
duke@435:   __ btst(Rflags, Rscratch);
duke@435:   __ br(Assembler::zero, false, Assembler::pt, notMethod);
duke@435:   __ delayed()->nop();
duke@435:
duke@435:   invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
duke@435:
duke@435:   __ bind(notMethod);
duke@435:
duke@435:   __ profile_virtual_call(RklassOop, O4);
duke@435:
duke@435:   //
duke@435:   // find entry point to call
duke@435:   //
duke@435:
duke@435:   // compute start of first itableOffsetEntry (which is at end of vtable)
duke@435:   const int base = instanceKlass::vtable_start_offset() * wordSize;
duke@435:   Label search;
duke@435:   Register Rtemp = Rflags;
duke@435:
twisti@1162:   __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
duke@435:   if (align_object_offset(1) > 1) {
duke@435:     __ round_to(Rtemp, align_object_offset(1));
duke@435:   }
duke@435:   __ sll(Rtemp, LogBytesPerWord, Rtemp);  // Rtemp *= wordSize
duke@435:   if (Assembler::is_simm13(base)) {
duke@435:     __ add(Rtemp, base, Rtemp);
duke@435:   } else {
duke@435:     __ set(base, Rscratch);
duke@435:     __ add(Rscratch, Rtemp, Rtemp);
duke@435:   }
duke@435:   __ add(RklassOop, Rtemp, Rscratch);
duke@435:
duke@435:   __ bind(search);
duke@435:
duke@435:   __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
duke@435:   {
duke@435:     Label ok;
duke@435:
duke@435:     // Check that entry is non-null. Null entries are probably a bytecode
twisti@1040:     // problem. If the interface isn't implemented by the receiver class,
duke@435:     // the VM should throw IncompatibleClassChangeError. linkResolver checks
duke@435:     // this too but that's only if the entry isn't already resolved, so we
duke@435:     // need to check again.
kvn@3037:     __ br_notnull_short(Rtemp, Assembler::pt, ok);
duke@435:     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
duke@435:     __ should_not_reach_here();
duke@435:     __ bind(ok);
duke@435:     __ verify_oop(Rtemp);
duke@435:   }
duke@435:
duke@435:   __ verify_oop(Rinterface);
duke@435:
duke@435:   __ cmp(Rinterface, Rtemp);
duke@435:   __ brx(Assembler::notEqual, true, Assembler::pn, search);
duke@435:   __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
duke@435:
duke@435:   // entry found and Rscratch points to it
duke@435:   __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
duke@435:
duke@435:   assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
duke@435:   __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex);  // Rindex *= itableMethodEntry size in bytes
duke@435:   __ add(Rscratch, Rindex, Rscratch);
duke@435:   __ ld_ptr(RklassOop, Rscratch, G5_method);
duke@435:
duke@435:   // Check for abstract method error.
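duke@435:   // (G5_method is NULL when the itable slot has no implementation. A
duke@435:   // hypothetical example, not from this file: m() is added to interface I
duke@435:   // after a class C implementing I was compiled, and C never provides m();
duke@435:   // calling ((I)new C()).m() then lands here.)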
duke@435:   {
duke@435:     Label ok;
kvn@3037:     __ br_notnull_short(G5_method, Assembler::pt, ok);
duke@435:     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
duke@435:     __ should_not_reach_here();
duke@435:     __ bind(ok);
duke@435:   }
duke@435:
duke@435:   Register Rcall = Rinterface;
duke@435:   assert_different_registers(Rcall, G5_method, Gargs, Rret);
duke@435:
duke@435:   __ verify_oop(G5_method);
duke@435:   __ call_from_interpreter(Rcall, Gargs, Rret);
duke@435:
duke@435: }
duke@435:
duke@435:
jrose@1161: void TemplateTable::invokedynamic(int byte_no) {
jrose@1161:   transition(vtos, vtos);
jrose@1920:   assert(byte_no == f1_oop, "use this argument");
jrose@1161:
jrose@1161:   if (!EnableInvokeDynamic) {
jrose@1161:     // We should not encounter this bytecode if !EnableInvokeDynamic.
jrose@1161:     // The verifier will stop it. However, if we get past the verifier,
jrose@1161:     // this will stop the thread in a reasonable way, without crashing the JVM.
jrose@1161:     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
jrose@1161:                       InterpreterRuntime::throw_IncompatibleClassChangeError));
jrose@1161:     // the call_VM checks for exception, so we should never return here.
jrose@1161:     __ should_not_reach_here();
jrose@1161:     return;
jrose@1161:   }
jrose@1161:
twisti@1858:   // G5: CallSite object (f1)
twisti@1858:   // XX: unused (f2)
twisti@1858:   // XX: flags (unused)
twisti@1858:
twisti@1858:   Register G5_callsite = G5_method;
twisti@1858:   Register Rscratch = G3_scratch;
twisti@1858:   Register Rtemp = G1_scratch;
twisti@1858:   Register Rret = Lscratch;
twisti@1858:
jrose@1920:   load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
jrose@1920:                              /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
twisti@1858:   __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
twisti@1858:
twisti@1858:   // profile this call
twisti@1858:   __ profile_call(O4);
twisti@1858:
twisti@1858:   // get return address
twisti@1858:   AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
twisti@1858:   __ set(table, Rtemp);
twisti@1858:   __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret);  // get return type
twisti@1858:   // Make sure we don't need to mask Rret for tosBits after the above shift
twisti@1858:   ConstantPoolCacheEntry::verify_tosBits();
twisti@1858:   __ sll(Rret, LogBytesPerWord, Rret);
twisti@1858:   __ ld_ptr(Rtemp, Rret, Rret);  // get return address
twisti@1858:
twisti@2811:   __ verify_oop(G5_callsite);
jrose@2639:   __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
twisti@1858:   __ null_check(G3_method_handle);
twisti@2811:   __ verify_oop(G3_method_handle);
twisti@1858:
twisti@1858:   // Adjust Rret first so Llast_SP can be same as Rret
twisti@1858:   __ add(Rret, -frame::pc_return_offset, O7);
twisti@1858:   __ add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
twisti@1858:   __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
twisti@1858:   // Record SP so we can remove any stack space allocated by adapter transition
twisti@1858:   __ delayed()->mov(SP, Llast_SP);
jrose@1161: }
jrose@1161:
jrose@1161:
duke@435: //----------------------------------------------------------------------------------------------------
duke@435: // Allocation
duke@435:
duke@435: void TemplateTable::_new() {
duke@435:   transition(vtos, atos);
duke@435:
duke@435:   Label slow_case;
duke@435:   Label done;
duke@435:   Label initialize_header;
duke@435:   Label initialize_object;  // including clearing the fields
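duke@435:
duke@435:   // Bytecode layout handled here (for reference): 'new' (0xbb) is followed by
duke@435:   // a two-byte constant pool index, read below with get_2_byte_integer_at_bcp(1, ...);
duke@435:   // the fast path requires that index to name a resolved JVM_CONSTANT_Class entry.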
duke@435:
duke@435:   Register RallocatedObject = Otos_i;
duke@435:   Register RinstanceKlass = O1;
duke@435:   Register Roffset = O3;
duke@435:   Register Rscratch = O4;
duke@435:
duke@435:   __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
duke@435:   __ get_cpool_and_tags(Rscratch, G3_scratch);
duke@435:   // make sure the class we're about to instantiate has been resolved
bobv@2036:   // This is done before loading instanceKlass to be consistent with the order
bobv@2036:   // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
duke@435:   __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
duke@435:   __ ldub(G3_scratch, Roffset, G3_scratch);
duke@435:   __ cmp(G3_scratch, JVM_CONSTANT_Class);
duke@435:   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
duke@435:   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
bobv@2036:   // get instanceKlass
duke@435:   //__ sll(Roffset, LogBytesPerWord, Roffset);  // executed in delay slot
duke@435:   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
duke@435:   __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
duke@435:
duke@435:   // make sure klass is fully initialized:
coleenp@3368:   __ ldub(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
duke@435:   __ cmp(G3_scratch, instanceKlass::fully_initialized);
duke@435:   __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
duke@435:   __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
duke@435:
duke@435:   // get instance_size in instanceKlass (already aligned)
duke@435:   //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
duke@435:
duke@435:   // make sure the klass has no finalizer, is not abstract, is not an
duke@435:   // interface, and is not java/lang/Class
duke@435:   __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
duke@435:   __ br(Assembler::notZero, false, Assembler::pn, slow_case);
duke@435:   __ delayed()->nop();
duke@435:
duke@435:   // allocate the instance
duke@435:   // 1) Try to allocate in the TLAB
duke@435:   // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
duke@435:   // 3) if the above fails (or is not applicable), go to a slow case
duke@435:   // (creates a new TLAB, etc.)
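duke@435:   //
duke@435:   // A sketch of the control flow generated below:
duke@435:   //
duke@435:   //   if (UseTLAB && tlab_top + size <= tlab_end)  -> bump tlab_top
duke@435:   //   else if (shared eden not usable inline)      -> slow_case
duke@435:   //   else if (refill_waste_limit >= tlab_free)    -> slow_case (discard and refill TLAB)
duke@435:   //   else                                         -> bump waste limit, then
duke@435:   //                                                   CAS-allocate in the shared eden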
duke@435:
duke@435:   const bool allow_shared_alloc =
duke@435:     Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
duke@435:
duke@435:   if (UseTLAB) {
duke@435:     Register RoldTopValue = RallocatedObject;
kvn@3092:     Register RtlabWasteLimitValue = G3_scratch;
duke@435:     Register RnewTopValue = G1_scratch;
duke@435:     Register RendValue = Rscratch;
duke@435:     Register RfreeValue = RnewTopValue;
duke@435:
duke@435:     // check if we can allocate in the TLAB
duke@435:     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue);  // sets up RallocatedObject
duke@435:     __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
duke@435:     __ add(RoldTopValue, Roffset, RnewTopValue);
duke@435:
duke@435:     // if there is enough space, we do not CAS and do not clear
duke@435:     __ cmp(RnewTopValue, RendValue);
duke@435:     if (ZeroTLAB) {
duke@435:       // the fields have already been cleared
duke@435:       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
duke@435:     } else {
duke@435:       // initialize both the header and fields
duke@435:       __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
duke@435:     }
duke@435:     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
duke@435:
duke@435:     if (allow_shared_alloc) {
phh@2423:       // Check if tlab should be discarded (refill_waste_limit >= free)
phh@2423:       __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
phh@2423:       __ sub(RendValue, RoldTopValue, RfreeValue);
duke@435: #ifdef _LP64
phh@2423:       __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
duke@435: #else
phh@2423:       __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
duke@435: #endif
kvn@3037:       __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case);  // tlab waste is small
phh@2423:
phh@2423:       // increment waste limit to prevent getting stuck on this slow path
phh@2423:       __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
phh@2423:       __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
duke@435:     } else {
duke@435:       // No allocation in the shared eden.
kvn@3037:       __ ba_short(slow_case);
duke@435:     }
duke@435:   }
duke@435:
duke@435:   // Allocation in the shared Eden
duke@435:   if (allow_shared_alloc) {
duke@435:     Register RoldTopValue = G1_scratch;
duke@435:     Register RtopAddr = G3_scratch;
duke@435:     Register RnewTopValue = RallocatedObject;
duke@435:     Register RendValue = Rscratch;
duke@435:
duke@435:     __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
duke@435:
duke@435:     Label retry;
duke@435:     __ bind(retry);
duke@435:     __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
duke@435:     __ ld_ptr(RendValue, 0, RendValue);
duke@435:     __ ld_ptr(RtopAddr, 0, RoldTopValue);
duke@435:     __ add(RoldTopValue, Roffset, RnewTopValue);
duke@435:
duke@435:     // RnewTopValue contains the top address after the new object
duke@435:     // has been allocated.
kvn@3037:     __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
duke@435:
duke@435:     __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
duke@435:                        VM_Version::v9_instructions_work() ? NULL :
duke@435:                        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
duke@435:
duke@435:     // if someone beat us on the allocation, try again, otherwise continue
kvn@3037:     __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
phh@2423:
phh@2423:     // bump total bytes allocated by this thread
phh@2447:     // RoldTopValue and RtopAddr are dead, so can use G1 and G3
phh@2447:     __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
duke@435:   }
duke@435:
duke@435:   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
duke@435:     // clear object fields
duke@435:     __ bind(initialize_object);
duke@435:     __ deccc(Roffset, sizeof(oopDesc));
duke@435:     __ br(Assembler::zero, false, Assembler::pt, initialize_header);
duke@435:     __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
duke@435:
duke@435:     // initialize remaining object fields
kvn@3092:     if (UseBlockZeroing) {
kvn@3092:       // Use BIS for zeroing
kvn@3092:       __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
kvn@3092:     } else {
kvn@3092:       Label loop;
duke@435:       __ subcc(Roffset, wordSize, Roffset);
duke@435:       __ bind(loop);
duke@435:       //__ subcc(Roffset, wordSize, Roffset);  // executed above loop or in delay slot
duke@435:       __ st_ptr(G0, G3_scratch, Roffset);
duke@435:       __ br(Assembler::notEqual, false, Assembler::pt, loop);
duke@435:       __ delayed()->subcc(Roffset, wordSize, Roffset);
duke@435:     }
kvn@3037:     __ ba_short(initialize_header);
duke@435:   }
duke@435:
duke@435:   // slow case
duke@435:   __ bind(slow_case);
duke@435:   __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
duke@435:   __ get_constant_pool(O1);
duke@435:
duke@435:   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
duke@435:
kvn@3037:   __ ba_short(done);
duke@435:
duke@435:   // Initialize the header: mark, klass
duke@435:   __ bind(initialize_header);
duke@435:
duke@435:   if (UseBiasedLocking) {
duke@435:     __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
duke@435:   } else {
duke@435:     __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
duke@435:   }
duke@435:   __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes());  // mark
coleenp@602:   __ store_klass_gap(G0, RallocatedObject);          // klass gap if compressed
coleenp@602:   __ store_klass(RinstanceKlass, RallocatedObject);  // klass (last for cms)
duke@435:
duke@435:   {
duke@435:     SkipIfEqual skip_if(
duke@435:       _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
duke@435:     // Trigger dtrace event
duke@435:     __ push(atos);
duke@435:     __ call_VM_leaf(noreg,
duke@435:        CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
duke@435:     __ pop(atos);
duke@435:   }
duke@435:
duke@435:   // continue
duke@435:   __ bind(done);
duke@435: }
duke@435:
duke@435:
duke@435:
duke@435: void TemplateTable::newarray() {
duke@435:   transition(itos, atos);
duke@435:   __ ldub(Lbcp, 1, O1);
duke@435:   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::anewarray() {
duke@435:   transition(itos, atos);
duke@435:   __ get_constant_pool(O1);
duke@435:   __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
duke@435:   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::arraylength() {
duke@435:   transition(atos, itos);
duke@435:   Label ok;
duke@435:   __ verify_oop(Otos_i);
duke@435:   __ tst(Otos_i);
duke@435:   __ throw_if_not_1_x(Assembler::notZero, ok);
duke@435:   __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
duke@435:   __ throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::checkcast() {
duke@435:   transition(atos, atos);
duke@435:   Label done, is_null, quicked, cast_ok, resolved;
duke@435:   Register Roffset = G1_scratch;
duke@435:   Register RobjKlass = O5;
duke@435:   Register RspecifiedKlass = O4;
duke@435:
duke@435:   // Check for casting a NULL
kvn@3037:   __ br_null_short(Otos_i, Assembler::pn, is_null);
duke@435:
duke@435:   // Get value klass in RobjKlass
coleenp@548:   __ load_klass(Otos_i, RobjKlass);  // get value klass
duke@435:
duke@435:   // Get constant pool tag
duke@435:   __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
duke@435:
duke@435:   // See if the checkcast has been quickened
duke@435:   __ get_cpool_and_tags(Lscratch, G3_scratch);
duke@435:   __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
duke@435:   __ ldub(G3_scratch, Roffset, G3_scratch);
duke@435:   __ cmp(G3_scratch, JVM_CONSTANT_Class);
duke@435:   __ br(Assembler::equal, true, Assembler::pt, quicked);
duke@435:   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
duke@435:
duke@435:   __ push_ptr();  // save receiver for result, and for GC
duke@435:   call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
duke@435:   __ pop_ptr(Otos_i, G3_scratch);  // restore receiver
duke@435:
kvn@3037:   __ ba_short(resolved);
duke@435:
duke@435:   // Extract target class from constant pool
duke@435:   __ bind(quicked);
duke@435:   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
duke@435:   __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
duke@435:   __ bind(resolved);
coleenp@548:   __ load_klass(Otos_i, RobjKlass);  // get value klass
duke@435:
duke@435:   // Generate a fast subtype check. Branch to cast_ok if no
duke@435:   // failure. Throw exception if failure.
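duke@435:   // (Checkcast semantics, for reference: for "(T) obj", a null obj passes
duke@435:   // trivially, which is handled at is_null above; otherwise obj's klass must
duke@435:   // be a subtype of T, or ClassCastException is thrown.)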
duke@435:   __ gen_subtype_check(RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok);
duke@435:
duke@435:   // Not a subtype; so must throw exception
duke@435:   __ throw_if_not_x(Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch);
duke@435:
duke@435:   __ bind(cast_ok);
duke@435:
duke@435:   if (ProfileInterpreter) {
kvn@3037:     __ ba_short(done);
duke@435:   }
duke@435:   __ bind(is_null);
duke@435:   __ profile_null_seen(G3_scratch);
duke@435:   __ bind(done);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::instanceof() {
duke@435:   Label done, is_null, quicked, resolved;
duke@435:   transition(atos, itos);
duke@435:   Register Roffset = G1_scratch;
duke@435:   Register RobjKlass = O5;
duke@435:   Register RspecifiedKlass = O4;
duke@435:
duke@435:   // Check for a NULL operand
kvn@3037:   __ br_null_short(Otos_i, Assembler::pt, is_null);
duke@435:
duke@435:   // Get value klass in RobjKlass
coleenp@548:   __ load_klass(Otos_i, RobjKlass);  // get value klass
duke@435:
duke@435:   // Get constant pool tag
duke@435:   __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
duke@435:
duke@435:   // See if the instanceof has been quickened
duke@435:   __ get_cpool_and_tags(Lscratch, G3_scratch);
duke@435:   __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
duke@435:   __ ldub(G3_scratch, Roffset, G3_scratch);
duke@435:   __ cmp(G3_scratch, JVM_CONSTANT_Class);
duke@435:   __ br(Assembler::equal, true, Assembler::pt, quicked);
duke@435:   __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
duke@435:
duke@435:   __ push_ptr();  // save receiver for result, and for GC
duke@435:   call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
duke@435:   __ pop_ptr(Otos_i, G3_scratch);  // restore receiver
duke@435:
kvn@3037:   __ ba_short(resolved);
duke@435:
duke@435:   // Extract target class from constant pool
duke@435:   __ bind(quicked);
duke@435:   __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
duke@435:   __ get_constant_pool(Lscratch);
duke@435:   __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
duke@435:   __ bind(resolved);
coleenp@548:   __ load_klass(Otos_i, RobjKlass);  // get value klass
duke@435:
duke@435:   // Generate a fast subtype check. Branch to done if no
duke@435:   // failure. Return 0 if failure.
duke@435:   __ or3(G0, 1, Otos_i);  // set result assuming quick tests succeed
duke@435:   __ gen_subtype_check(RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done);
duke@435:   // Not a subtype; return 0.
duke@435:   __ clr(Otos_i);
duke@435:
duke@435:   if (ProfileInterpreter) {
kvn@3037:     __ ba_short(done);
duke@435:   }
duke@435:   __ bind(is_null);
duke@435:   __ profile_null_seen(G3_scratch);
duke@435:   __ bind(done);
duke@435: }
duke@435:
duke@435: void TemplateTable::_breakpoint() {
duke@435:
duke@435:   // Note: We get here even if we are single stepping;
duke@435:   // jbug insists on setting breakpoints at every bytecode
duke@435:   // even if we are in single step mode.
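duke@435:   //
duke@435:   // Flow implemented below: fetch the original bytecode that the breakpoint
duke@435:   // replaced, post the JVMTI breakpoint event, then dispatch the original
duke@435:   // bytecode as if no breakpoint were present.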
duke@435:
duke@435:   transition(vtos, vtos);
duke@435:   // get the unpatched byte code
duke@435:   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
duke@435:   __ mov(O0, Lbyte_code);
duke@435:
duke@435:   // post the breakpoint event
duke@435:   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
duke@435:
duke@435:   // complete the execution of original bytecode
duke@435:   __ dispatch_normal(vtos);
duke@435: }
duke@435:
duke@435:
duke@435: //----------------------------------------------------------------------------------------------------
duke@435: // Exceptions
duke@435:
duke@435: void TemplateTable::athrow() {
duke@435:   transition(atos, vtos);
duke@435:
duke@435:   // This works because the exception is cached in Otos_i, which is the same
duke@435:   // as O0, which is what throw_exception_entry expects
duke@435:   assert(Otos_i == Oexception, "see explanation above");
duke@435:
duke@435:   __ verify_oop(Otos_i);
duke@435:   __ null_check(Otos_i);
duke@435:   __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
duke@435: }
duke@435:
duke@435:
duke@435: //----------------------------------------------------------------------------------------------------
duke@435: // Synchronization
duke@435:
duke@435:
duke@435: // See frame_sparc.hpp for monitor block layout.
duke@435: // Monitor elements are dynamically allocated by growing stack as needed.
duke@435:
duke@435: void TemplateTable::monitorenter() {
duke@435:   transition(atos, vtos);
duke@435:   __ verify_oop(Otos_i);
duke@435:   // Try to acquire a lock on the object
duke@435:   // Repeat until succeeded (i.e., until
duke@435:   // monitorenter returns true).
duke@435:
duke@435:   { Label ok;
duke@435:     __ tst(Otos_i);
duke@435:     __ throw_if_not_1_x(Assembler::notZero, ok);
duke@435:     __ delayed()->mov(Otos_i, Lscratch);  // save obj
duke@435:     __ throw_if_not_2(Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
duke@435:   }
duke@435:
duke@435:   assert(O0 == Otos_i, "Be sure where the object to lock is");
duke@435:
duke@435:   // find a free slot in the monitor block
duke@435:
duke@435:
duke@435:   // initialize entry pointer
duke@435:   __ clr(O1);  // points to free slot or NULL
duke@435:
duke@435:   {
duke@435:     Label entry, loop, exit;
duke@435:     __ add(__ top_most_monitor(), O2);  // last one to check
kvn@3037:     __ ba(entry);
duke@435:     __ delayed()->mov(Lmonitors, O3);   // first one to check
duke@435:
duke@435:
duke@435:     __ bind(loop);
duke@435:
duke@435:     __ verify_oop(O4);  // verify each monitor's oop
duke@435:     __ tst(O4);         // is this entry unused?
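duke@435:     // (If it is, remember it in O1 as a candidate free slot: a conditional
duke@435:     // move on V9, otherwise an annulled branch around the mov.)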
duke@435:     if (VM_Version::v9_instructions_work())
duke@435:       __ movcc(Assembler::zero, false, Assembler::ptr_cc, O3, O1);
duke@435:     else {
duke@435:       Label L;
duke@435:       __ br(Assembler::zero, true, Assembler::pn, L);
duke@435:       __ delayed()->mov(O3, O1);  // remember this one if it matches
duke@435:       __ bind(L);
duke@435:     }
duke@435:
duke@435:     __ cmp(O4, O0);  // check if current entry is for same object
duke@435:     __ brx(Assembler::equal, false, Assembler::pn, exit);
duke@435:     __ delayed()->inc(O3, frame::interpreter_frame_monitor_size() * wordSize);  // check next one
duke@435:
duke@435:     __ bind(entry);
duke@435:
duke@435:     __ cmp(O3, O2);
duke@435:     __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, loop);
duke@435:     __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
duke@435:
duke@435:     __ bind(exit);
duke@435:   }
duke@435:
duke@435:   { Label allocated;
duke@435:
duke@435:     // found free slot?
kvn@3037:     __ br_notnull_short(O1, Assembler::pn, allocated);
duke@435:
duke@435:     __ add_monitor_to_stack(false, O2, O3);
duke@435:     __ mov(Lmonitors, O1);
duke@435:
duke@435:     __ bind(allocated);
duke@435:   }
duke@435:
duke@435:   // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
duke@435:   // The object has already been popped from the stack, so the expression stack looks correct.
duke@435:   __ inc(Lbcp);
duke@435:
duke@435:   __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes());  // store object
duke@435:   __ lock_object(O1, O0);
duke@435:
duke@435:   // check if there's enough space on the stack for the monitors after locking
duke@435:   __ generate_stack_overflow_check(0);
duke@435:
duke@435:   // The bcp has already been incremented. Just need to dispatch to next instruction.
duke@435:   __ dispatch_next(vtos);
duke@435: }
duke@435:
duke@435:
duke@435: void TemplateTable::monitorexit() {
duke@435:   transition(atos, vtos);
duke@435:   __ verify_oop(Otos_i);
duke@435:   __ tst(Otos_i);
duke@435:   __ throw_if_not_x(Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch);
duke@435:
duke@435:   assert(O0 == Otos_i, "just checking");
duke@435:
duke@435:   { Label entry, loop, found;
duke@435:     __ add(__ top_most_monitor(), O2);  // last one to check
kvn@3037:     __ ba(entry);
duke@435:     // Use Lscratch to hold the monitor element to check; start with the most
duke@435:     // recent monitor. By using a local it survives the call to the C routine.
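duke@435:     // (Search sketch: scan from Lmonitors, the most recent monitor, towards
duke@435:     // the top-most one; unlock the first slot whose obj matches Otos_i, or
duke@435:     // throw IllegalMonitorStateException via the VM call if none matches.)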
duke@435:     __ delayed()->mov(Lmonitors, Lscratch);
duke@435:
duke@435:     __ bind(loop);
duke@435:
duke@435:     __ verify_oop(O4);  // verify each monitor's oop
duke@435:     __ cmp(O4, O0);     // check if current entry is for desired object
duke@435:     __ brx(Assembler::equal, true, Assembler::pt, found);
duke@435:     __ delayed()->mov(Lscratch, O1);  // pass found entry as argument to monitorexit
duke@435:
duke@435:     __ inc(Lscratch, frame::interpreter_frame_monitor_size() * wordSize);  // advance to next
duke@435:
duke@435:     __ bind(entry);
duke@435:
duke@435:     __ cmp(Lscratch, O2);
duke@435:     __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, loop);
duke@435:     __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
duke@435:
duke@435:     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
duke@435:     __ should_not_reach_here();
duke@435:
duke@435:     __ bind(found);
duke@435:   }
duke@435:   __ unlock_object(O1);
duke@435: }
duke@435:
duke@435:
duke@435: //----------------------------------------------------------------------------------------------------
duke@435: // Wide instructions
duke@435:
duke@435: void TemplateTable::wide() {
duke@435:   transition(vtos, vtos);
duke@435:   __ ldub(Lbcp, 1, G3_scratch);  // get next bc
duke@435:   __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
twisti@1162:   AddressLiteral ep(Interpreter::_wentry_point);
twisti@1162:   __ set(ep, G4_scratch);
twisti@1162:   __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
duke@435:   __ jmp(G3_scratch, G0);
duke@435:   __ delayed()->nop();
duke@435:   // Note: the Lbcp increment step is part of the individual wide bytecode implementations
duke@435: }
duke@435:
duke@435:
duke@435: //----------------------------------------------------------------------------------------------------
duke@435: // Multi arrays
duke@435:
duke@435: void TemplateTable::multianewarray() {
duke@435:   transition(vtos, atos);
duke@435:   // put ndims * wordSize into Lscratch
duke@435:   __ ldub(Lbcp, 3, Lscratch);
twisti@1861:   __ sll(Lscratch, Interpreter::logStackElementSize, Lscratch);
duke@435:   // Lesp points past the last_dim, so set O1 to the first_dim address
duke@435:   __ add(Lesp, Lscratch, O1);
duke@435:   call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
duke@435:   __ add(Lesp, Lscratch, Lesp);  // pop all dimensions off the stack
duke@435: }
duke@435: #endif /* !CC_INTERP */