/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_sparc.inline.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_sparc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// machine-dependent part of VtableStubs: create vtableStub of correct size and
// initialize its code

#define __ masm->


#ifndef PRODUCT
extern "C" void bad_compiled_vtable_index(JavaThread* thread, oopDesc* receiver, int index);
#endif


// Used by compiler only; may use only caller saved, non-argument registers
// NOTE:  %%%% if any change is made to this stub make sure that the function
//        pd_code_size_limit is changed to ensure the correct size for VtableStub
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  }
#endif /* PRODUCT */

  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(O0, G3_scratch);

  // set methodOop (in case of interpreted method), and destination address
  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ld(G3_scratch, instanceKlass::vtable_length_offset()*wordSize, G5);
    __ cmp(G5, vtable_index*vtableEntry::size());
    __ br(Assembler::greaterUnsigned, false, Assembler::pt, L);
    __ delayed()->nop();
    __ set(vtable_index, O2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
    __ bind(L);
  }
#endif

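  // v_off is the byte offset from the receiver's klass to the methodOop slot of
  // this vtable entry.  Small offsets fit in a 13-bit signed immediate; larger
  // ones must first be materialized into G5 with set() (a sethi;add pair), which
  // is why the size checks below allow two extra instructions of slop.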
  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
  if (__ is_simm13(v_off)) {
    __ ld_ptr(G3, v_off, G5_method);
  } else {
    __ set(v_off, G5);
    __ ld_ptr(G3, G5, G5_method);
  }

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ br_notnull(G5_method, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
                               // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);

  // jump to target (either compiled code or c2iadapter)
  __ JMP(G3_scratch, 0);
  // load methodOop (in case we call c2iadapter)
  __ delayed()->nop();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}


// NOTE:  %%%% if any change is made to this stub make sure that the function
//        pd_code_size_limit is changed to ensure the correct size for VtableStub
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  Register G3_klassOop  = G3_scratch;
  Register G5_interface = G5;  // Passed in as an argument
  Label search;

  // Entry arguments:
  //  G5_interface: Interface
  //  O0:           Receiver
  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ load_klass(O0, G3_klassOop);
  __ verify_oop(G3_klassOop);

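  // G3_klassOop now holds the receiver's klass.  The lookup_interface_method call
  // below scans its itable for an entry matching G5_interface; if the receiver's
  // class does not implement the interface, the lookup branches to throw_icce,
  // which dispatches to the IncompatibleClassChangeError stub.
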
  // Push a new window to get some temp registers.  This chops the head of all
  // my 64-bit %o registers in the LION build, but this is OK because no longs
  // are passed in the %o registers.  Instead, longs are passed in G1 and G4
  // and so those registers are not available here.
  __ save(SP, -frame::register_save_words*wordSize, SP);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), L0, L1);
  }
#endif /* PRODUCT */

  Label throw_icce;

  Register L5_method = L5;
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             G3_klassOop, G5_interface, itable_index,
                             // outputs: method, scan temp. reg
                             L5_method, L2, L3,
                             throw_icce);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L01;
    __ bpr(Assembler::rc_nz, false, Assembler::pt, L5_method, L01);
    __ delayed()->nop();
    __ stop("methodOop is null");
    __ bind(L01);
    __ verify_oop(L5_method);
  }
#endif

  // If the following load is through a NULL pointer, we'll take an OS
  // exception that should translate into an AbstractMethodError.  We need the
  // window count to be correct at that time.
  __ restore(L5_method, 0, G5_method);
  // Restore registers *before* the AME point.

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3_scratch);

  // G5_method:  methodOop
  // O0:         Receiver
  // G3_scratch: entry point
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();

  __ bind(throw_icce);
  AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
  __ jump_to(icce, G3_scratch);
  __ delayed()->restore();

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}

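// pd_code_size_limit returns a conservative upper bound, in bytes, on the code
// emitted by the two generators above.  It is used to size the VtableStub
// allocation, and the guarantee()/assert() checks at the end of each generator
// verify that the bound was not exceeded.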
int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
  else {
    const int slop = 2*BytesPerInstWord;  // sethi;add  (needed for long offsets)
    if (is_vtable_stub) {
      // ld;ld;ld,jmp,nop
      const int basic = 5*BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedOops ?
                          ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
      return basic + slop;
    } else {
      const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
                        // shift;add for load_klass (only shift with zero heap based)
                        (UseCompressedOops ?
                          ((Universe::narrow_oop_base() == NULL) ? BytesPerInstWord : 2*BytesPerInstWord) : 0);
      return (basic + slop);
    }
  }

  // In order to tune these parameters, run the JVM with VM options
  // +PrintMiscellaneous and +WizardMode to see information about
  // actual itable stubs.  Look for lines like this:
  //   itable #1 at 0x5551212[116] left over: 8
  // Reduce the constants so that the "left over" number is 8.
  // Do not aim at a left-over number of zero, because a very
  // large vtable or itable offset (> 4K) will require an extra
  // sethi/or pair of instructions.
  //
  // The JVM98 app. _202_jess has a megamorphic interface call.
  // The itable code looks like this:
  // Decoding VtableStub itbl[1]@16
  //   ld  [ %o0 + 4 ], %g3
  //   save  %sp, -64, %sp
  //   ld  [ %g3 + 0xe8 ], %l2
  //   sll  %l2, 2, %l2
  //   add  %l2, 0x134, %l2
  //   and  %l2, -8, %l2        ! NOT_LP64 only
  //   add  %g3, %l2, %l2
  //   add  %g3, 4, %g3
  //   ld  [ %l2 ], %l5
  //   brz,pn  %l5, throw_icce
  //   cmp  %l5, %g5
  //   be  %icc, success
  //   add  %l2, 8, %l2
  // loop:
  //   ld  [ %l2 ], %l5
  //   brz,pn  %l5, throw_icce
  //   cmp  %l5, %g5
  //   bne,pn  %icc, loop
  //   add  %l2, 8, %l2
  // success:
  //   ld  [ %l2 + -4 ], %l2
  //   ld  [ %g3 + %l2 ], %l5
  //   restore  %l5, 0, %g5
  //   ld  [ %g5 + 0x44 ], %g3
  //   jmp  %g3
  //   nop
  // throw_icce:
  //   sethi  %hi(throw_ICCE_entry), %g3
  //   ! 5 more instructions here, LP64_ONLY
  //   jmp  %g3 + %lo(throw_ICCE_entry)
  //   restore
}


int VtableStub::pd_code_alignment() {
  // UltraSPARC cache line size is 8 instructions:
  const unsigned int icache_line_size = 32;
  return icache_line_size;
}