/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
aoqi@0: * aoqi@0: */ aoqi@0: aoqi@0: #include "precompiled.hpp" aoqi@0: #include "asm/macroAssembler.hpp" aoqi@0: #include "code/vtableStubs.hpp" aoqi@0: #include "interp_masm_x86.hpp" aoqi@0: #include "memory/resourceArea.hpp" aoqi@0: #include "oops/instanceKlass.hpp" aoqi@0: #include "oops/klassVtable.hpp" aoqi@0: #include "runtime/sharedRuntime.hpp" aoqi@0: #include "vmreg_x86.inline.hpp" aoqi@0: #ifdef COMPILER2 aoqi@0: #include "opto/runtime.hpp" aoqi@0: #endif aoqi@0: aoqi@0: PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC aoqi@0: aoqi@0: // machine-dependent part of VtableStubs: create VtableStub of correct size and aoqi@0: // initialize its code aoqi@0: aoqi@0: #define __ masm-> aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: extern "C" void bad_compiled_vtable_index(JavaThread* thread, aoqi@0: oop receiver, aoqi@0: int index); aoqi@0: #endif aoqi@0: aoqi@0: VtableStub* VtableStubs::create_vtable_stub(int vtable_index) { aoqi@0: const int amd64_code_length = VtableStub::pd_code_size_limit(true); aoqi@0: VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index); aoqi@0: // Can be NULL if there is no free space in the code cache. 
aoqi@0: if (s == NULL) { aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: ResourceMark rm; aoqi@0: CodeBuffer cb(s->entry_point(), amd64_code_length); aoqi@0: MacroAssembler* masm = new MacroAssembler(&cb); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if (CountCompiledCalls) { aoqi@0: __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: // get receiver (need to skip return address on top of stack) aoqi@0: assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); aoqi@0: aoqi@0: // Free registers (non-args) are rax, rbx aoqi@0: aoqi@0: // get receiver klass aoqi@0: address npe_addr = __ pc(); aoqi@0: __ load_klass(rax, j_rarg0); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if (DebugVtables) { aoqi@0: Label L; aoqi@0: // check offset vs vtable length aoqi@0: __ cmpl(Address(rax, InstanceKlass::vtable_length_offset() * wordSize), aoqi@0: vtable_index * vtableEntry::size()); aoqi@0: __ jcc(Assembler::greater, L); aoqi@0: __ movl(rbx, vtable_index); aoqi@0: __ call_VM(noreg, aoqi@0: CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, rbx); aoqi@0: __ bind(L); aoqi@0: } aoqi@0: #endif // PRODUCT aoqi@0: aoqi@0: // load Method* and target address aoqi@0: const Register method = rbx; aoqi@0: aoqi@0: __ lookup_virtual_method(rax, vtable_index, method); aoqi@0: aoqi@0: if (DebugVtables) { aoqi@0: Label L; aoqi@0: __ cmpptr(method, (int32_t)NULL_WORD); aoqi@0: __ jcc(Assembler::equal, L); aoqi@0: __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD); aoqi@0: __ jcc(Assembler::notZero, L); aoqi@0: __ stop("Vtable entry is NULL"); aoqi@0: __ bind(L); aoqi@0: } aoqi@0: // rax: receiver klass aoqi@0: // rbx: Method* aoqi@0: // rcx: receiver aoqi@0: address ame_addr = __ pc(); aoqi@0: __ jmp( Address(rbx, Method::from_compiled_offset())); aoqi@0: aoqi@0: __ flush(); aoqi@0: aoqi@0: if (PrintMiscellaneous && (WizardMode || Verbose)) { aoqi@0: 
tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d", aoqi@0: vtable_index, s->entry_point(), aoqi@0: (int)(s->code_end() - s->entry_point()), aoqi@0: (int)(s->code_end() - __ pc())); aoqi@0: } aoqi@0: guarantee(__ pc() <= s->code_end(), "overflowed buffer"); aoqi@0: // shut the door on sizing bugs aoqi@0: int slop = 3; // 32-bit offset is this much larger than an 8-bit one aoqi@0: assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset"); aoqi@0: aoqi@0: s->set_exception_points(npe_addr, ame_addr); aoqi@0: return s; aoqi@0: } aoqi@0: aoqi@0: aoqi@0: VtableStub* VtableStubs::create_itable_stub(int itable_index) { aoqi@0: // Note well: pd_code_size_limit is the absolute minimum we can get aoqi@0: // away with. If you add code here, bump the code stub size aoqi@0: // returned by pd_code_size_limit! aoqi@0: const int amd64_code_length = VtableStub::pd_code_size_limit(false); aoqi@0: VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index); aoqi@0: // Can be NULL if there is no free space in the code cache. 
aoqi@0: if (s == NULL) { aoqi@0: return NULL; aoqi@0: } aoqi@0: aoqi@0: ResourceMark rm; aoqi@0: CodeBuffer cb(s->entry_point(), amd64_code_length); aoqi@0: MacroAssembler* masm = new MacroAssembler(&cb); aoqi@0: aoqi@0: #ifndef PRODUCT aoqi@0: if (CountCompiledCalls) { aoqi@0: __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr())); aoqi@0: } aoqi@0: #endif aoqi@0: aoqi@0: // Entry arguments: aoqi@0: // rax: Interface aoqi@0: // j_rarg0: Receiver aoqi@0: aoqi@0: // Free registers (non-args) are rax (interface), rbx aoqi@0: aoqi@0: // get receiver (need to skip return address on top of stack) aoqi@0: aoqi@0: assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0"); aoqi@0: // get receiver klass (also an implicit null-check) aoqi@0: address npe_addr = __ pc(); aoqi@0: aoqi@0: // Most registers are in use; we'll use rax, rbx, r10, r11 aoqi@0: // (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them) aoqi@0: __ load_klass(r10, j_rarg0); aoqi@0: aoqi@0: // If we take a trap while this arg is on the stack we will not aoqi@0: // be able to walk the stack properly. This is not an issue except aoqi@0: // when there are mistakes in this assembly code that could generate aoqi@0: // a spurious fault. Ask me how I know... aoqi@0: aoqi@0: const Register method = rbx; aoqi@0: Label throw_icce; aoqi@0: aoqi@0: // Get Method* and entrypoint for compiler aoqi@0: __ lookup_interface_method(// inputs: rec. class, interface, itable index aoqi@0: r10, rax, itable_index, aoqi@0: // outputs: method, scan temp. 
reg aoqi@0: method, r11, aoqi@0: throw_icce); aoqi@0: aoqi@0: // method (rbx): Method* aoqi@0: // j_rarg0: receiver aoqi@0: aoqi@0: #ifdef ASSERT aoqi@0: if (DebugVtables) { aoqi@0: Label L2; aoqi@0: __ cmpptr(method, (int32_t)NULL_WORD); aoqi@0: __ jcc(Assembler::equal, L2); aoqi@0: __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD); aoqi@0: __ jcc(Assembler::notZero, L2); aoqi@0: __ stop("compiler entrypoint is null"); aoqi@0: __ bind(L2); aoqi@0: } aoqi@0: #endif // ASSERT aoqi@0: aoqi@0: // rbx: Method* aoqi@0: // j_rarg0: receiver aoqi@0: address ame_addr = __ pc(); aoqi@0: __ jmp(Address(method, Method::from_compiled_offset())); aoqi@0: aoqi@0: __ bind(throw_icce); aoqi@0: __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry())); aoqi@0: aoqi@0: __ flush(); aoqi@0: aoqi@0: if (PrintMiscellaneous && (WizardMode || Verbose)) { aoqi@0: tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d", aoqi@0: itable_index, s->entry_point(), aoqi@0: (int)(s->code_end() - s->entry_point()), aoqi@0: (int)(s->code_end() - __ pc())); aoqi@0: } aoqi@0: guarantee(__ pc() <= s->code_end(), "overflowed buffer"); aoqi@0: // shut the door on sizing bugs aoqi@0: int slop = 3; // 32-bit offset is this much larger than an 8-bit one aoqi@0: assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset"); aoqi@0: aoqi@0: s->set_exception_points(npe_addr, ame_addr); aoqi@0: return s; aoqi@0: } aoqi@0: aoqi@0: int VtableStub::pd_code_size_limit(bool is_vtable_stub) { aoqi@0: if (is_vtable_stub) { aoqi@0: // Vtable stub size aoqi@0: return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) + aoqi@0: (UseCompressedClassPointers ? MacroAssembler::instr_size_for_decode_klass_not_null() : 0); aoqi@0: } else { aoqi@0: // Itable stub size aoqi@0: return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) + aoqi@0: (UseCompressedClassPointers ? 
MacroAssembler::instr_size_for_decode_klass_not_null() : 0); aoqi@0: } aoqi@0: // In order to tune these parameters, run the JVM with VM options aoqi@0: // +PrintMiscellaneous and +WizardMode to see information about aoqi@0: // actual itable stubs. Look for lines like this: aoqi@0: // itable #1 at 0x5551212[71] left over: 3 aoqi@0: // Reduce the constants so that the "left over" number is >=3 aoqi@0: // for the common cases. aoqi@0: // Do not aim at a left-over number of zero, because a aoqi@0: // large vtable or itable index (>= 32) will require a 32-bit aoqi@0: // immediate displacement instead of an 8-bit one. aoqi@0: // aoqi@0: // The JVM98 app. _202_jess has a megamorphic interface call. aoqi@0: // The itable code looks like this: aoqi@0: // Decoding VtableStub itbl[1]@12 aoqi@0: // mov 0x8(%rsi),%r10 aoqi@0: // mov 0x198(%r10),%r11d aoqi@0: // lea 0x218(%r10,%r11,8),%r11 aoqi@0: // lea 0x8(%r10),%r10 aoqi@0: // mov (%r11),%rbx aoqi@0: // cmp %rbx,%rax aoqi@0: // je success aoqi@0: // loop: aoqi@0: // test %rbx,%rbx aoqi@0: // je throw_icce aoqi@0: // add $0x10,%r11 aoqi@0: // mov (%r11),%rbx aoqi@0: // cmp %rbx,%rax aoqi@0: // jne loop aoqi@0: // success: aoqi@0: // mov 0x8(%r11),%r11d aoqi@0: // mov (%r10,%r11,1),%rbx aoqi@0: // jmpq *0x60(%rbx) aoqi@0: // throw_icce: aoqi@0: // jmpq throw_ICCE_entry aoqi@0: } aoqi@0: aoqi@0: int VtableStub::pd_code_alignment() { aoqi@0: return wordSize; aoqi@0: }