aoqi@1: /* aoqi@1: * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. huangxuguang@9251: * Copyright (c) 2015, 2018, Loongson Technology. All rights reserved. aoqi@1: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. aoqi@1: * aoqi@1: * This code is free software; you can redistribute it and/or modify it aoqi@1: * under the terms of the GNU General Public License version 2 only, as aoqi@1: * published by the Free Software Foundation. aoqi@1: * aoqi@1: * This code is distributed in the hope that it will be useful, but WITHOUT aoqi@1: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or aoqi@1: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License aoqi@1: * version 2 for more details (a copy is included in the LICENSE file that aoqi@1: * accompanied this code). aoqi@1: * aoqi@1: * You should have received a copy of the GNU General Public License version aoqi@1: * 2 along with this work; if not, write to the Free Software Foundation, aoqi@1: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. aoqi@1: * aoqi@1: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA aoqi@1: * or visit www.oracle.com if you need additional information or have any aoqi@1: * questions. aoqi@1: * aoqi@1: */ aoqi@1: aoqi@1: #include "precompiled.hpp" aoqi@1: #include "asm/assembler.hpp" aoqi@1: #include "c1/c1_Defs.hpp" aoqi@1: #include "c1/c1_MacroAssembler.hpp" aoqi@1: #include "c1/c1_Runtime1.hpp" aoqi@1: #include "interpreter/interpreter.hpp" aoqi@1: #include "nativeInst_mips.hpp" aoqi@1: #include "oops/compiledICHolder.hpp" aoqi@1: #include "oops/oop.inline.hpp" aoqi@1: #include "prims/jvmtiExport.hpp" aoqi@1: #include "register_mips.hpp" aoqi@1: #include "runtime/sharedRuntime.hpp" aoqi@1: #include "runtime/signature.hpp" aoqi@1: #include "runtime/vframeArray.hpp" aoqi@6880: #include "utilities/macros.hpp" aoqi@1: #include "vmreg_mips.inline.hpp" aoqi@6880: #if INCLUDE_ALL_GCS aoqi@6880: #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" aoqi@6880: #endif aoqi@1: aoqi@1: aoqi@1: // Implementation of StubAssembler aoqi@1: // this method will preserve the stack space for arguments as indicated by args_size aoqi@1: // for stack alignment consideration, you cannot call this with argument in stack. aoqi@1: // if you need >3 arguments, you must implement this method yourself. fujie@9153: int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { aoqi@6880: // i use S7 for edi. aoqi@6880: // setup registers aoqi@6880: const Register thread = TREG; // is callee-saved register (Visual C++ calling conventions) fujie@9153: assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different"); fujie@9153: assert(oop_result1 != thread && metadata_result != thread, "registers must be different"); aoqi@6880: assert(args_size >= 0, "illegal args_size"); fujie@9153: bool align_stack = false; fujie@9153: #ifdef _LP64 fujie@9153: // At a method handle call, the stack may not be properly aligned fujie@9153: // when returning with an exception. 
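  // Note: only handle_exception_from_callee_id forces SP back onto a
  // StackAlignmentInBytes boundary here (see the andr(SP, SP, AT) below);
  // every other stub presumably relies on the caller having kept SP aligned.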
fujie@9153: align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id); fujie@9153: #endif aoqi@1: aoqi@6880: set_num_rt_args(1 + args_size); aoqi@1: aoqi@1: aoqi@6880: // push java thread (becomes first argument of C function) aoqi@6880: get_thread(thread); aoqi@6880: move(A0, thread); aoqi@1: fujie@9153: if(!align_stack) { fujie@9153: set_last_Java_frame(thread, NOREG, FP, NULL); fujie@9153: } else { fujie@9153: address the_pc = pc(); fujie@9153: set_last_Java_frame(thread, NOREG, FP, the_pc); fujie@9153: move(AT, -(StackAlignmentInBytes)); fujie@9153: andr(SP, SP, AT); fujie@9153: } aoqi@1: aoqi@6880: relocate(relocInfo::internal_pc_type); aoqi@6880: { aoqi@1: #ifndef _LP64 aoqi@6880: int save_pc = (int)pc() + 12 + NativeCall::return_address_offset; aoqi@6880: lui(AT, Assembler::split_high(save_pc)); aoqi@6880: addiu(AT, AT, Assembler::split_low(save_pc)); aoqi@1: #else aoqi@6880: uintptr_t save_pc = (uintptr_t)pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset_long; aoqi@6880: li48(AT, save_pc); aoqi@1: #endif aoqi@6880: } aoqi@6880: st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset())); aoqi@1: aoqi@6880: // do the call aoqi@6880: #ifndef _LP64 aoqi@6880: lui(T9, Assembler::split_high((int)entry)); aoqi@6880: addiu(T9, T9, Assembler::split_low((int)entry)); aoqi@6880: #else aoqi@6880: li48(T9, (intptr_t)entry); aoqi@1: #endif aoqi@6880: jalr(T9); aoqi@6880: delayed()->nop(); fujie@9161: fujie@9161: int call_offset = offset(); aoqi@6880: aoqi@6880: // verify callee-saved register aoqi@6880: #ifdef ASSERT aoqi@6880: guarantee(thread != V0, "change this code"); aoqi@6880: push(V0); aoqi@6880: { aoqi@6880: Label L; aoqi@6880: get_thread(V0); aoqi@6880: beq(thread, V0, L); aoqi@6880: delayed()->nop(); aoqi@6880: int3(); aoqi@6880: stop("StubAssembler::call_RT: edi not callee saved?"); aoqi@6880: bind(L); aoqi@6880: } aoqi@6880: super_pop(V0); aoqi@1: #endif aoqi@6880: // discard thread and arguments aoqi@9228: ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); fujie@9171: reset_last_Java_frame(thread, true); aoqi@6880: // check for pending exceptions aoqi@6880: { aoqi@6880: Label L; aoqi@6880: ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset())); aoqi@6880: beq(AT, R0, L); aoqi@6880: delayed()->nop(); aoqi@6880: // exception pending => remove activation and forward to exception handler aoqi@6880: // make sure that the vm_results are cleared aoqi@6880: if (oop_result1->is_valid()) { aoqi@6880: st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset())); aoqi@6880: } fujie@9153: if (metadata_result->is_valid()) { aoqi@6880: st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset())); aoqi@6880: } aoqi@6880: // the leave() in x86 just pops ebp and remains the return address on the top aoqi@6880: // of stack aoqi@6880: // the return address will be needed by forward_exception_entry() aoqi@6880: if (frame_size() == no_frame_size) { aoqi@6880: addiu(SP, FP, wordSize); aoqi@6880: ld_ptr(FP, SP, (-1) * wordSize); aoqi@6880: jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); aoqi@6880: delayed()->nop(); aoqi@6880: } else if (_stub_id == Runtime1::forward_exception_id) { aoqi@6880: should_not_reach_here(); aoqi@6880: } else { aoqi@8865: jmp(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type); aoqi@6880: delayed()->nop(); aoqi@6880: } aoqi@6880: bind(L); aoqi@6880: } aoqi@6880: // get oop results if there are any and reset the values in the 
thread aoqi@6880: if (oop_result1->is_valid()) { aoqi@6880: ld_ptr(oop_result1, thread, in_bytes(JavaThread::vm_result_offset())); aoqi@6880: st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset())); aoqi@6880: verify_oop(oop_result1); aoqi@6880: } fujie@9153: if (metadata_result->is_valid()) { fujie@9153: ld_ptr(metadata_result, thread, in_bytes(JavaThread::vm_result_2_offset())); aoqi@6880: st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset())); fujie@9153: verify_oop(metadata_result); aoqi@6880: } aoqi@6880: return call_offset; aoqi@1: } aoqi@1: aoqi@1: fujie@9153: int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { aoqi@6880: if (arg1 != A1) move(A1, arg1); fujie@9153: return call_RT(oop_result1, metadata_result, entry, 1); aoqi@1: } aoqi@1: aoqi@1: fujie@9153: int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { aoqi@6880: if (arg1!=A1) move(A1, arg1); aoqi@6880: if (arg2!=A2) move(A2, arg2); assert(arg2 != A1, "smashed argument"); fujie@9153: return call_RT(oop_result1, metadata_result, entry, 2); aoqi@1: } aoqi@1: aoqi@1: fujie@9153: int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { aoqi@6880: if (arg1!=A1) move(A1, arg1); aoqi@6880: if (arg2!=A2) move(A2, arg2); assert(arg2 != A1, "smashed argument"); aoqi@6880: if (arg3!=A3) move(A3, arg3); assert(arg3 != A1 && arg3 != A2, "smashed argument"); fujie@9153: return call_RT(oop_result1, metadata_result, entry, 3); aoqi@1: } aoqi@1: aoqi@1: aoqi@1: // Implementation of StubFrame aoqi@1: aoqi@1: class StubFrame: public StackObj { aoqi@6880: private: aoqi@6880: StubAssembler* _sasm; aoqi@1: aoqi@6880: public: aoqi@6880: StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); aoqi@6880: void load_argument(int offset_in_words, Register reg); aoqi@6880: aoqi@6880: ~StubFrame(); aoqi@1: }; aoqi@1: aoqi@1: aoqi@1: #define __ _sasm-> aoqi@1: aoqi@1: StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { aoqi@6880: _sasm = sasm; aoqi@6880: __ set_info(name, must_gc_arguments); aoqi@6880: __ enter(); aoqi@1: } aoqi@1: aoqi@1: aoqi@1: //FIXME, I have no idea the frame architecture of mips aoqi@1: // load parameters that were stored with LIR_Assembler::store_parameter aoqi@6880: // Note: offsets for store_parameter and load_argument must match aoqi@1: void StubFrame::load_argument(int offset_in_words, Register reg) { aoqi@6880: //ebp + 0: link aoqi@6880: // + 1: return address aoqi@6880: // + 2: argument with offset 0 aoqi@6880: // + 3: argument with offset 1 aoqi@6880: // + 4: ... 
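  // For example, f.load_argument(0, A6) in the monitorenter stub further down
  // reads the value that LIR_Assembler::store_parameter placed at
  // FP + 2*BytesPerWord; the "+ 2" below skips the saved FP (link) and the
  // return address.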
aoqi@6880: __ ld_ptr(reg, Address(FP, (offset_in_words + 2) * BytesPerWord)); aoqi@1: } aoqi@6880: aoqi@6880: aoqi@1: StubFrame::~StubFrame() { aoqi@6880: __ leave(); aoqi@6880: __ jr(RA); aoqi@6880: __ delayed()->nop(); aoqi@1: } aoqi@1: aoqi@1: #undef __ aoqi@1: aoqi@1: aoqi@1: // Implementation of Runtime1 aoqi@1: aoqi@1: #define __ sasm-> aoqi@1: aoqi@1: //static OopMap* save_live_registers(MacroAssembler* sasm, int num_rt_args); aoqi@1: //static void restore_live_registers(MacroAssembler* sasm); aoqi@1: //DeoptimizationBlob* SharedRuntime::_deopt_blob = NULL; aoqi@1: /* aoqi@1: const int fpu_stack_as_doubles_size_in_words = 16; aoqi@1: const int fpu_stack_as_doubles_size = 64; aoqi@1: */ aoqi@1: const int float_regs_as_doubles_size_in_words = 16; aoqi@1: aoqi@6880: //FIXME, aoqi@1: // Stack layout for saving/restoring all the registers needed during a runtime aoqi@1: // call (this includes deoptimization) aoqi@1: // Note: note that users of this frame may well have arguments to some runtime aoqi@1: // while these values are on the stack. These positions neglect those arguments aoqi@1: // but the code in save_live_registers will take the argument count into aoqi@1: // account. aoqi@1: // aoqi@1: #ifdef _LP64 aoqi@1: #define SLOT2(x) x, aoqi@1: #define SLOT_PER_WORD 2 aoqi@1: #else aoqi@1: #define SLOT2(x) aoqi@1: #define SLOT_PER_WORD 1 aoqi@1: #endif // _LP64 aoqi@1: aoqi@1: enum reg_save_layout { aoqi@1: #ifndef _LP64 aoqi@1: T0_off = 0, aoqi@1: S0_off = T0_off + SLOT_PER_WORD * 8, aoqi@1: #else aoqi@1: A4_off = 0, aoqi@1: S0_off = A4_off + SLOT_PER_WORD * 8, aoqi@1: #endif aoqi@1: FP_off = S0_off + SLOT_PER_WORD * 8, SLOT2(FPH_off) aoqi@1: T8_off, SLOT2(T8H_off) aoqi@1: T9_off, SLOT2(T9H_off) aoqi@1: SP_off, SLOT2(SPH_off) aoqi@1: V0_off, SLOT2(V0H_off) aoqi@1: V1_off, SLOT2(V1H_off) aoqi@1: A0_off, SLOT2(A0H_off) aoqi@1: A1_off, SLOT2(A1H_off) aoqi@1: A2_off, SLOT2(A2H_off) aoqi@1: A3_off, SLOT2(A3H_off) aoqi@1: aoqi@1: // Float registers aoqi@1: /* FIXME: Jin: In MIPS64, F0~23 are all caller-saved registers */ aoqi@1: F0_off, SLOT2( F0H_off) aoqi@1: F1_off, SLOT2( F1H_off) aoqi@1: F2_off, SLOT2( F2H_off) aoqi@1: F3_off, SLOT2( F3H_off) aoqi@1: F4_off, SLOT2( F4H_off) aoqi@1: F5_off, SLOT2( F5H_off) aoqi@1: F6_off, SLOT2( F6H_off) aoqi@1: F7_off, SLOT2( F7H_off) aoqi@1: F8_off, SLOT2( F8H_off) aoqi@1: F9_off, SLOT2( F9H_off) aoqi@1: F10_off, SLOT2( F10H_off) aoqi@1: F11_off, SLOT2( F11H_off) aoqi@1: F12_off, SLOT2( F12H_off) aoqi@1: F13_off, SLOT2( F13H_off) aoqi@1: F14_off, SLOT2( F14H_off) aoqi@1: F15_off, SLOT2( F15H_off) aoqi@1: F16_off, SLOT2( F16H_off) aoqi@1: F17_off, SLOT2( F17H_off) aoqi@1: F18_off, SLOT2( F18H_off) aoqi@1: F19_off, SLOT2( F19H_off) aoqi@1: aoqi@1: GP_off, SLOT2( GPH_off) aoqi@1: //temp_2_off, aoqi@1: temp_1_off, SLOT2(temp_1H_off) aoqi@1: saved_fp_off, SLOT2(saved_fpH_off) aoqi@1: return_off, SLOT2(returnH_off) aoqi@1: aoqi@1: reg_save_frame_size, aoqi@1: aoqi@1: // illegal instruction handler aoqi@1: continue_dest_off = temp_1_off, aoqi@1: aoqi@1: // deoptimization equates aoqi@1: //deopt_type = temp_2_off, // slot for type of deopt in progress aoqi@1: ret_type = temp_1_off // slot for return type aoqi@1: }; aoqi@1: aoqi@6880: aoqi@6880: aoqi@1: // Save off registers which might be killed by calls into the runtime. aoqi@1: // Tries to smart of about FP registers. 
In particular we separate aoqi@1: // saving and describing the FPU registers for deoptimization since we aoqi@1: // have to save the FPU registers twice if we describe them and on P4 aoqi@1: // saving FPU registers which don't contain anything appears aoqi@1: // expensive. The deopt blob is the only thing which needs to aoqi@1: // describe FPU registers. In all other cases it should be sufficient aoqi@1: // to simply save their current value. aoqi@1: static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, aoqi@6880: bool save_fpu_registers = true, bool describe_fpu_registers = false) { aoqi@1: aoqi@8865: LP64_ONLY(num_rt_args = 0); aoqi@8865: LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) aoqi@8865: int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / VMRegImpl::slots_per_word; // args + thread aoqi@8865: sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); aoqi@1: aoqi@1: // record saved value locations in an OopMap aoqi@6880: // locations are offsets from sp after runtime call; num_rt_args is number of arguments aoqi@1: // in call, including thread aoqi@1: OopMap* map = new OopMap(reg_save_frame_size, 0); aoqi@6880: aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(V0_off + num_rt_args), V0->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(V1_off + num_rt_args), V1->as_VMReg()); aoqi@1: #ifdef _LP64 aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(V0H_off + num_rt_args), V0->as_VMReg()->next()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(V1H_off + num_rt_args), V1->as_VMReg()->next()); aoqi@1: #endif aoqi@1: aoqi@1: int i = 0; aoqi@1: #ifndef _LP64 aoqi@1: for (Register r = T0; r != T7->successor(); r = r->successor() ) { aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(T0_off + num_rt_args + i++), r->as_VMReg()); aoqi@1: } aoqi@1: #else aoqi@1: for (Register r = A4; r != T3->successor(); r = r->successor() ) { aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg()->next()); aoqi@1: } aoqi@1: #endif aoqi@1: aoqi@1: i = 0; aoqi@1: for (Register r = S0; r != S7->successor(); r = r->successor() ) { aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg()); aoqi@1: #ifdef _LP64 aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg()->next()); aoqi@1: #endif aoqi@1: } aoqi@1: aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(FP_off + num_rt_args), FP->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(GP_off + num_rt_args), GP->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(T8_off + num_rt_args), T8->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(T9_off + num_rt_args), T9->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(A0_off + num_rt_args), A0->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(A1_off + num_rt_args), A1->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(A2_off + num_rt_args), A2->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(A3_off + num_rt_args), A3->as_VMReg()); aoqi@1: aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(F0_off + num_rt_args), F0->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(F1_off + num_rt_args), F1->as_VMReg()); aoqi@1: map->set_callee_saved(VMRegImpl::stack2reg(F2_off + num_rt_args), 
F2->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F3_off + num_rt_args), F3->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F4_off + num_rt_args), F4->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F5_off + num_rt_args), F5->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F6_off + num_rt_args), F6->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F7_off + num_rt_args), F7->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F8_off + num_rt_args), F8->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F9_off + num_rt_args), F9->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F10_off + num_rt_args), F10->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F11_off + num_rt_args), F11->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F12_off + num_rt_args), F12->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F13_off + num_rt_args), F13->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F14_off + num_rt_args), F14->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F15_off + num_rt_args), F15->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F16_off + num_rt_args), F16->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F17_off + num_rt_args), F17->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F18_off + num_rt_args), F18->as_VMReg());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(F19_off + num_rt_args), F19->as_VMReg());
aoqi@1:
aoqi@1: #ifdef _LP64
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(FPH_off + num_rt_args), FP->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(GPH_off + num_rt_args), GP->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(T8H_off + num_rt_args), T8->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(T9H_off + num_rt_args), T9->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(A0H_off + num_rt_args), A0->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(A1H_off + num_rt_args), A1->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(A2H_off + num_rt_args), A2->as_VMReg()->next());
aoqi@1:   map->set_callee_saved(VMRegImpl::stack2reg(A3H_off + num_rt_args), A3->as_VMReg()->next());
aoqi@1: #endif
aoqi@1:   return map;
aoqi@1: }
aoqi@1:
aoqi@8865: static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
aoqi@8865:                                    bool save_fpu_registers = true,
aoqi@1:                                    bool describe_fpu_registers = false) {
aoqi@1:   //const int reg_save_frame_size = return_off + 1 + num_rt_args;
aoqi@1:   __ block_comment("save_live_registers");
aoqi@1:
aoqi@6880:   // save all register state - int, fpu
aoqi@1:   __ addi(SP, SP, -(reg_save_frame_size / SLOT_PER_WORD - 2)* wordSize);
aoqi@6880:
aoqi@1: #ifndef _LP64
aoqi@1:   for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1:     __ sw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1: #else
aoqi@1:   for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1:     __ sd(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1: #endif
aoqi@1:   }
aoqi@1:   for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1:     __ st_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1:   }
aoqi@1:   __ st_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1:   __
st_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD); aoqi@1: __ st_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ st_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@6880: __ sdc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ sdc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: return generate_oop_map(sasm, num_rt_args, save_fpu_registers, describe_fpu_registers); aoqi@1: } aoqi@1: aoqi@1: static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) { aoqi@1: //static void restore_live_registers(MacroAssembler* sasm) { aoqi@1: #ifndef _LP64 aoqi@1: for (Register r = T0; r != T7->successor(); r = r->successor() ) { aoqi@1: __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize); aoqi@1: #else aoqi@1: for (Register r = A4; r != T3->successor(); r = r->successor() ) { aoqi@1: __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize); aoqi@1: #endif aoqi@1: } aoqi@1: for (Register r = S0; r != S7->successor(); r = r->successor() ) { aoqi@1: __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize); aoqi@1: } aoqi@1: __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: __ ld_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD); aoqi@6880: __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F3, SP, F3_off * 
wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize); aoqi@1: } aoqi@1: aoqi@1: static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { aoqi@1: __ block_comment("restore_live_registers"); aoqi@1: restore_fpu(sasm, restore_fpu_registers); aoqi@1: } aoqi@1: aoqi@6880: static void restore_live_registers_except_V0(StubAssembler* sasm, bool restore_fpu_registers = true) { aoqi@1: //static void restore_live_registers(MacroAssembler* sasm) { aoqi@1: //FIXME , maybe V1 need to be saved too aoqi@1: __ block_comment("restore_live_registers except V0"); aoqi@1: #ifndef _LP64 aoqi@1: for (Register r = T0; r != T7->successor(); r = r->successor() ) { aoqi@6880: __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize); aoqi@1: #else aoqi@1: for (Register r = A4; r != T3->successor(); r = r->successor() ) { aoqi@6880: __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize); aoqi@1: #endif aoqi@1: } aoqi@1: for (Register r = S0; r != S7->successor(); r = r->successor() ) { aoqi@6880: __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize); aoqi@1: } aoqi@1: __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: #if 1 aoqi@1: __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F13, SP, 
F13_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD); aoqi@1: __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD); aoqi@1: #endif aoqi@1: aoqi@6880: __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD); aoqi@1: aoqi@1: __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize); aoqi@1: } aoqi@1: aoqi@1: void Runtime1::initialize_pd() { aoqi@1: // nothing to do aoqi@1: } aoqi@1: aoqi@1: // target: the entry point of the method that creates and posts the exception oop aoqi@1: // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved) aoqi@1: OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { aoqi@6880: // preserve all registers aoqi@6880: OopMap* oop_map = save_live_registers(sasm, 0); aoqi@1: aoqi@6880: // now all registers are saved and can be used freely aoqi@6880: // verify that no old value is used accidentally aoqi@6880: //all reigster are saved , I think mips do not need this aoqi@1: aoqi@6880: // registers used by this stub aoqi@6880: const Register temp_reg = T3; aoqi@6880: // load argument for exception that is passed as an argument into the stub aoqi@6880: if (has_argument) { aoqi@6880: __ ld_ptr(temp_reg, Address(FP, 2*BytesPerWord)); aoqi@6880: } aoqi@6880: int call_offset; aoqi@6880: if (has_argument) aoqi@6880: call_offset = __ call_RT(noreg, noreg, target, temp_reg); aoqi@1: else aoqi@6880: call_offset = __ call_RT(noreg, noreg, target); aoqi@1: aoqi@6880: OopMapSet* oop_maps = new OopMapSet(); aoqi@6880: oop_maps->add_gc_map(call_offset, oop_map); aoqi@1: aoqi@6880: __ stop("should not reach here"); aoqi@6880: aoqi@6880: return oop_maps; aoqi@1: } aoqi@1: aoqi@1: OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { aoqi@8865: __ block_comment("generate_handle_exception"); aoqi@8865: aoqi@8865: // incoming parameters aoqi@6880: const Register exception_oop = V0; aoqi@6880: const Register exception_pc = V1; aoqi@6880: // other registers used in this stub aoqi@8865: const Register thread = TREG; aoqi@8865: #ifndef OPT_THREAD aoqi@8865: __ get_thread(thread); aoqi@8865: #endif aoqi@1: // Save registers, if required. aoqi@8865: OopMapSet* oop_maps = new OopMapSet(); aoqi@8865: OopMap* oop_map = NULL; aoqi@8865: switch (id) { aoqi@8865: case forward_exception_id: aoqi@8865: // We're handling an exception in the context of a compiled frame. aoqi@8865: // The registers have been saved in the standard places. Perform aoqi@8865: // an exception lookup in the caller and dispatch to the handler aoqi@8865: // if found. Otherwise unwind and dispatch to the callers aoqi@8865: // exception handler. 
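      // The registers were already saved by the stub that raised the exception,
      // so this case only builds an OopMap describing the standard save
      // locations (generate_oop_map); the handle_exception_* cases below save
      // the registers themselves via save_live_registers.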
aoqi@8865: oop_map = generate_oop_map(sasm, 1 /*thread*/); aoqi@6880: aoqi@8865: // load and clear pending exception oop into RAX aoqi@8865: __ ld_ptr(exception_oop, Address(thread, Thread::pending_exception_offset())); aoqi@8865: __ st_ptr(R0, Address(thread, Thread::pending_exception_offset())); aoqi@6880: aoqi@8865: // load issuing PC (the return address for this stub) into rdx aoqi@8865: __ ld_ptr(exception_pc, Address(FP, 1*BytesPerWord)); aoqi@6880: aoqi@8865: // make sure that the vm_results are cleared (may be unnecessary) aoqi@8865: __ st_ptr(R0, Address(thread, JavaThread::vm_result_offset())); aoqi@8865: __ st_ptr(R0, Address(thread, JavaThread::vm_result_2_offset())); aoqi@8865: break; aoqi@8865: case handle_exception_nofpu_id: aoqi@8865: case handle_exception_id: aoqi@8865: // At this point all registers MAY be live. aoqi@8865: oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); aoqi@8865: break; aoqi@8865: case handle_exception_from_callee_id: { aoqi@8865: // At this point all registers except exception oop (RAX) and aoqi@8865: // exception pc (RDX) are dead. aoqi@8865: const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/); aoqi@8865: oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); aoqi@8865: sasm->set_frame_size(frame_size); aoqi@8865: break; aoqi@8865: } aoqi@8865: default: ShouldNotReachHere(); aoqi@8865: } aoqi@1: aoqi@1: #ifdef TIERED aoqi@6880: // C2 can leave the fpu stack dirty aoqi@6880: __ empty_FPU_stack(); aoqi@1: #endif // TIERED aoqi@1: aoqi@6880: // verify that only V0 and V1 is valid at this time aoqi@6880: // verify that V0 contains a valid exception aoqi@6880: __ verify_not_null_oop(exception_oop); aoqi@1: aoqi@6880: // load address of JavaThread object for thread-local data aoqi@6880: __ get_thread(thread); aoqi@1: aoqi@1: #ifdef ASSERT aoqi@6880: // check that fields in JavaThread for exception oop and issuing pc are aoqi@6880: // empty before writing to them aoqi@6880: Label oop_empty; aoqi@6880: __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset()))); aoqi@6880: __ beq(AT, R0, oop_empty); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ stop("exception oop already set"); aoqi@6880: __ bind(oop_empty); fujie@9207: aoqi@6880: Label pc_empty; aoqi@6880: __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset()))); aoqi@6880: __ beq(AT, R0, pc_empty); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ stop("exception pc already set"); aoqi@6880: __ bind(pc_empty); aoqi@1: #endif aoqi@1: aoqi@6880: // save exception oop and issuing pc into JavaThread aoqi@6880: // (exception handler will load it from here) aoqi@6880: __ st_ptr(exception_oop, Address(thread, in_bytes(JavaThread::exception_oop_offset()))); aoqi@6880: __ st_ptr(exception_pc, Address(thread, in_bytes(JavaThread::exception_pc_offset()))); aoqi@1: aoqi@8865: // patch throwing pc into return address (has bci & oop map) aoqi@8865: __ st_ptr(exception_pc, Address(FP, 1*BytesPerWord)); aoqi@1: aoqi@6880: // compute the exception handler. 
aoqi@6880: // the exception oop and the throwing pc are read from the fields in JavaThread fujie@9161: __ block_comment(";; will call_RT exception_handler_for_pc"); aoqi@8865: int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); aoqi@6880: oop_maps->add_gc_map(call_offset, oop_map); fujie@9161: __ block_comment(";; end of call_RT exception_handler_for_pc"); fujie@9207: aoqi@6880: // V0: handler address or NULL if no handler exists aoqi@6880: // will be the deopt blob if nmethod was deoptimized while we looked up aoqi@6880: // handler regardless of whether handler existed in the nmethod. aoqi@1: aoqi@6880: // only V0 is valid at this time, all other registers have been destroyed by the aoqi@6880: // runtime call aoqi@1: aoqi@6880: // patch the return address -> the stub will directly return to the exception handler aoqi@8865: __ st_ptr(V0, Address(FP, 1 * BytesPerWord)); aoqi@1: aoqi@8865: switch (id) { aoqi@8865: case forward_exception_id: aoqi@8865: case handle_exception_nofpu_id: aoqi@8865: case handle_exception_id: aoqi@8865: // Restore the registers that were saved at the beginning. aoqi@8865: restore_live_registers(sasm, id != handle_exception_nofpu_id); aoqi@8865: break; aoqi@8865: case handle_exception_from_callee_id: aoqi@8865: // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP aoqi@8865: // since we do a leave anyway. aoqi@1: aoqi@8865: // Pop the return address since we are possibly changing SP (restoring from BP). fujie@9207: __ move(SP, FP); fujie@9207: __ pop(FP); fujie@9207: __ pop(RA); aoqi@8865: __ jr(RA); // jump to exception handler aoqi@8865: __ delayed()->nop(); aoqi@8865: break; aoqi@8865: default: ShouldNotReachHere(); aoqi@8865: } aoqi@1: aoqi@8865: return oop_maps; aoqi@8865: } aoqi@1: aoqi@1: aoqi@1: aoqi@1: aoqi@1: aoqi@1: void Runtime1::generate_unwind_exception(StubAssembler *sasm) { aoqi@6880: // incoming parameters aoqi@6880: const Register exception_oop = V0; aoqi@8865: // callee-saved copy of exception_oop during runtime call aoqi@8865: const Register exception_oop_callee_saved = S0; aoqi@6880: // other registers used in this stub aoqi@6880: const Register exception_pc = V1; aoqi@6880: const Register handler_addr = T3; aoqi@8865: const Register thread = TREG; aoqi@1: aoqi@6880: // verify that only eax is valid at this time aoqi@6880: // __ invalidate_registers(false, true, true, true, true, true); aoqi@1: aoqi@1: #ifdef ASSERT aoqi@6880: // check that fields in JavaThread for exception oop and issuing pc are empty aoqi@6880: __ get_thread(thread); aoqi@6880: Label oop_empty; aoqi@6880: __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset())); aoqi@6880: __ beq(AT, R0, oop_empty); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ stop("exception oop must be empty"); aoqi@6880: __ bind(oop_empty); aoqi@1: aoqi@6880: Label pc_empty; aoqi@6880: __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_pc_offset())); aoqi@8865: __ beq(AT, R0, pc_empty); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ stop("exception pc must be empty"); aoqi@6880: __ bind(pc_empty); aoqi@1: #endif aoqi@6880: // clear the FPU stack in case any FPU results are left behind aoqi@6880: __ empty_FPU_stack(); aoqi@1: aoqi@8865: // save exception_oop in callee-saved register to preserve it during runtime calls aoqi@8865: __ verify_not_null_oop(exception_oop); aoqi@8865: __ move(exception_oop_callee_saved, exception_oop); aoqi@8865: aoqi@8865: #ifndef OPT_THREAD aoqi@8865: __ get_thread(thread); aoqi@8865: #endif fujie@9207: // 
Get return address (is in RA after leave). aoqi@8865: fujie@9207: __ move(exception_pc, RA); fujie@9207: __ push(RA); aoqi@1: aoqi@6880: // search the exception handler address of the caller (using the return address) aoqi@8865: __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); aoqi@8865: // V0: exception handler address of the caller aoqi@1: aoqi@6880: // only eax is valid at this time, all other registers have been destroyed by the call aoqi@1: aoqi@6880: // move result of call into correct register aoqi@6880: __ move(handler_addr, V0); aoqi@1: aoqi@8865: // Restore exception oop to V0 (required convention of exception handler). aoqi@8865: __ move(exception_oop, exception_oop_callee_saved); aoqi@8865: aoqi@8865: // verify that there is really a valid exception in V0 aoqi@6880: __ verify_oop(exception_oop); aoqi@1: aoqi@6880: // get throwing pc (= return address). aoqi@8865: // V1 has been destroyed by the call, so it must be set again aoqi@6880: // the pop is also necessary to simulate the effect of a ret(0) aoqi@6880: __ super_pop(exception_pc); aoqi@1: aoqi@6880: // continue at exception handler (return address removed) aoqi@6880: // note: do *not* remove arguments when unwinding the aoqi@6880: // activation since the caller assumes having aoqi@6880: // all arguments on the stack when entering the aoqi@6880: // runtime to determine the exception handler aoqi@6880: // (GC happens at call site with arguments!) aoqi@8865: // V0: exception oop aoqi@8865: // V1: throwing pc aoqi@8865: // T3: exception handler aoqi@6880: __ jr(handler_addr); aoqi@6880: __ delayed()->nop(); aoqi@1: } aoqi@1: aoqi@1: aoqi@1: aoqi@1: aoqi@1: //static address deopt_with_exception_entry_for_patch = NULL; aoqi@1: aoqi@1: OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { aoqi@1: aoqi@6880: // use the maximum number of runtime-arguments here because it is difficult to aoqi@6880: // distinguish each RT-Call. aoqi@6880: // Note: This number affects also the RT-Call in generate_handle_exception because aoqi@6880: // the oop-map is shared for all calls. aoqi@1: aoqi@6880: DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); aoqi@6880: assert(deopt_blob != NULL, "deoptimization blob must have been created"); aoqi@6880: // assert(deopt_with_exception_entry_for_patch != NULL, aoqi@6880: // "deoptimization blob must have been created"); aoqi@6880: aoqi@6880: //OopMap* oop_map = save_live_registers(sasm, num_rt_args); aoqi@6880: OopMap* oop_map = save_live_registers(sasm, 0); aoqi@6880: const Register thread = T8; aoqi@6880: // push java thread (becomes first argument of C function) aoqi@6880: __ get_thread(thread); aoqi@6880: __ move(A0, thread); aoqi@1: aoqi@1: aoqi@6880: /* aoqi@6880: * NOTE: this frame should be compiled frame, but at this point, the pc in frame-anchor aoqi@6880: * is contained in interpreter. It should be wrong, and should be cleared but is not. aoqi@6880: * even if we cleared the wrong pc in anchor, the default way to get caller pc in class frame aoqi@6880: * is not right. 
It depends on that the caller pc is stored in *(sp - 1) but it's not the case aoqi@1: */ aoqi@6880: __ set_last_Java_frame(thread, NOREG, FP, NULL); aoqi@6880: NOT_LP64(__ addiu(SP, SP, (-1) * wordSize)); aoqi@6880: __ move(AT, -(StackAlignmentInBytes)); aoqi@6880: __ andr(SP, SP, AT); aoqi@6880: __ relocate(relocInfo::internal_pc_type); aoqi@6880: { aoqi@1: #ifndef _LP64 aoqi@6880: int save_pc = (int)__ pc() + 12 + NativeCall::return_address_offset; aoqi@6880: __ lui(AT, Assembler::split_high(save_pc)); aoqi@6880: __ addiu(AT, AT, Assembler::split_low(save_pc)); aoqi@1: #else aoqi@6880: uintptr_t save_pc = (uintptr_t)__ pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset_long; aoqi@6880: __ li48(AT, save_pc); aoqi@1: #endif aoqi@6880: } aoqi@6880: __ st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset())); aoqi@1: aoqi@6880: // do the call aoqi@1: #ifndef _LP64 aoqi@6880: __ lui(T9, Assembler::split_high((int)target)); aoqi@6880: __ addiu(T9, T9, Assembler::split_low((int)target)); aoqi@1: #else aoqi@6880: __ li48(T9, (intptr_t)target); aoqi@1: #endif aoqi@6880: __ jalr(T9); aoqi@6880: __ delayed()->nop(); aoqi@6880: OopMapSet* oop_maps = new OopMapSet(); aoqi@6880: oop_maps->add_gc_map(__ offset(), oop_map); aoqi@1: aoqi@6880: __ get_thread(thread); aoqi@1: aoqi@6880: __ ld_ptr (SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); fujie@9171: __ reset_last_Java_frame(thread, true); aoqi@6880: // discard thread arg aoqi@6880: // check for pending exceptions aoqi@6880: { aoqi@6880: Label L, skip; aoqi@6880: //Label no_deopt; aoqi@6880: __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset())); aoqi@6880: __ beq(AT, R0, L); aoqi@6880: __ delayed()->nop(); aoqi@6880: // exception pending => remove activation and forward to exception handler aoqi@1: aoqi@8865: __ bne(V0, R0, skip); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ jmp(Runtime1::entry_for(Runtime1::forward_exception_id), aoqi@6880: relocInfo::runtime_call_type); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ bind(skip); aoqi@1: aoqi@6880: // the deopt blob expects exceptions in the special fields of aoqi@6880: // JavaThread, so copy and clear pending exception. 
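      // That is, Thread::pending_exception is moved into
      // JavaThread::exception_oop / JavaThread::exception_pc below, and control
      // then transfers to deopt_blob->unpack_with_exception_in_tls().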
aoqi@1: aoqi@6880: // load and clear pending exception aoqi@6880: __ ld_ptr(V0, Address(thread,in_bytes(Thread::pending_exception_offset()))); aoqi@6880: __ st_ptr(R0, Address(thread, in_bytes(Thread::pending_exception_offset()))); aoqi@1: aoqi@6880: // check that there is really a valid exception aoqi@6880: __ verify_not_null_oop(V0); aoqi@6880: aoqi@6880: // load throwing pc: this is the return address of the stub aoqi@6880: __ ld_ptr(V1, Address(SP, return_off * BytesPerWord)); aoqi@1: aoqi@1: aoqi@1: #ifdef ASSERT aoqi@6880: // check that fields in JavaThread for exception oop and issuing pc are empty aoqi@6880: Label oop_empty; aoqi@6880: __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset()))); aoqi@6880: __ beq(AT,R0,oop_empty); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ stop("exception oop must be empty"); aoqi@6880: __ bind(oop_empty); aoqi@1: aoqi@6880: Label pc_empty; aoqi@6880: __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset()))); aoqi@6880: __ beq(AT,R0,pc_empty); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ stop("exception pc must be empty"); aoqi@6880: __ bind(pc_empty); aoqi@1: #endif aoqi@1: aoqi@6880: // store exception oop and throwing pc to JavaThread aoqi@6880: __ st_ptr(V0,Address(thread, in_bytes(JavaThread::exception_oop_offset()))); aoqi@6880: __ st_ptr(V1,Address(thread, in_bytes(JavaThread::exception_pc_offset()))); aoqi@1: aoqi@6880: restore_live_registers(sasm); aoqi@1: aoqi@6880: __ leave(); aoqi@1: aoqi@6880: // Forward the exception directly to deopt blob. We can blow no aoqi@6880: // registers and must leave throwing pc on the stack. A patch may aoqi@6880: // have values live in registers so the entry point with the aoqi@6880: // exception in tls. aoqi@6880: __ jmp(deopt_blob->unpack_with_exception_in_tls(), relocInfo::runtime_call_type); aoqi@6880: __ delayed()->nop(); aoqi@1: aoqi@6880: __ bind(L); aoqi@6880: } aoqi@1: aoqi@6880: // Runtime will return true if the nmethod has been deoptimized during aoqi@6880: // the patching process. In that case we must do a deopt reexecute instead. aoqi@1: aoqi@6880: Label reexecuteEntry, cont; aoqi@1: aoqi@6880: __ beq(V0, R0, cont); // have we deoptimized? aoqi@6880: __ delayed()->nop(); aoqi@1: aoqi@6880: // Will reexecute. 
Proper return address is already on the stack we just restore aoqi@6880: // registers, pop all of our frame but the return address and jump to the deopt blob aoqi@6880: restore_live_registers(sasm); aoqi@1: aoqi@6880: __ leave(); aoqi@6880: __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type); aoqi@6880: __ delayed()->nop(); aoqi@1: aoqi@6880: __ bind(cont); aoqi@6880: restore_live_registers(sasm); aoqi@1: aoqi@6880: __ leave(); aoqi@6880: __ jr(RA); aoqi@6880: __ delayed()->nop(); aoqi@6880: aoqi@6880: return oop_maps; aoqi@1: } aoqi@1: aoqi@1: aoqi@1: OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { aoqi@6880: // for better readability aoqi@6880: const bool must_gc_arguments = true; aoqi@6880: const bool dont_gc_arguments = false; aoqi@1: aoqi@1: aoqi@6880: // default value; overwritten for some optimized stubs that are called aoqi@6880: // from methods that do not use the fpu aoqi@6880: bool save_fpu_registers = true; aoqi@1: aoqi@1: aoqi@6880: // stub code & info for the different stubs aoqi@6880: OopMapSet* oop_maps = NULL; aoqi@1: aoqi@1: switch (id) { aoqi@1: case forward_exception_id: aoqi@1: { aoqi@8865: oop_maps = generate_handle_exception(id, sasm); aoqi@8865: __ leave(); aoqi@8865: __ jr(RA); aoqi@8865: __ delayed()->nop(); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@1: case new_instance_id: aoqi@1: case fast_new_instance_id: aoqi@1: case fast_new_instance_init_check_id: aoqi@1: { aoqi@1: Register klass = A4; // Incoming aoqi@1: Register obj = V0; // Result aoqi@1: aoqi@1: if (id == new_instance_id) { aoqi@1: __ set_info("new_instance", dont_gc_arguments); aoqi@1: } else if (id == fast_new_instance_id) { aoqi@1: __ set_info("fast new_instance", dont_gc_arguments); aoqi@1: } else { aoqi@1: assert(id == fast_new_instance_init_check_id, "bad StubID"); aoqi@1: __ set_info("fast new_instance init check", dont_gc_arguments); aoqi@1: } aoqi@1: aoqi@6880: if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) aoqi@1: && UseTLAB && FastTLABRefill) { aoqi@1: Label slow_path; aoqi@1: Register obj_size = T0; aoqi@1: Register t1 = T2; aoqi@1: Register t2 = T3; aoqi@1: assert_different_registers(klass, obj, obj_size, t1, t2); aoqi@1: if (id == fast_new_instance_init_check_id) { aoqi@1: // make sure the klass is initialized aoqi@8865: __ ld_ptr(AT, Address(klass, in_bytes(InstanceKlass::init_state_offset()))); aoqi@1: __ move(t1, InstanceKlass::fully_initialized); aoqi@1: __ bne(AT, t1, slow_path); aoqi@1: __ delayed()->nop(); aoqi@1: } aoqi@1: #ifdef ASSERT aoqi@1: // assert object can be fast path allocated aoqi@1: { aoqi@1: Label ok, not_ok; aoqi@1: __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset())); aoqi@1: __ blez(obj_size, not_ok); aoqi@1: __ delayed()->nop(); aoqi@1: __ andi(t1 , obj_size, Klass::_lh_instance_slow_path_bit); aoqi@1: __ beq(t1, R0, ok); aoqi@1: __ delayed()->nop(); aoqi@1: __ bind(not_ok); aoqi@1: __ stop("assert(can be fast path allocated)"); aoqi@1: __ should_not_reach_here(); aoqi@1: __ bind(ok); aoqi@1: } aoqi@1: #endif // ASSERT aoqi@1: // if we got here then the TLAB allocation failed, so try aoqi@1: // refilling the TLAB or allocating directly from eden. 
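          // Rough control flow (assuming the usual C1 tlab_refill contract):
          // retry_tlab is taken once the TLAB has been refilled, try_eden when
          // allocation should go straight to the shared eden, and slow_path
          // falls through to the call_RT(new_instance) further down.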
aoqi@6880: aoqi@1: Label retry_tlab, try_eden; aoqi@1: __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy edx (klass) aoqi@6880: aoqi@1: __ bind(retry_tlab); aoqi@6880: aoqi@1: // get the instance size aoqi@1: __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset())); aoqi@1: __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); aoqi@1: __ initialize_object(obj, klass, obj_size, 0, t1, t2); aoqi@1: __ verify_oop(obj); aoqi@1: __ jr(RA); aoqi@1: __ delayed()->nop(); aoqi@6880: wangxue@9205: #ifndef OPT_THREAD wangxue@9205: const Register thread = T8; wangxue@9205: __ get_thread(thread); wangxue@9205: #else wangxue@9205: const Register thread = TREG; wangxue@9205: #endif wangxue@9205: aoqi@1: __ bind(try_eden); aoqi@1: aoqi@6880: // get the instance size aoqi@1: __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset())); aoqi@1: __ eden_allocate(obj, obj_size, 0, t1, t2, slow_path); wangxue@9205: __ incr_allocated_bytes(thread, obj_size, 0); wangxue@9205: aoqi@1: __ initialize_object(obj, klass, obj_size, 0, t1, t2); aoqi@1: __ verify_oop(obj); aoqi@1: __ jr(RA); aoqi@1: __ delayed()->nop(); aoqi@6880: aoqi@1: __ bind(slow_path); aoqi@1: } aoqi@1: __ enter(); aoqi@1: OopMap* map = save_live_registers(sasm, 0); aoqi@1: int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); aoqi@1: oop_maps = new OopMapSet(); aoqi@1: oop_maps->add_gc_map(call_offset, map); aoqi@1: restore_live_registers_except_V0(sasm); aoqi@1: __ verify_oop(obj); aoqi@1: __ leave(); aoqi@1: __ jr(RA); aoqi@1: __ delayed()->nop(); aoqi@6880: aoqi@1: // V0: new instance aoqi@1: } aoqi@1: break; aoqi@1: aoqi@1: aoqi@1: #ifdef TIERED aoqi@1: //FIXME, I hava no idea which register to use aoqi@1: case counter_overflow_id: aoqi@1: { aoqi@1: #ifndef _LP64 aoqi@1: Register bci = T5; aoqi@1: #else aoqi@1: Register bci = A5; aoqi@1: #endif aoqi@8865: Register method = AT; aoqi@1: __ enter(); aoqi@1: OopMap* map = save_live_registers(sasm, 0); aoqi@1: // Retrieve bci aoqi@1: __ lw(bci, Address(FP, 2*BytesPerWord));// FIXME:wuhui.ebp==?? aoqi@8865: __ ld(method, Address(FP, 3*BytesPerWord)); aoqi@8865: int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); aoqi@1: oop_maps = new OopMapSet(); aoqi@1: oop_maps->add_gc_map(call_offset, map); aoqi@1: restore_live_registers(sasm); aoqi@1: __ leave(); aoqi@6880: __ jr(RA); aoqi@6880: __ delayed()->nop(); aoqi@1: } aoqi@1: break; aoqi@1: #endif // TIERED aoqi@1: aoqi@1: aoqi@1: aoqi@1: case new_type_array_id: aoqi@1: case new_object_array_id: aoqi@6880: { aoqi@6880: // i use T2 as length register, T4 as klass register, V0 as result register. 
aoqi@1: // MUST accord with NewTypeArrayStub::emit_code, NewObjectArrayStub::emit_code aoqi@1: Register length = T2; // Incoming aoqi@1: #ifndef _LP64 aoqi@1: Register klass = T4; // Incoming aoqi@1: #else aoqi@1: Register klass = A4; // Incoming aoqi@1: #endif aoqi@1: Register obj = V0; // Result aoqi@6880: aoqi@1: if (id == new_type_array_id) { aoqi@1: __ set_info("new_type_array", dont_gc_arguments); aoqi@1: } else { aoqi@1: __ set_info("new_object_array", dont_gc_arguments); aoqi@1: } aoqi@6880: aoqi@1: if (UseTLAB && FastTLABRefill) { aoqi@1: Register arr_size = T0; aoqi@6880: Register t1 = T1; aoqi@1: Register t2 = T3; aoqi@1: Label slow_path; aoqi@1: assert_different_registers(length, klass, obj, arr_size, t1, t2); aoqi@6880: aoqi@1: // check that array length is small enough for fast path aoqi@1: __ move(AT, C1_MacroAssembler::max_array_allocation_length); aoqi@1: __ sltu(AT, AT, length); aoqi@1: __ bne(AT, R0, slow_path); aoqi@1: __ delayed()->nop(); aoqi@1: aoqi@1: // if we got here then the TLAB allocation failed, so try aoqi@1: // refilling the TLAB or allocating directly from eden. aoqi@1: Label retry_tlab, try_eden; aoqi@6880: //T0,T1,T5,T8 have changed! aoqi@1: __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves T2 & T4 aoqi@6880: aoqi@1: __ bind(retry_tlab); aoqi@6880: aoqi@1: // get the allocation size: (length << (layout_helper & 0x1F)) + header_size aoqi@6880: __ lw(t1, klass, in_bytes(Klass::layout_helper_offset())); aoqi@1: __ andi(AT, t1, 0x1f); aoqi@1: __ sllv(arr_size, length, AT); aoqi@1: __ srl(t1, t1, Klass::_lh_header_size_shift); aoqi@1: __ andi(t1, t1, Klass::_lh_header_size_mask); aoqi@1: __ add(arr_size, t1, arr_size); aoqi@1: __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up aoqi@1: __ move(AT, ~MinObjAlignmentInBytesMask); aoqi@1: __ andr(arr_size, arr_size, AT); aoqi@6880: aoqi@6880: aoqi@1: __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size aoqi@1: __ initialize_header(obj, klass, length,t1,t2); aoqi@6880: __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) aoqi@1: + (Klass::_lh_header_size_shift / BitsPerByte))); aoqi@1: assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); aoqi@1: assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); aoqi@1: __ andi(t1, t1, Klass::_lh_header_size_mask); aoqi@1: __ sub(arr_size, arr_size, t1); // body length aoqi@1: __ add(t1, t1, obj); // body start aoqi@1: __ initialize_body(t1, arr_size, 0, t2); aoqi@1: __ verify_oop(obj); aoqi@1: __ jr(RA); aoqi@1: __ delayed()->nop(); aoqi@6880: wangxue@9205: #ifndef OPT_THREAD wangxue@9205: const Register thread = T8; wangxue@9205: __ get_thread(thread); wangxue@9205: #else wangxue@9205: const Register thread = TREG; wangxue@9205: #endif wangxue@9205: aoqi@1: __ bind(try_eden); aoqi@1: // get the allocation size: (length << (layout_helper & 0x1F)) + header_size aoqi@6880: __ lw(t1, klass, in_bytes(Klass::layout_helper_offset())); aoqi@1: __ andi(AT, t1, 0x1f); aoqi@1: __ sllv(arr_size, length, AT); aoqi@1: __ srl(t1, t1, Klass::_lh_header_size_shift); aoqi@1: __ andi(t1, t1, Klass::_lh_header_size_mask); aoqi@1: __ add(arr_size, t1, arr_size); aoqi@1: __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up aoqi@1: __ move(AT, ~MinObjAlignmentInBytesMask); aoqi@1: __ andr(arr_size, arr_size, AT); aoqi@1: __ eden_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size wangxue@9205: __ incr_allocated_bytes(thread, arr_size, 0); wangxue@9205: aoqi@1: __ 
initialize_header(obj, klass, length,t1,t2); aoqi@1: __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) aoqi@1: + (Klass::_lh_header_size_shift / BitsPerByte))); aoqi@1: __ andi(t1, t1, Klass::_lh_header_size_mask); aoqi@1: __ sub(arr_size, arr_size, t1); // body length aoqi@1: __ add(t1, t1, obj); // body start aoqi@6880: aoqi@1: __ initialize_body(t1, arr_size, 0, t2); aoqi@1: __ verify_oop(obj); aoqi@1: __ jr(RA); aoqi@1: __ delayed()->nop(); aoqi@1: __ bind(slow_path); aoqi@1: } aoqi@6880: aoqi@6880: aoqi@1: __ enter(); aoqi@1: OopMap* map = save_live_registers(sasm, 0); aoqi@1: int call_offset; aoqi@1: if (id == new_type_array_id) { aoqi@6880: call_offset = __ call_RT(obj, noreg, aoqi@1: CAST_FROM_FN_PTR(address, new_type_array), klass, length); aoqi@1: } else { aoqi@6880: call_offset = __ call_RT(obj, noreg, aoqi@1: CAST_FROM_FN_PTR(address, new_object_array), klass, length); aoqi@1: } aoqi@6880: aoqi@1: oop_maps = new OopMapSet(); aoqi@1: oop_maps->add_gc_map(call_offset, map); aoqi@1: restore_live_registers_except_V0(sasm); aoqi@1: __ verify_oop(obj); aoqi@6880: __ leave(); aoqi@1: __ jr(RA); aoqi@1: __ delayed()->nop(); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@1: case new_multi_array_id: aoqi@6880: { aoqi@6880: StubFrame f(sasm, "new_multi_array", dont_gc_arguments); aoqi@6880: //refer to c1_LIRGenerate_mips.cpp:do_NewmultiArray aoqi@6880: // V0: klass aoqi@6880: // T2: rank aoqi@6880: // T0: address of 1st dimension aoqi@6880: //__ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), A1, A2, A3); aoqi@6880: //OopMap* map = save_live_registers(sasm, 4); aoqi@6880: OopMap* map = save_live_registers(sasm, 0); aoqi@6880: int call_offset = __ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), aoqi@6880: V0,T2,T0); aoqi@6880: oop_maps = new OopMapSet(); aoqi@6880: oop_maps->add_gc_map(call_offset, map); aoqi@6880: //FIXME aoqi@6880: restore_live_registers_except_V0(sasm); aoqi@6880: // V0: new multi array aoqi@6880: __ verify_oop(V0); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@6880: aoqi@1: case register_finalizer_id: aoqi@1: { aoqi@6880: __ set_info("register_finalizer", dont_gc_arguments); aoqi@1: aoqi@6880: // The object is passed on the stack and we haven't pushed a aoqi@6880: // frame yet so it's one work away from top of stack. 
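      // Note: despite the x86-inherited comment above, in this MIPS stub the
      // object appears to arrive in A0 (see the move(V0, A0) below); the klass'
      // access_flags are tested against JVM_ACC_HAS_FINALIZER, and the runtime
      // call to SharedRuntime::register_finalizer is made only when that flag
      // is set, otherwise the stub returns immediately.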
aoqi@1: //reference to LIRGenerator::do_RegisterFinalizer, call_runtime aoqi@6880: __ move(V0, A0); aoqi@6880: __ verify_oop(V0); aoqi@6880: // load the klass and check the has finalizer flag aoqi@6880: Label register_finalizer; aoqi@1: #ifndef _LP64 aoqi@6880: Register t = T5; aoqi@1: #else aoqi@6880: Register t = A5; aoqi@1: #endif aoqi@6880: //__ ld_ptr(t, Address(V0, oopDesc::klass_offset_in_bytes())); aoqi@6880: __ load_klass(t, V0); aoqi@6880: __ lw(t, Address(t, Klass::access_flags_offset())); aoqi@6880: __ move(AT, JVM_ACC_HAS_FINALIZER); aoqi@6880: __ andr(AT, AT, t); aoqi@1: aoqi@6880: __ bne(AT, R0, register_finalizer); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ jr(RA); aoqi@6880: __ delayed()->nop(); aoqi@6880: __ bind(register_finalizer); aoqi@6880: __ enter(); aoqi@6880: OopMap* map = save_live_registers(sasm, 0 /*num_rt_args */); aoqi@6880: aoqi@6880: int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, aoqi@6880: SharedRuntime::register_finalizer), V0); aoqi@6880: oop_maps = new OopMapSet(); aoqi@1: oop_maps->add_gc_map(call_offset, map); aoqi@1: aoqi@6880: // Now restore all the live registers aoqi@6880: restore_live_registers(sasm); aoqi@1: aoqi@6880: __ leave(); aoqi@6880: __ jr(RA); aoqi@6880: __ delayed()->nop(); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@6880: // case range_check_failed_id: aoqi@6880: case throw_range_check_failed_id: aoqi@8865: { aoqi@8865: StubFrame f(sasm, "range_check_failed", dont_gc_arguments); aoqi@6880: oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, aoqi@1: throw_range_check_exception),true); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@1: case throw_index_exception_id: aoqi@6880: { aoqi@6880: // i use A1 as the index register, for this will be the first argument, see call_RT aoqi@6880: StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); aoqi@6880: oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, aoqi@6880: throw_index_exception), true); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@6880: case throw_div0_exception_id: aoqi@1: { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); aoqi@6880: oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, aoqi@6880: throw_div0_exception), false); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@6880: case throw_null_pointer_exception_id: aoqi@8865: { aoqi@8865: StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); aoqi@6880: oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, aoqi@6880: throw_null_pointer_exception),false); aoqi@1: } aoqi@1: break; aoqi@1: aoqi@8865: case handle_exception_nofpu_id: aoqi@6880: save_fpu_registers = false; aoqi@6880: // fall through aoqi@6880: case handle_exception_id: aoqi@6880: { aoqi@6880: StubFrame f(sasm, "handle_exception", dont_gc_arguments); aoqi@6880: //OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers); aoqi@6880: oop_maps = generate_handle_exception(id, sasm); aoqi@6880: } aoqi@6880: break; aoqi@8865: case handle_exception_from_callee_id: aoqi@8865: { aoqi@8865: StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); aoqi@8865: oop_maps = generate_handle_exception(id, sasm); aoqi@8865: } aoqi@8865: break; aoqi@6880: case unwind_exception_id: aoqi@6880: { aoqi@6880: __ set_info("unwind_exception", dont_gc_arguments); aoqi@6880: generate_unwind_exception(sasm); aoqi@6880: } aoqi@6880: break; aoqi@1: aoqi@1: aoqi@6880: case throw_array_store_exception_id: aoqi@8865: { aoqi@8865: StubFrame f(sasm, "throw_array_store_exception", 
aoqi@6880:         // tos + 0: link
aoqi@6880:         //     + 1: return address
aoqi@6880:         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
wangxue@9248:                                             throw_array_store_exception), true);
aoqi@6880:       }
aoqi@6880:       break;
aoqi@1:
aoqi@6880:     case throw_class_cast_exception_id:
aoqi@8865:       {
aoqi@8865:         StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
aoqi@6880:         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@8865:                                             throw_class_cast_exception), true);
aoqi@6880:       }
aoqi@6880:       break;
aoqi@1:
aoqi@6880:     case throw_incompatible_class_change_error_id:
aoqi@6880:       {
aoqi@8865:         StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
aoqi@8865:         oop_maps = generate_exception_throw(sasm,
aoqi@8865:                                             CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
aoqi@6880:       }
aoqi@6880:       break;
aoqi@1:
aoqi@6880:     case slow_subtype_check_id:
aoqi@6880:       {
aoqi@6880:         // actually, we do not use this stub
aoqi@6880:         // A0: klass_RInfo (sub)
aoqi@6880:         // A1: k->encoding() (super)
aoqi@6880:         __ set_info("slow_subtype_check", dont_gc_arguments);
aoqi@6880:         __ st_ptr(T0, SP, (-1) * wordSize);
aoqi@6880:         __ st_ptr(T1, SP, (-2) * wordSize);
aoqi@6880:         __ addiu(SP, SP, (-2) * wordSize);
aoqi@1:
fujie@9132:         Label miss;
fujie@9132:         __ check_klass_subtype_slow_path(A0, A1, T0, T1, NULL, &miss);
aoqi@1:
aoqi@6880:         __ addiu(V0, R0, 1);
aoqi@6880:         __ addiu(SP, SP, 2 * wordSize);
aoqi@6880:         __ ld_ptr(T0, SP, (-1) * wordSize);
aoqi@6880:         __ ld_ptr(T1, SP, (-2) * wordSize);
aoqi@6880:         __ jr(RA);
aoqi@6880:         __ delayed()->nop();
aoqi@1:
aoqi@1:
aoqi@6880:         __ bind(miss);
aoqi@6880:         __ move(V0, R0);
aoqi@6880:         __ addiu(SP, SP, 2 * wordSize);
aoqi@6880:         __ ld_ptr(T0, SP, (-1) * wordSize);
aoqi@6880:         __ ld_ptr(T1, SP, (-2) * wordSize);
aoqi@6880:         __ jr(RA);
aoqi@6880:         __ delayed()->nop();
aoqi@6880:       }
aoqi@6880:       break;
aoqi@1:
aoqi@1:     case monitorenter_nofpu_id:
aoqi@1:       save_fpu_registers = false;  // fall through
aoqi@1:
aoqi@6880:     case monitorenter_id:
aoqi@1:       {
aoqi@6880:         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
aoqi@6880:         OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
aoqi@1:
aoqi@6880:         f.load_argument(1, V0); // V0: object
aoqi@1: #ifndef _LP64
aoqi@6880:         f.load_argument(0, T6); // T6: lock address
aoqi@6880:         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880:                                      monitorenter), V0, T6);
aoqi@1: #else
aoqi@6880:         f.load_argument(0, A6); // A6: lock address
aoqi@6880:         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880:                                      monitorenter), V0, A6);
aoqi@1: #endif
aoqi@1:
aoqi@6880:         oop_maps = new OopMapSet();
aoqi@6880:         oop_maps->add_gc_map(call_offset, map);
aoqi@6880:         restore_live_registers(sasm, save_fpu_registers);
aoqi@6880:       }
aoqi@6880:       break;
aoqi@1:
aoqi@6880:     case monitorexit_nofpu_id:
aoqi@6880:       save_fpu_registers = false;
aoqi@6880:       // fall through
aoqi@6880:     case monitorexit_id:
aoqi@6880:       {
aoqi@1:         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
aoqi@1:         OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
aoqi@6880:
aoqi@1: #ifndef _LP64
aoqi@1:         f.load_argument(0, T6); // T6: lock address
aoqi@1: #else
aoqi@1:         f.load_argument(0, A6); // A6: lock address
aoqi@1: #endif
aoqi@1:         // note: really a leaf routine but must setup last java sp
aoqi@1:         //       => use call_RT for now (speed can be improved by
aoqi@1:         //       doing last java sp setup manually)
aoqi@1: #ifndef _LP64
aoqi@6880:         int call_offset = __ call_RT(noreg, noreg,
aoqi@6880:                                      CAST_FROM_FN_PTR(address, monitorexit), T6);
aoqi@1: #else
aoqi@6880:         int call_offset = __ call_RT(noreg, noreg,
aoqi@6880:                                      CAST_FROM_FN_PTR(address, monitorexit), A6);
aoqi@1: #endif
aoqi@1:         oop_maps = new OopMapSet();
aoqi@1:         oop_maps->add_gc_map(call_offset, map);
aoqi@1:         restore_live_registers(sasm, save_fpu_registers);
aoqi@6880:
aoqi@1:       }
aoqi@1:       break;
aoqi@6880:     // case init_check_patching_id:
aoqi@6880:     case access_field_patching_id:
aoqi@6880:       {
aoqi@1:         StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
aoqi@1:         // we should set up register map
aoqi@1:         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
aoqi@1:
aoqi@1:       }
aoqi@1:       break;
aoqi@1:
aoqi@6880:     case load_klass_patching_id:
aoqi@6880:       {
aoqi@6880:         StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
aoqi@6880:         // we should set up register map
aoqi@6880:         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address,
aoqi@6880:                                      move_klass_patching));
aoqi@6880:       }
aoqi@6880:       break;
aoqi@6880:     /* case jvmti_exception_throw_id:
aoqi@6880:       {
aoqi@6880:         // V0: exception oop
aoqi@6880:         // V1: exception pc
aoqi@6880:         StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
aoqi@6880:         // Preserve all registers across this potentially blocking call
aoqi@6880:         const int num_rt_args = 2;  // thread, exception oop
aoqi@6880:         //OopMap* map = save_live_registers(sasm, num_rt_args);
aoqi@6880:         OopMap* map = save_live_registers(sasm, 0);
aoqi@6880:         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880:                                      Runtime1::post_jvmti_exception_throw), V0);
aoqi@6880:         oop_maps = new OopMapSet();
aoqi@6880:         oop_maps->add_gc_map(call_offset, map);
aoqi@6880:         restore_live_registers(sasm);
aoqi@6880:       } */
aoqi@8865:     case load_mirror_patching_id:
aoqi@8865:       {
aoqi@8865:         StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
aoqi@8865:         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
aoqi@8865:       }
aoqi@6880:       break;
aoqi@8865:
aoqi@8865:     case load_appendix_patching_id:
aoqi@8865:       {
aoqi@8865:         StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
aoqi@8865:         // we should set up register map
aoqi@8865:         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
aoqi@8865:       }
aoqi@8865:       break;
aoqi@8865:
aoqi@6880:     case dtrace_object_alloc_id:
aoqi@6880:       {
aoqi@6880:         // V0: object
aoqi@6880:         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
aoqi@6880:         // we can't gc here so skip the oopmap but make sure that all
aoqi@6880:         // the live registers get saved.
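        // The newly allocated object is expected in V0; it is kept live across
        // the leaf call by the push_reg/super_pop pair below and is passed to
        // SharedRuntime::dtrace_object_alloc in A0.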
aoqi@6880:         save_live_registers(sasm, 0);
aoqi@1:
aoqi@6880:         __ push_reg(V0);
aoqi@8865:         __ move(A0, V0);
aoqi@6880:         __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
aoqi@6880:                 relocInfo::runtime_call_type);
fujie@9134:         __ delayed()->nop();
aoqi@6880:         __ super_pop(V0);
aoqi@1:
aoqi@6880:         restore_live_registers(sasm);
aoqi@6880:       }
aoqi@6880:       break;
aoqi@8865:
aoqi@6880:     case fpu2long_stub_id:
aoqi@6880:       {
aoqi@8865:         // FIXME: I have no idea how to port this
aoqi@8865:         //tty->print_cr("fpu2long_stub_id unimplemented yet!");
aoqi@6880:       }
aoqi@8865:       break;
aoqi@8865:
aoqi@8865:     case deoptimize_id:
aoqi@8865:       {
aoqi@8865:         StubFrame f(sasm, "deoptimize", dont_gc_arguments);
aoqi@8865:         const int num_rt_args = 1;  // thread
aoqi@8865:         OopMap* oop_map = save_live_registers(sasm, num_rt_args);
aoqi@8865:         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
aoqi@8865:         oop_maps = new OopMapSet();
aoqi@8865:         oop_maps->add_gc_map(call_offset, oop_map);
aoqi@8865:         restore_live_registers(sasm);
aoqi@8865:         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
aoqi@8865:         assert(deopt_blob != NULL, "deoptimization blob must have been created");
aoqi@8865:         __ leave();
aoqi@8865:         __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
zhaixiang@9135:         __ delayed()->nop();
aoqi@8865:       }
aoqi@8865:       break;
aoqi@8865:
aoqi@8865:     case predicate_failed_trap_id:
aoqi@8865:       {
aoqi@8865:         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
aoqi@8865:
aoqi@8865:         OopMap* map = save_live_registers(sasm, 1);
aoqi@8865:
aoqi@8865:         int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
aoqi@8865:         oop_maps = new OopMapSet();
aoqi@8865:         oop_maps->add_gc_map(call_offset, map);
aoqi@8865:         restore_live_registers(sasm);
aoqi@8865:         __ leave();
aoqi@8865:         DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
aoqi@8865:         assert(deopt_blob != NULL, "deoptimization blob must have been created");
aoqi@8865:
aoqi@8865:         __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
zhaixiang@9135:         __ delayed()->nop();
aoqi@8865:       }
aoqi@8865:       break;
aoqi@8865:
aoqi@6880:     default:
aoqi@8865:       {
aoqi@8865:         StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
aoqi@6880:         __ move(A1, (int)id);
aoqi@6880:         __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), A1);
aoqi@6880:         __ should_not_reach_here();
aoqi@6880:       }
aoqi@6880:       break;
aoqi@6880:   }
aoqi@6880:   return oop_maps;
aoqi@1: }
aoqi@1:
aoqi@1: #undef __
aoqi@1:
aoqi@1: const char *Runtime1::pd_name_for_address(address entry) {
aoqi@1:   return "";
aoqi@1: }