src/cpu/mips/vm/c1_Runtime1_mips.cpp

changeset 8865:ffcdff41a92f
author: aoqi
date: Thu, 24 May 2018 19:49:50 +0800
parent: 6880:52ea28d233d2
child: 9132:0f025dcc49cc
some C1 fix
Contributed-by: chenhaoxuan, zhaixiang, aoqi

/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_mips.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_mips.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_mips.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif
// Implementation of StubAssembler

// This method preserves the stack space for arguments as indicated by args_size.
// For stack-alignment reasons you cannot call it with arguments passed on the stack.
// If you need more than 3 arguments, you must implement this method yourself.
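// Illustrative usage, mirroring the call sites later in this file:
//
//   OopMap* map = save_live_registers(sasm, 0);
//   int call_offset = __ call_RT(obj, noreg,
//                                CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
//
// The returned offset identifies the instruction after the runtime call so
// that a GC map can be registered for it.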
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
  // S7 plays the role that edi plays in the x86 version.
  // setup registers
  const Register thread = TREG; // callee-saved register
  assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
  assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  move(A0, thread);

  set_last_Java_frame(thread, NOREG, FP, NULL);
  NOT_LP64(addi(SP, SP, -wordSize * (1 + args_size)));
  move(AT, -(StackAlignmentInBytes));
  andr(SP, SP, AT);

  relocate(relocInfo::internal_pc_type);
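  // The pc saved below is intended to be the return address of the jalr(T9)
  // emitted further down: the current pc plus the size of the constant-load
  // sequence, the st_ptr, and the call sequence itself (hence the use of
  // NativeCall's return-address offset), so that last_Java_pc records the
  // point to which the runtime call returns.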
  {
#ifndef _LP64
    int save_pc = (int)pc() + 12 + NativeCall::return_address_offset;
    lui(AT, Assembler::split_high(save_pc));
    addiu(AT, AT, Assembler::split_low(save_pc));
#else
    uintptr_t save_pc = (uintptr_t)pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset_long;
    li48(AT, save_pc);
#endif
  }
  st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
  // do the call
#ifndef _LP64
  lui(T9, Assembler::split_high((int)entry));
  addiu(T9, T9, Assembler::split_low((int)entry));
#else
  li48(T9, (intptr_t)entry);
#endif
  jalr(T9);
  delayed()->nop();
  int call_offset = offset();

  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != V0, "change this code");
  push(V0);
  {
    Label L;
    get_thread(V0);
    beq(thread, V0, L);
    delayed()->nop();
    int3();
    stop("StubAssembler::call_RT: TREG not callee saved?");
    bind(L);
  }
  super_pop(V0);
#endif
  // discard thread and arguments
  ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); // by yyq
  // FIXME: in the x86 version the second parameter is false -- why true here? @jerome, 12/31, 06
  //   reset_last_Java_frame(thread, true);
  reset_last_Java_frame(thread, true, false);
  // check for pending exceptions
  {
    Label L;
    ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
    beq(AT, R0, L);
    delayed()->nop();
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
    }
    if (oop_result2->is_valid()) {
      st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
    }
    // the leave() on x86 just pops ebp and leaves the return address on
    // top of the stack;
    // the return address will be needed by forward_exception_entry()
    if (frame_size() == no_frame_size) {
      addiu(SP, FP, wordSize);
      ld_ptr(FP, SP, (-1) * wordSize);
      jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->nop();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jmp(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type);
      delayed()->nop();
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    ld_ptr(oop_result1, thread, in_bytes(JavaThread::vm_result_offset()));
    st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
    verify_oop(oop_result1);
  }
  if (oop_result2->is_valid()) {
    ld_ptr(oop_result2, thread, in_bytes(JavaThread::vm_result_2_offset()));
    st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
    verify_oop(oop_result2);
  }
  return call_offset;
}
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
  if (arg1 != A1) move(A1, arg1);
  return call_RT(oop_result1, oop_result2, entry, 1);
}

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
  if (arg1 != A1) move(A1, arg1);
  if (arg2 != A2) move(A2, arg2);
  assert(arg2 != A1, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 2);
}

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
  if (arg1 != A1) move(A1, arg1);
  if (arg2 != A2) move(A2, arg2);
  assert(arg2 != A1, "smashed argument");
  if (arg3 != A3) move(A3, arg3);
  assert(arg3 != A1 && arg3 != A2, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 3);
}
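// Note: the overloads above fill A1..A3 in ascending order, so the asserts
// catch callers that pass, e.g., arg2 == A1, in which case the move into A1
// would already have smashed the value before it could be copied into A2.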
// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}
//FIXME: I have no idea about the frame architecture of MIPS
// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  //FP + 0: link
  //   + 1: return address
  //   + 2: argument with offset 0
  //   + 3: argument with offset 1
  //   + 4: ...
  __ ld_ptr(reg, Address(FP, (offset_in_words + 2) * BytesPerWord));
}
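// Example: for a stub whose caller stored one value with
// LIR_Assembler::store_parameter(..., 0), load_argument(0, reg) reloads it
// from FP + 2 * BytesPerWord, i.e. just past the saved link and return address.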
StubFrame::~StubFrame() {
  __ leave();
  __ jr(RA);
  __ delayed()->nop();
}

#undef __
// Implementation of Runtime1

#define __ sasm->

//static OopMap* save_live_registers(MacroAssembler* sasm, int num_rt_args);
//static void restore_live_registers(MacroAssembler* sasm);
//DeoptimizationBlob* SharedRuntime::_deopt_blob = NULL;
/*
const int fpu_stack_as_doubles_size_in_words = 16;
const int fpu_stack_as_doubles_size = 64;
*/
const int float_regs_as_doubles_size_in_words = 16;

//FIXME,
// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64
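// For example, under _LP64 the enum entry "V0_off, SLOT2(V0H_off)" expands to
// "V0_off, V0H_off," (each saved word covers two VMReg stack slots, the second
// naming the high half), while on 32-bit it expands to just "V0_off,".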
enum reg_save_layout {
#ifndef _LP64
  T0_off = 0,
  S0_off = T0_off + SLOT_PER_WORD * 8,
#else
  A4_off = 0,
  S0_off = A4_off + SLOT_PER_WORD * 8,
#endif
  FP_off = S0_off + SLOT_PER_WORD * 8, SLOT2(FPH_off)
  T8_off, SLOT2(T8H_off)
  T9_off, SLOT2(T9H_off)
  SP_off, SLOT2(SPH_off)
  V0_off, SLOT2(V0H_off)
  V1_off, SLOT2(V1H_off)
  A0_off, SLOT2(A0H_off)
  A1_off, SLOT2(A1H_off)
  A2_off, SLOT2(A2H_off)
  A3_off, SLOT2(A3H_off)

  // Float registers
  /* FIXME: Jin: In MIPS64, F0~F23 are all caller-saved registers */
  F0_off, SLOT2( F0H_off)
  F1_off, SLOT2( F1H_off)
  F2_off, SLOT2( F2H_off)
  F3_off, SLOT2( F3H_off)
  F4_off, SLOT2( F4H_off)
  F5_off, SLOT2( F5H_off)
  F6_off, SLOT2( F6H_off)
  F7_off, SLOT2( F7H_off)
  F8_off, SLOT2( F8H_off)
  F9_off, SLOT2( F9H_off)
  F10_off, SLOT2( F10H_off)
  F11_off, SLOT2( F11H_off)
  F12_off, SLOT2( F12H_off)
  F13_off, SLOT2( F13H_off)
  F14_off, SLOT2( F14H_off)
  F15_off, SLOT2( F15H_off)
  F16_off, SLOT2( F16H_off)
  F17_off, SLOT2( F17H_off)
  F18_off, SLOT2( F18H_off)
  F19_off, SLOT2( F19H_off)

  GP_off, SLOT2( GPH_off)
  //temp_2_off,
  temp_1_off, SLOT2(temp_1H_off)
  saved_fp_off, SLOT2(saved_fpH_off)
  return_off, SLOT2(returnH_off)

  reg_save_frame_size,

  // illegal instruction handler
  continue_dest_off = temp_1_off,

  // deoptimization equates
  //deopt_type = temp_2_off,             // slot for type of deopt in progress
  ret_type = temp_1_off                  // slot for return type
};
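// A saved register's address within the save frame is recovered as
//   SP + off * wordSize / SLOT_PER_WORD
// since each *_off value counts VMReg slots (two per word under _LP64); this
// is the arithmetic used by the st_ptr/ld_ptr/sdc1/ldc1 calls below.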
// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them, and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.
//FIXME: I have no idea which registers should be saved. @jerome
static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true, bool describe_fpu_registers = false) {
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / VMRegImpl::slots_per_word;   // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments
  // in call, including thread
  OopMap* map = new OopMap(reg_save_frame_size, 0);

  map->set_callee_saved(VMRegImpl::stack2reg(V0_off + num_rt_args), V0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(V1_off + num_rt_args), V1->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(V0H_off + num_rt_args), V0->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(V1H_off + num_rt_args), V1->as_VMReg()->next());
#endif

  int i = 0;
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    map->set_callee_saved(VMRegImpl::stack2reg(T0_off + num_rt_args + i++), r->as_VMReg());
  }
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg()->next());
  }
#endif

  i = 0;
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg());
#ifdef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg()->next());
#endif
  }

  map->set_callee_saved(VMRegImpl::stack2reg(FP_off + num_rt_args), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(GP_off + num_rt_args), GP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(T8_off + num_rt_args), T8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(T9_off + num_rt_args), T9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A0_off + num_rt_args), A0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A1_off + num_rt_args), A1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A2_off + num_rt_args), A2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A3_off + num_rt_args), A3->as_VMReg());

  map->set_callee_saved(VMRegImpl::stack2reg(F0_off + num_rt_args), F0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F1_off + num_rt_args), F1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F2_off + num_rt_args), F2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F3_off + num_rt_args), F3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F4_off + num_rt_args), F4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F5_off + num_rt_args), F5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F6_off + num_rt_args), F6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F7_off + num_rt_args), F7->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F8_off + num_rt_args), F8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F9_off + num_rt_args), F9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F10_off + num_rt_args), F10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F11_off + num_rt_args), F11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F12_off + num_rt_args), F12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F13_off + num_rt_args), F13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F14_off + num_rt_args), F14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F15_off + num_rt_args), F15->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F16_off + num_rt_args), F16->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F17_off + num_rt_args), F17->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F18_off + num_rt_args), F18->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F19_off + num_rt_args), F19->as_VMReg());

#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(FPH_off + num_rt_args), FP->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(GPH_off + num_rt_args), GP->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(T8H_off + num_rt_args), T8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(T9H_off + num_rt_args), T9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A0H_off + num_rt_args), A0->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A1H_off + num_rt_args), A1->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A2H_off + num_rt_args), A2->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A3H_off + num_rt_args), A3->as_VMReg()->next());
#endif
  return map;
}
//FIXME: is it enough to save these registers? by yyq
static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true,
                                   bool describe_fpu_registers = false) {
  //const int reg_save_frame_size = return_off + 1 + num_rt_args;
  __ block_comment("save_live_registers");

  // save all register state - int, fpu
  __ addi(SP, SP, -(reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    __ sw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    __ sd(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
#endif
  }
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    __ st_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
  }
  __ st_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);

  __ sdc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers, describe_fpu_registers);
}
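// Note: despite its name, restore_fpu reloads the complete register set
// written by save_live_registers (integer and float registers) and pops the
// save frame; restore_live_registers is a thin wrapper around it.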
static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  //static void restore_live_registers(MacroAssembler* sasm) {
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
#endif
  }
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
  }
  __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);

  __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);

  __ ld_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);

  __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);

  __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  restore_fpu(sasm, restore_fpu_registers);
}
static void restore_live_registers_except_V0(StubAssembler* sasm, bool restore_fpu_registers = true) {
  //static void restore_live_registers(MacroAssembler* sasm) {
  //FIXME: maybe V1 needs to be saved too
  __ block_comment("restore_live_registers except V0");
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
#endif
  }
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
  }
  __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);

  __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);

#if 1
  __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
#endif

  __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);

  __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
}
void Runtime1::initialize_pd() {
  // nothing to do
}
// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on the stack because registers must be preserved)
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  OopMap* oop_map = save_live_registers(sasm, 0);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  // (all registers are saved; MIPS should not need this)

  // registers used by this stub
  const Register temp_reg = T3;
  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
    __ ld_ptr(temp_reg, Address(FP, 2*BytesPerWord));
  }
  int call_offset;
  if (has_argument)
    call_offset = __ call_RT(noreg, noreg, target, temp_reg);
  else
    call_offset = __ call_RT(noreg, noreg, target);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
//FIXME: I do not know which register to use; should use T3 as real_return_addr @jerome
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = V0;
  const Register exception_pc = V1;
  // other registers used in this stub
  // const Register real_return_addr = T3;
  const Register thread = TREG;
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
   622   case forward_exception_id:
   623     // We're handling an exception in the context of a compiled frame.
   624     // The registers have been saved in the standard places.  Perform
   625     // an exception lookup in the caller and dispatch to the handler
   626     // if found.  Otherwise unwind and dispatch to the callers
   627     // exception handler.
   628     oop_map = generate_oop_map(sasm, 1 /*thread*/);
   630     // load and clear pending exception oop into RAX
   631     __ ld_ptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
   632     __ st_ptr(R0, Address(thread, Thread::pending_exception_offset()));
   634     // load issuing PC (the return address for this stub) into rdx
   635     __ ld_ptr(exception_pc, Address(FP, 1*BytesPerWord));
   637     // make sure that the vm_results are cleared (may be unnecessary)
   638     __ st_ptr(R0, Address(thread, JavaThread::vm_result_offset()));
   639     __ st_ptr(R0, Address(thread, JavaThread::vm_result_2_offset()));
   640     break;
   641   case handle_exception_nofpu_id:
   642   case handle_exception_id:
   643     // At this point all registers MAY be live.
   644     oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
   645     break;
   646   case handle_exception_from_callee_id: {
   647     // At this point all registers except exception oop (RAX) and
   648     // exception pc (RDX) are dead.
   649     const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/);
   650     oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
   651     sasm->set_frame_size(frame_size);
   652     break;
   653   }
   654   default:  ShouldNotReachHere();
   655   }
#ifdef TIERED
  // C2 can leave the fpu stack dirty
  __ empty_FPU_stack();
#endif // TIERED

  // verify that only V0 and V1 are valid at this time
  // verify that V0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  __ get_thread(thread);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
  __ beq(AT, R0, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
  __ beq(AT, R0, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ st_ptr(exception_oop, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
  __ st_ptr(exception_pc, Address(thread, in_bytes(JavaThread::exception_pc_offset())));

  // patch throwing pc into return address (has bci & oop map)
  __ st_ptr(exception_pc, Address(FP, 1*BytesPerWord));

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);
  // V0:  handler address or NULL if no handler exists
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether a handler existed in the nmethod.

  // only V0 is valid at this time; all other registers have been destroyed by the
  // runtime call

  // patch the return address -> the stub will directly return to the exception handler
  __ st_ptr(V0, Address(FP, 1 * BytesPerWord));
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id != handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from FP).
    __ leave();
    // Restore SP from FP if the exception PC is a method handle call site.
    {
      Label done;
      __ ld(AT, Address(thread, JavaThread::is_method_handle_return_offset()));
      __ beq(AT, R0, done);
      __ delayed()->nop();
      __ bind(done);
    }
    __ jr(RA);  // jump to exception handler
    __ delayed()->nop();
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = V0;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = S0;
  // other registers used in this stub
  const Register exception_pc = V1;
  const Register handler_addr = T3;
  const Register thread = TREG;

  // verify that only V0 is valid at this time
  //  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  __ get_thread(thread);
  Label oop_empty;
  __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset()));
  __ beq(AT, R0, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_pc_offset()));
  __ beq(AT, R0, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif
  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in a callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ move(exception_oop_callee_saved, exception_oop);

#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  // get return address (is on top of stack after leave)
  __ ld_ptr(exception_pc, SP, 0);

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // V0: exception handler address of the caller

  // only V0 is valid at this time; all other registers have been destroyed by the call

  // move result of call into correct register
  __ move(handler_addr, V0);

  // Restore exception oop to V0 (required convention of exception handler).
  __ move(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in V0
  __ verify_oop(exception_oop);

  // get throwing pc (= return address).
  // V1 has been destroyed by the call, so it must be set again;
  // the pop is also necessary to simulate the effect of a ret(0)
  __ super_pop(exception_pc);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // V0: exception oop
  // V1: throwing pc
  // T3: exception handler
  __ jr(handler_addr);
  __ delayed()->nop();
}
//static address deopt_with_exception_entry_for_patch = NULL;

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop map is shared for all calls.

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");
  // assert(deopt_with_exception_entry_for_patch != NULL,
  // "deoptimization blob must have been created");

  //OopMap* oop_map = save_live_registers(sasm, num_rt_args);
  OopMap* oop_map = save_live_registers(sasm, 0);
  const Register thread = T8;
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ move(A0, thread);

/*
 * NOTE: this frame should be a compiled frame, but at this point the pc in the
 * frame anchor is contained in the interpreter. That should be wrong and should
 * be cleared, but is not. Even if we cleared the wrong pc in the anchor, the
 * default way to get the caller pc for this frame is not right: it depends on
 * the caller pc being stored at *(sp - 1), which is not the case here.
 */
  __ set_last_Java_frame(thread, NOREG, FP, NULL);
  NOT_LP64(__ addiu(SP, SP, (-1) * wordSize));
  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);
  __ relocate(relocInfo::internal_pc_type);
  {
#ifndef _LP64
    int save_pc = (int)__ pc() + 12 + NativeCall::return_address_offset;
    __ lui(AT, Assembler::split_high(save_pc));
    __ addiu(AT, AT, Assembler::split_low(save_pc));
#else
    uintptr_t save_pc = (uintptr_t)__ pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset_long;
    __ li48(AT, save_pc);
#endif
  }
  __ st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));

  // do the call
#ifndef _LP64
  __ lui(T9, Assembler::split_high((int)target));
  __ addiu(T9, T9, Assembler::split_low((int)target));
#else
  __ li48(T9, (intptr_t)target);
#endif
  __ jalr(T9);
  __ delayed()->nop();
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);

  __ get_thread(thread);

  __ ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
  __ reset_last_Java_frame(thread, true, true);
  // discard thread arg
  // check for pending exceptions
  {
    Label L, skip;
    //Label no_deopt;
    __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
    __ beq(AT, R0, L);
    __ delayed()->nop();
    // exception pending => remove activation and forward to exception handler

    __ bne(V0, R0, skip);
    __ delayed()->nop();
    __ jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
        relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ bind(skip);

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear the pending exception.

    // load and clear pending exception
    __ ld_ptr(V0, Address(thread, in_bytes(Thread::pending_exception_offset())));
    __ st_ptr(R0, Address(thread, in_bytes(Thread::pending_exception_offset())));

    // check that there is really a valid exception
    __ verify_not_null_oop(V0);

    // load throwing pc: this is the return address of the stub
    __ ld_ptr(V1, Address(SP, return_off * BytesPerWord));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
    __ beq(AT, R0, oop_empty);
    __ delayed()->nop();
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
    __ beq(AT, R0, pc_empty);
    __ delayed()->nop();
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ st_ptr(V0, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
    __ st_ptr(V1, Address(thread, in_bytes(JavaThread::exception_pc_offset())));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point with the
    // exception in tls.
    __ jmp(deopt_blob->unpack_with_exception_in_tls(), relocInfo::runtime_call_type);
    __ delayed()->nop();

    __ bind(L);
  }

  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.
  Label reexecuteEntry, cont;

  __ beq(V0, R0, cont);                              // have we deoptimized?
  __ delayed()->nop();

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ jr(RA);
  __ delayed()->nop();

  return oop_maps;
}
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called
  // from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;

  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = A4; // Incoming
        Register obj   = V0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id)
             && UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = T0;
          Register t1       = T2;
          Register t2       = T3;
          assert_different_registers(klass, obj, obj_size, t1, t2);
          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ ld_ptr(AT, Address(klass, in_bytes(InstanceKlass::init_state_offset())));
            __ move(t1, InstanceKlass::fully_initialized);
            __ bne(AT, t1, slow_path);
            __ delayed()->nop();
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
            __ blez(obj_size, not_ok);
            __ delayed()->nop();
            __ andi(t1, obj_size, Klass::_lh_instance_slow_path_bit);
            __ beq(t1, R0, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.

          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy the klass register

          __ bind(retry_tlab);

          // get the instance size
          __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(try_eden);

          // get the instance size
          __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
          __ eden_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(slow_path);
        }
        __ enter();
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_V0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();

        // V0: new instance
      }
      break;
#ifdef TIERED
//FIXME: I have no idea which register to use
    case counter_overflow_id:
      {
#ifndef _LP64
        Register bci = T5;
#else
        Register bci = A5;
#endif
        Register method = AT;
        __ enter();
        OopMap* map = save_live_registers(sasm, 0);
        // Retrieve bci
        __ lw(bci, Address(FP, 2*BytesPerWord)); // FIXME: wuhui. ebp==??
        __ ld(method, Address(FP, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
#endif // TIERED
    case new_type_array_id:
    case new_object_array_id:
      {
        // T2 is used as the length register, T4/A4 as the klass register,
        // and V0 as the result register.
        // MUST agree with NewTypeArrayStub::emit_code and NewObjectArrayStub::emit_code
        Register length   = T2; // Incoming
#ifndef _LP64
        Register klass    = T4; // Incoming
#else
        Register klass    = A4; // Incoming
#endif
        Register obj      = V0; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = T0;
          Register t1       = T1;
          Register t2       = T3;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path
          __ move(AT, C1_MacroAssembler::max_array_allocation_length);
          __ sltu(AT, AT, length);
          __ bne(AT, R0, slow_path);
          __ delayed()->nop();

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          // T0, T1, T5 and T8 have changed!
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves T2 & T4

          __ bind(retry_tlab);

          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
          __ andi(AT, t1, 0x1f);
          __ sllv(arr_size, length, AT);
          __ srl(t1, t1, Klass::_lh_header_size_shift);
          __ andi(t1, t1, Klass::_lh_header_size_mask);
          __ add(arr_size, t1, arr_size);
          __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask);  // align up
          __ move(AT, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, AT);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
          __ initialize_header(obj, klass, length, t1, t2);
          __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
                                    + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andi(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
          __ andi(AT, t1, 0x1f);
          __ sllv(arr_size, length, AT);
          __ srl(t1, t1, Klass::_lh_header_size_shift);
          __ andi(t1, t1, Klass::_lh_header_size_mask);
          __ add(arr_size, t1, arr_size);
          __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask);  // align up
          __ move(AT, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, AT);
          __ eden_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
          __ initialize_header(obj, klass, length, t1, t2);
          __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
                                    + (Klass::_lh_header_size_shift / BitsPerByte)));
          __ andi(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start

          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg,
                                   CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg,
                                   CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_V0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
    case new_multi_array_id:
      {
        StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // refer to c1_LIRGenerator_mips.cpp: do_NewMultiArray
        // V0: klass
        // T2: rank
        // T0: address of 1st dimension
        //__ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), A1, A2, A3);
        //OopMap* map = save_live_registers(sasm, 4);
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset = __ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array),
                                     V0, T2, T0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        //FIXME
        restore_live_registers_except_V0(sasm);
        // V0: new multi array
        __ verify_oop(V0);
      }
      break;
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of the stack.
        // See LIRGenerator::do_RegisterFinalizer, call_runtime.
        __ move(V0, A0);
        __ verify_oop(V0);
        // load the klass and check the has-finalizer flag
        Label register_finalizer;
#ifndef _LP64
        Register t = T5;
#else
        Register t = A5;
#endif
        //__ ld_ptr(t, Address(V0, oopDesc::klass_offset_in_bytes()));
        __ load_klass(t, V0);
        __ lw(t, Address(t, Klass::access_flags_offset()));
        __ move(AT, JVM_ACC_HAS_FINALIZER);
        __ andr(AT, AT, t);

        __ bne(AT, R0, register_finalizer);
        __ delayed()->nop();
        __ jr(RA);
        __ delayed()->nop();
        __ bind(register_finalizer);
        __ enter();
        OopMap* map = save_live_registers(sasm, 0 /*num_rt_args */);

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
              SharedRuntime::register_finalizer), V0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;

//  case range_check_failed_id:
  case throw_range_check_failed_id:
    {
      StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
            throw_range_check_exception), true);
    }
    break;
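
  // The throw_* stubs below all follow the same pattern:
  // generate_exception_throw builds a frame, saves the live registers and
  // calls the given runtime entry. Its last parameter says whether the
  // exception takes an argument (e.g. the failing index), which is then
  // passed to the runtime in A1 via call_RT.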
  case throw_index_exception_id:
    {
      // A1 is used as the index register here, since the index will be
      // the first argument of the runtime call; see call_RT.
      StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
            throw_index_exception), true);
    }
    break;

  case throw_div0_exception_id:
    {
      StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
            throw_div0_exception), false);
    }
    break;

  case throw_null_pointer_exception_id:
    {
      StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
            throw_null_pointer_exception), false);
    }
    break;

  case handle_exception_nofpu_id:
    save_fpu_registers = false;
    // fall through
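    // The nofpu variants are used when the compiled code holds no live
    // floating-point values, so saving and restoring the FPU registers
    // can be skipped.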
  case handle_exception_id:
    {
      StubFrame f(sasm, "handle_exception", dont_gc_arguments);
      //OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
      oop_maps = generate_handle_exception(id, sasm);
    }
    break;

  case handle_exception_from_callee_id:
    {
      StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
      oop_maps = generate_handle_exception(id, sasm);
    }
    break;

  case unwind_exception_id:
    {
      __ set_info("unwind_exception", dont_gc_arguments);
      // Remove the activation of the method that raised the exception
      // and continue at the exception handler of the caller.
      generate_unwind_exception(sasm);
    }
    break;

  case throw_array_store_exception_id:
    {
      StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
      // tos + 0: link
      //     + 1: return address
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
            throw_array_store_exception), false);
    }
    break;

  case throw_class_cast_exception_id:
    {
      StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
            throw_class_cast_exception), true);
    }
    break;

  case throw_incompatible_class_change_error_id:
    {
      StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
      oop_maps = generate_exception_throw(sasm,
            CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
    }
    break;

  case slow_subtype_check_id:
    {
      // Note: this stub is not actually used on this platform.
      // A0: klass_RInfo    sub
      // A1: k->encoding()  super
      __ set_info("slow_subtype_check", dont_gc_arguments);
      __ st_ptr(T0, SP, (-1) * wordSize);
      __ st_ptr(T1, SP, (-2) * wordSize);
      __ addiu(SP, SP, (-2) * wordSize);

      // was: Klass::secondary_supers_offset_in_bytes()
      __ ld_ptr(AT, A0, in_bytes(Klass::secondary_supers_offset()));
      __ lw(T1, AT, arrayOopDesc::length_offset_in_bytes());
      __ addiu(AT, AT, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
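
      // Linear scan of the secondary supers array. A hit records A1 in
      // the secondary super cache and returns 1 in V0; falling off the
      // end of the array returns 0 (miss).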
      Label miss, hit, loop;
      // T1: count, AT: array, A1: the super klass we are searching for
      __ bind(loop);
      __ beq(T1, R0, miss);
#ifndef _LP64
      __ delayed()->lw(T0, AT, 0);
#else
      __ delayed()->ld(T0, AT, 0);
#endif
      __ beq(T0, A1, hit);
      __ delayed();
      __ addiu(T1, T1, -1);
      __ b(loop);
      __ delayed();
      __ addiu(AT, AT, 1 * wordSize); // element step; must match the lw/ld width above (was a fixed 4)

      __ bind(hit);
      // was: Klass::secondary_super_cache_offset_in_bytes()
      __ st_ptr(A1, A0, in_bytes(Klass::secondary_super_cache_offset()));
      __ addiu(V0, R0, 1);
      __ addiu(SP, SP, 2 * wordSize);
      __ ld_ptr(T0, SP, (-1) * wordSize);
      __ ld_ptr(T1, SP, (-2) * wordSize);
      __ jr(RA);
      __ delayed()->nop();

      __ bind(miss);
      __ move(V0, R0);
      __ addiu(SP, SP, 2 * wordSize);
      __ ld_ptr(T0, SP, (-1) * wordSize);
      __ ld_ptr(T1, SP, (-2) * wordSize);
      __ jr(RA);
      __ delayed()->nop();
    }
    break;

  case monitorenter_nofpu_id:
    save_fpu_registers = false;
    // fall through
  case monitorenter_id:
    {
      StubFrame f(sasm, "monitorenter", dont_gc_arguments);
      OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);

      f.load_argument(1, V0); // V0: object
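      // The object and the BasicLock address were stored into the stub
      // frame by the emitting code; f.load_argument(n, reg) reloads stub
      // argument n from the caller's frame.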
#ifndef _LP64
      f.load_argument(0, T6); // T6: lock address
      int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
            monitorenter), V0, T6);
#else
      f.load_argument(0, A6); // A6: lock address
      int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
            monitorenter), V0, A6);
#endif

      oop_maps = new OopMapSet();
      oop_maps->add_gc_map(call_offset, map);
      restore_live_registers(sasm, save_fpu_registers);
    }
    break;

  case monitorexit_nofpu_id:
    save_fpu_registers = false;
    // fall through
  case monitorexit_id:
    {
      StubFrame f(sasm, "monitorexit", dont_gc_arguments);
      OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);

#ifndef _LP64
      f.load_argument(0, T6); // T6: lock address
#else
      f.load_argument(0, A6); // A6: lock address
#endif
      // note: really a leaf routine but must setup last java sp
      //       => use call_RT for now (speed can be improved by
      //       doing last java sp setup manually)
#ifndef _LP64
      int call_offset = __ call_RT(noreg, noreg,
                                   CAST_FROM_FN_PTR(address, monitorexit), T6);
#else
      int call_offset = __ call_RT(noreg, noreg,
                                   CAST_FROM_FN_PTR(address, monitorexit), A6);
#endif
      oop_maps = new OopMapSet();
      oop_maps->add_gc_map(call_offset, map);
      restore_live_registers(sasm, save_fpu_registers);
    }
    break;

//  case init_check_patching_id:
  case access_field_patching_id:
    {
      StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
      // we should set up register map
      oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
    }
    break;
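
  // The *_patching stubs call into the runtime to resolve the field,
  // klass, mirror or appendix the unpatched code refers to, patch the
  // calling code in place, and then re-execute the fixed-up instruction
  // sequence.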
  case load_klass_patching_id:
    {
      StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
      // we should set up register map
      oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address,
            move_klass_patching));
    }
    break;

/*
  case jvmti_exception_throw_id:
    {
      // V0: exception oop
      // V1: exception pc
      StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
      // Preserve all registers across this potentially blocking call
      const int num_rt_args = 2;  // thread, exception oop
      //OopMap* map = save_live_registers(sasm, num_rt_args);
      OopMap* map = save_live_registers(sasm, 0);
      int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
            Runtime1::post_jvmti_exception_throw), V0);
      oop_maps = new OopMapSet();
      oop_maps->add_gc_map(call_offset, map);
      restore_live_registers(sasm);
    }
*/
  case load_mirror_patching_id:
    {
      StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
      oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
    }
    break;

  case load_appendix_patching_id:
    {
      StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
      // we should set up register map
      oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
    }
    break;

  case dtrace_object_alloc_id:
    {
      // V0: object
      StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
      // we can't gc here so skip the oopmap but make sure that all
      // the live registers get saved.
      save_live_registers(sasm, 0);

      __ push_reg(V0);
      __ move(A0, V0);
      __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
              relocInfo::runtime_call_type);
      __ super_pop(V0);
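      // The call above goes out directly instead of through call_RT, so
      // no oop map is recorded; V0 (the freshly allocated object) is
      // preserved explicitly around it since V0 is caller-saved in the
      // C calling convention.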

      restore_live_registers(sasm);
    }
    break;

  case fpu2long_stub_id:
    {
      //FIXME: not implemented yet; no idea how to port this.
      //tty->print_cr("fpu2long_stub_id unimplemented yet!");
    }
    break;

  case deoptimize_id:
    {
      StubFrame f(sasm, "deoptimize", dont_gc_arguments);
      const int num_rt_args = 1;  // thread
      OopMap* oop_map = save_live_registers(sasm, num_rt_args);
      int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
      oop_maps = new OopMapSet();
      oop_maps->add_gc_map(call_offset, oop_map);
      restore_live_registers(sasm);
      DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
      assert(deopt_blob != NULL, "deoptimization blob must have been created");
      __ leave();
      __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
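      // Rather than returning to the compiled caller, tail-jump into the
      // deoptimization blob so the deoptimized frame is unpacked and the
      // trapping bytecode is re-executed in the interpreter.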
    }
    break;

  case predicate_failed_trap_id:
    {
      StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);

      OopMap* map = save_live_registers(sasm, 1);

      int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
      oop_maps = new OopMapSet();
      oop_maps->add_gc_map(call_offset, map);
      restore_live_registers(sasm);
      __ leave();
      DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
      assert(deopt_blob != NULL, "deoptimization blob must have been created");

      __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
    }
    break;

  default:
    {
      StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
      __ move(A1, (int)id);
      __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), A1);
      __ should_not_reach_here();
    }
    break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
