src/cpu/mips/vm/c1_Runtime1_mips.cpp

author      aoqi
date        Tue, 31 May 2016 00:22:06 -0400
changeset   16:3cedde979d75
parent      1:2d8a650513c2
child       373:3a34fc828b4a
permissions -rw-r--r--

[Code Reorganization] load_two_bytes_from_at_bcp -> get_2_byte_integer_at_bcp
remove useless MacroAssembler::store_two_byts_to_at_bcp
change MacroAssembler::load_two_bytes_from_at_bcp to InterpreterMacroAssembler::get_2_byte_integer_at_bcp
change MacroAssembler::get_4_byte_integer_at_bcp to InterpreterMacroAssembler::get_4_byte_integer_at_bcp

/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_mips.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_mips.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_mips.inline.hpp"
// Implementation of StubAssembler
// This method preserves stack space for the arguments indicated by args_size.
// For stack-alignment reasons it cannot be called with arguments on the stack;
// if you need more than 3 arguments you must implement the call yourself.
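// Illustrative usage (a sketch, mirroring the stubs later in this file, not a
// call made at this point): a stub body typically pairs call_RT with a GC map,
//
//   int call_offset = __ call_RT(obj, noreg,
//                                CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
//
// where the single register argument is moved into A1 and is covered by
// args_size == 1 (the thread in A0 is counted separately).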
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
  // S7 (TREG) plays the role that edi plays in the x86 version.
  // setup registers
  const Register thread = TREG; // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2,
         "registers must be different");
  assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
#ifndef OPT_THREAD
  get_thread(thread);
#endif
  move(A0, thread);

  set_last_Java_frame(thread, NOREG, FP, NULL);
  NOT_LP64(addi(SP, SP, - wordSize * (1 + args_size)));
  move(AT, -(StackAlignmentInBytes));
  andr(SP, SP, AT);

  relocate(relocInfo::internal_pc_type);
  {
#ifndef _LP64
    int save_pc = (int)pc() + 12 + NativeCall::return_address_offset;
    lui(AT, Assembler::split_high(save_pc));
    addiu(AT, AT, Assembler::split_low(save_pc));
#else
    uintptr_t save_pc = (uintptr_t)pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset;
    li48(AT, save_pc);
#endif
  }
  st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
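  // A sketch of the save_pc arithmetic above (LP64 case): pc() points at the
  // li48 that materializes save_pc itself; NativeMovConstReg::instruction_size
  // skips that li48, one BytesPerInstWord skips the st_ptr, and
  // NativeCall::return_address_offset skips the call sequence emitted below,
  // so save_pc ends up as the return address after the jalr's delay slot --
  // exactly what last_Java_pc must point to. The 32-bit branch encodes the
  // same idea, with "+ 12" covering the lui/addiu/st_ptr triple.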
  // do the call
//#define aoqi_test
#ifdef aoqi_test
  tty->print_cr("StubRuntime::%s:%d entry: %lx", __func__, __LINE__, entry);
#endif
#ifndef _LP64
  lui(T9, Assembler::split_high((int)entry));
  addiu(T9, T9, Assembler::split_low((int)entry));
#else
  li48(T9, (intptr_t)entry);
#endif
  jalr(T9);
  delayed()->nop();
  int call_offset = offset();

  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != V0, "change this code");
  push(V0);
  {
    Label L;
    get_thread(V0);
    beq(thread, V0, L);
    delayed()->nop();
    int3();
    stop("StubAssembler::call_RT: edi not callee saved?");
    bind(L);
  }
  super_pop(V0);
#endif
  // discard thread and arguments
  ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); // by yyq
  // FIXME: in the x86 version the second parameter is false -- why true here? @jerome, 12/31/06
  //  reset_last_Java_frame(thread, true);
  reset_last_Java_frame(thread, true, false);
  // check for pending exceptions
  {
    Label L;
    ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
    beq(AT, R0, L);
    delayed()->nop();
    // exception pending => remove activation and forward to exception handler
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
    }
    if (oop_result2->is_valid()) {
      st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
    }
    // the leave() on x86 just pops ebp and leaves the return address on top
    // of the stack; the return address will be needed by forward_exception_entry()
    if (frame_size() == no_frame_size) {
      addiu(SP, FP, wordSize);
      ld_ptr(FP, SP, (-1) * wordSize);
      jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
      delayed()->nop();
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
          relocInfo::runtime_call_type);
      delayed()->nop();
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    ld_ptr(oop_result1, thread, in_bytes(JavaThread::vm_result_offset()));
    st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
    verify_oop(oop_result1);
  }
  if (oop_result2->is_valid()) {
    ld_ptr(oop_result2, thread, in_bytes(JavaThread::vm_result_2_offset()));
    st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
    verify_oop(oop_result2);
  }
  return call_offset;
}
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
  if (arg1 != A1) move(A1, arg1);
  return call_RT(oop_result1, oop_result2, entry, 1);
}

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
  if (arg1 != A1) move(A1, arg1);
  if (arg2 != A2) move(A2, arg2);
  assert(arg2 != A1, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 2);
}

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
  if (arg1 != A1) move(A1, arg1);
  if (arg2 != A2) move(A2, arg2);
  assert(arg2 != A1, "smashed argument");
  if (arg3 != A3) move(A3, arg3);
  assert(arg3 != A1 && arg3 != A2, "smashed argument");
  return call_RT(oop_result1, oop_result2, entry, 3);
}
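// The "smashed argument" asserts above document the fill order: arg1 is moved
// into A1 first, so if a caller passed arg2 == A1 its value would already have
// been overwritten by the time A2 is filled, and likewise for arg3 vs. A1/A2.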
// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);
  ~StubFrame();
};

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// FIXME: the MIPS frame layout assumed here still needs to be verified.
// Load parameters that were stored with LIR_Assembler::store_parameter.
// Note: offsets for store_parameter and load_argument must match.
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // fp + 0: link
  //    + 1: return address
  //    + 2: argument with offset 0
  //    + 3: argument with offset 1
  //    + 4: ...
  //__ movl(reg, Address(ebp, (offset_in_words + 2) * BytesPerWord));
  __ ld_ptr(reg, Address(FP, (offset_in_words + 2) * BytesPerWord));
}
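// Worked instance of the layout above (hypothetical register choice):
// load_argument(0, T3) reads FP + 2*BytesPerWord, i.e. the first word the
// caller stored above the saved FP/return-address pair, and
// load_argument(1, T3) reads the word after it.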
StubFrame::~StubFrame() {
  __ leave();
  __ jr(RA);
  __ delayed()->nop();
}
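// StubFrame is a StackObj, so prologue and epilogue bracket a C++ scope.
// A sketch of the intended pattern (the new_multi_array_id stub below uses it):
//
//   { StubFrame f(sasm, "some_stub", dont_gc_arguments);   // emits enter()
//     // ... stub body ...
//   }                                                      // emits leave(); jr(RA); nop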
#undef __

// Implementation of Runtime1

#define __ sasm->

//static OopMap* save_live_registers(MacroAssembler* sasm, int num_rt_args);
//static void restore_live_registers(MacroAssembler* sasm);
//DeoptimizationBlob* SharedRuntime::_deopt_blob = NULL;
/*
const int fpu_stack_as_doubles_size_in_words = 16;
const int fpu_stack_as_doubles_size = 64;
*/
const int float_regs_as_doubles_size_in_words = 16;

// FIXME
// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers takes the argument count into account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64
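// A minimal sketch of what these macros do to the enum below: with _LP64
// defined, "FP_off = ..., SLOT2(FPH_off)" expands to "FP_off = ..., FPH_off,",
// i.e. every 64-bit register gets a low and a high 32-bit VMReg slot
// (SLOT_PER_WORD == 2); in the 32-bit build the *H_off names disappear and
// each register occupies a single slot.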
enum reg_save_layout {
#ifndef _LP64
  T0_off = 0,
  S0_off = T0_off + SLOT_PER_WORD * 8,
#else
  A4_off = 0,
  S0_off = A4_off + SLOT_PER_WORD * 8,
#endif
  FP_off = S0_off + SLOT_PER_WORD * 8, SLOT2(FPH_off)
  T8_off, SLOT2(T8H_off)
  T9_off, SLOT2(T9H_off)
  SP_off, SLOT2(SPH_off)
  V0_off, SLOT2(V0H_off)
  V1_off, SLOT2(V1H_off)
  A0_off, SLOT2(A0H_off)
  A1_off, SLOT2(A1H_off)
  A2_off, SLOT2(A2H_off)
  A3_off, SLOT2(A3H_off)

  // Float registers
  /* FIXME: Jin: In MIPS64, F0~F23 are all caller-saved registers */
#if 1
  F0_off,  SLOT2( F0H_off)
  F1_off,  SLOT2( F1H_off)
  F2_off,  SLOT2( F2H_off)
  F3_off,  SLOT2( F3H_off)
  F4_off,  SLOT2( F4H_off)
  F5_off,  SLOT2( F5H_off)
  F6_off,  SLOT2( F6H_off)
  F7_off,  SLOT2( F7H_off)
  F8_off,  SLOT2( F8H_off)
  F9_off,  SLOT2( F9H_off)
  F10_off, SLOT2(F10H_off)
  F11_off, SLOT2(F11H_off)
  F12_off, SLOT2(F12H_off)
  F13_off, SLOT2(F13H_off)
  F14_off, SLOT2(F14H_off)
  F15_off, SLOT2(F15H_off)
  F16_off, SLOT2(F16H_off)
  F17_off, SLOT2(F17H_off)
  F18_off, SLOT2(F18H_off)
  F19_off, SLOT2(F19H_off)
#endif

  GP_off, SLOT2(GPH_off)
  //temp_2_off,
  temp_1_off, SLOT2(temp_1H_off)
  saved_fp_off, SLOT2(saved_fpH_off)
  return_off, SLOT2(returnH_off)

  reg_save_frame_size,

  // illegal instruction handler
  continue_dest_off = temp_1_off,

  // deoptimization equates
  //deopt_type = temp_2_off,             // slot for type of deopt in progress
  ret_type = temp_1_off                  // slot for return type
};
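// Back-of-the-envelope check of the LP64 layout (a sketch, not asserted by the
// code): the A4..T3 block and the S0..S7 block hold 8 registers each, the
// FP/T8/T9/SP/V0/V1/A0..A3 group adds 10 more, all at 2 slots apiece
// (52 slots); the 20 float registers add 40 slots and GP/temp_1/saved_fp/return
// add 8, giving reg_save_frame_size == 100 slots, i.e. 50 words.
// save_live_registers() below subtracts (reg_save_frame_size / SLOT_PER_WORD - 2)
// words from SP, presumably because enter() has already pushed FP and RA.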
// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.
// FIXME: I have no idea which registers should be saved. @jerome
static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
    bool save_fpu_registers = true, bool describe_fpu_registers = false) {
  /* Jin: num_rt_args is counted in 8-byte words. */
  int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / SLOT_PER_WORD;   // args + thread
  sasm->set_frame_size(frame_size_in_slots / SLOT_PER_WORD);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments
  // in call, including thread
  OopMap* map = new OopMap(reg_save_frame_size, 0);

  map->set_callee_saved(VMRegImpl::stack2reg(V0_off + num_rt_args), V0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(V1_off + num_rt_args), V1->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(V0H_off + num_rt_args), V0->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(V1H_off + num_rt_args), V1->as_VMReg()->next());
#endif
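  // For instance (a sketch): in the common call_RT case num_rt_args == 1
  // (the thread argument only), so V0 is described at slot V0_off + 1 --
  // every offset is biased by the argument words that sit between SP and
  // the register save area after the runtime call.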
  int i = 0;
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    map->set_callee_saved(VMRegImpl::stack2reg(T0_off + num_rt_args + i++), r->as_VMReg());
  }
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg()->next());
  }
#endif

  i = 0;
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg());
#ifdef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg()->next());
#endif
  }

  map->set_callee_saved(VMRegImpl::stack2reg(FP_off + num_rt_args), FP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(GP_off + num_rt_args), GP->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(T8_off + num_rt_args), T8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(T9_off + num_rt_args), T9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A0_off + num_rt_args), A0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A1_off + num_rt_args), A1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A2_off + num_rt_args), A2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(A3_off + num_rt_args), A3->as_VMReg());

#if 1
  map->set_callee_saved(VMRegImpl::stack2reg(F0_off + num_rt_args), F0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F1_off + num_rt_args), F1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F2_off + num_rt_args), F2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F3_off + num_rt_args), F3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F4_off + num_rt_args), F4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F5_off + num_rt_args), F5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F6_off + num_rt_args), F6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F7_off + num_rt_args), F7->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F8_off + num_rt_args), F8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F9_off + num_rt_args), F9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F10_off + num_rt_args), F10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F11_off + num_rt_args), F11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F12_off + num_rt_args), F12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F13_off + num_rt_args), F13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F14_off + num_rt_args), F14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F15_off + num_rt_args), F15->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F16_off + num_rt_args), F16->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F17_off + num_rt_args), F17->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F18_off + num_rt_args), F18->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(F19_off + num_rt_args), F19->as_VMReg());
#endif

#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(FPH_off + num_rt_args), FP->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(GPH_off + num_rt_args), GP->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(T8H_off + num_rt_args), T8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(T9H_off + num_rt_args), T9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A0H_off + num_rt_args), A0->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A1H_off + num_rt_args), A1->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A2H_off + num_rt_args), A2->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(A3H_off + num_rt_args), A3->as_VMReg()->next());
#endif
  return map;
}
// FIXME: is it enough to save these registers? (yyq)
static OopMap* save_live_registers(StubAssembler* sasm,
                                   int num_rt_args,
                                   bool save_fpu_registers = true,
                                   bool describe_fpu_registers = false) {
  //const int reg_save_frame_size = return_off + 1 + num_rt_args;
  __ block_comment("save_live_registers");

  // save all register state - int, fpu
  __ addi(SP, SP, -(reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);

#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    __ sw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    __ sd(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
#endif
  }
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    __ st_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
  }
  __ st_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
  __ st_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);

#if 1
  __ sdc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
  __ sdc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
#endif

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers, describe_fpu_registers);
}
static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  //static void restore_live_registers(MacroAssembler* sasm) {
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
#endif
  }
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
  }
  __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);

  __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);

  __ ld_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);

#if 1
  __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
#endif

  __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
}
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");
  restore_fpu(sasm, restore_fpu_registers);
}
static void restore_live_registers_except_V0(StubAssembler* sasm, bool restore_fpu_registers = true) {
  //static void restore_live_registers(MacroAssembler* sasm) {
  // FIXME: maybe V1 needs to be saved too
  __ block_comment("restore_live_registers except V0");
#ifndef _LP64
  for (Register r = T0; r != T7->successor(); r = r->successor() ) {
    __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
#else
  for (Register r = A4; r != T3->successor(); r = r->successor() ) {
    __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
#endif
  }
  for (Register r = S0; r != S7->successor(); r = r->successor() ) {
    __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
  }
  __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);

  __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
  __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);

#if 1
  __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
  __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
#endif

  __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);

  __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
}
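// Note: V0 is deliberately not reloaded above -- callers use this variant when
// V0 carries the stub's result (e.g. a newly allocated object) across the
// register restore; V1 is still restored, despite the FIXME.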
void Runtime1::initialize_pd() {
  // nothing to do
}
// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  OopMap* oop_map = save_live_registers(sasm, 0);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  // (all registers are saved; MIPS should not need this)

  // registers used by this stub
  const Register temp_reg = T3;
  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
    __ ld_ptr(temp_reg, Address(FP, 2 * BytesPerWord));
  }
  int call_offset;
  if (has_argument) {
    call_offset = __ call_RT(noreg, noreg, target, temp_reg);
  } else {
    call_offset = __ call_RT(noreg, noreg, target);
  }

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
// FIXME: unsure which register to use; T3 should probably serve as real_return_addr. @jerome
OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");
  // incoming parameters
  const Register exception_oop = V0;
  const Register exception_pc = V1;
  // other registers used in this stub
//  const Register real_return_addr = T3;
  const Register thread = T8;
  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the callers
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into V0 (rax on x86)
    __ ld(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ sw(R0, Address(thread, Thread::pending_exception_offset()));

    // load issuing PC (the return address for this stub) into V1 (rdx on x86)
    __ ld(exception_pc, Address(FP, 1 * BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ sw(R0, Address(thread, JavaThread::vm_result_offset()));
    __ sw(R0, Address(thread, JavaThread::vm_result_2_offset()));
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except the exception oop (V0) and
    // exception pc (V1) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }
#ifdef TIERED
  // C2 can leave the fpu stack dirty
  __ empty_FPU_stack();
#endif // TIERED

  // verify that only V0 and V1 are valid at this time
  // verify that V0 contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  __ get_thread(thread);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
  __ beq(AT, R0, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
  __ beq(AT, R0, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ st_ptr(exception_oop, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
  __ st_ptr(exception_pc, Address(thread, in_bytes(JavaThread::exception_pc_offset())));

  // save real return address (pc that called this stub)
//  __ ld_ptr(real_return_addr, FP, 1*BytesPerWord);
//  __ st_ptr(real_return_addr, SP, temp_1_off * BytesPerWord / SLOT_PER_WORD);

  // patch throwing pc into return address (has bci & oop map)
  __ st_ptr(exception_pc, FP, 1 * BytesPerWord);
  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
                               exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);
  // V0: handler address, or NULL if no handler exists;
  //     will be the deopt blob if the nmethod was deoptimized while we looked up
  //     the handler, regardless of whether a handler existed in the nmethod.

  // only V0 is valid at this time, all other registers have been destroyed by the
  // runtime call

  // Do we have an exception handler in the nmethod?
  /*Label no_handler;
  Label done;
  __ beq(V0, R0, no_handler);
  __ delayed()->nop(); */
  // exception handler found
  // patch the return address -> the stub will directly return to the exception handler
  __ st_ptr(V0, FP, 1 * BytesPerWord);

  // restore registers
//  restore_live_registers(sasm, save_fpu_registers);

  // return to exception handler
//  __ leave();
//  __ jr(RA);
//  __ delayed()->nop();
//  __ bind(no_handler);
  // no exception handler found in this method, so the exception is
  // forwarded to the caller (using the unwind code of the nmethod)
  // there is no need to restore the registers

  // restore the real return address that was saved before the RT-call
//  __ ld_ptr(real_return_addr, SP, temp_1_off * BytesPerWord / SLOT_PER_WORD);
//  __ st_ptr(real_return_addr, FP, 1 * BytesPerWord);
  // load address of JavaThread object for thread-local data
//  __ get_thread(thread);
  // restore exception oop into V0 (convention for unwind code)
//  __ ld_ptr(exception_oop, thread, in_bytes(JavaThread::exception_oop_offset()));

  // clear exception fields in JavaThread because they are no longer needed
  // (fields must be cleared because they are processed by GC otherwise)
//  __ st_ptr(R0, thread, in_bytes(JavaThread::exception_oop_offset()));
//  __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset()));
  // pop the stub frame off
//  __ leave();
//  generate_unwind_exception(sasm);
//  __ stop("should not reach here");
//}
  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    /*__ ld(AT, Address(thread, JavaThread::is_method_handle_return_offset()));
    __ beq(AT, R0, done);
    __ move(SP, rbp_mh_SP_save);
    __ bind(done);
    __ jr(RA);  // jump to exception handler
    __ delayed()->nop();*/
    // The x86 version does:
    //   __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    //   __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    //   __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}
void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = V0;
  // other registers used in this stub
  const Register exception_pc = V1;
  const Register handler_addr = T3;
  const Register thread = T8;

  // verify that only V0 is valid at this time
  //  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  __ get_thread(thread);
  Label oop_empty;
  __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset()));
  __ beq(AT, R0, oop_empty);
  __ delayed()->nop();
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_pc_offset()));
  __ beq(AT, R0, pc_empty);
  __ delayed()->nop();
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif
  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // leave activation of nmethod
  __ addi(SP, FP, wordSize);
  __ ld_ptr(FP, SP, - wordSize);
  // store return address (is on top of stack after leave)
  __ ld_ptr(exception_pc, SP, 0);
  __ verify_oop(exception_oop);

  // save exception oop from V0 to stack before call
  __ push(exception_oop);
  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                  SharedRuntime::exception_handler_for_return_address), exception_pc);
  // V0: exception handler address of the caller

  // only V0 is valid at this time, all other registers have been destroyed by the call

  // move result of call into correct register
  __ move(handler_addr, V0);
  // restore exception oop in V0 (required convention of exception handler)
  __ super_pop(exception_oop);

  __ verify_oop(exception_oop);

  // get throwing pc (= return address).
  // V1 has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ super_pop(exception_pc);
  // verify that there is really a valid exception in V0
  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // V0: exception oop
  // V1: throwing pc
  // T3: exception handler
  __ jr(handler_addr);
  __ delayed()->nop();
}
//static address deopt_with_exception_entry_for_patch = NULL;

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime arguments here because it is difficult to
  // distinguish the individual RT-Calls.
  // Note: This number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");
  // assert(deopt_with_exception_entry_for_patch != NULL,
  //        "deoptimization blob must have been created");

  //OopMap* oop_map = save_live_registers(sasm, num_rt_args);
  OopMap* oop_map = save_live_registers(sasm, 0);
#ifndef OPT_THREAD
  const Register thread = T8;
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
#else
  const Register thread = TREG;
#endif
  __ move(A0, thread);

  /*
   * NOTE: this frame should be a compiled frame, but at this point the pc in the
   * frame anchor is an interpreter pc. That is wrong and ought to be cleared, but
   * it is not. Even if we cleared the wrong pc in the anchor, the default way of
   * getting the caller pc in a compiled frame would still not be right here: it
   * assumes the caller pc is stored at *(sp - 1), which is not the case.
   */
  __ set_last_Java_frame(thread, NOREG, FP, NULL);
  NOT_LP64(__ addiu(SP, SP, (-1) * wordSize));
  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);
  __ relocate(relocInfo::internal_pc_type);
  {
#ifndef _LP64
    int save_pc = (int)__ pc() + 12 + NativeCall::return_address_offset;
    __ lui(AT, Assembler::split_high(save_pc));
    __ addiu(AT, AT, Assembler::split_low(save_pc));
#else
    uintptr_t save_pc = (uintptr_t)__ pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset;
    __ li48(AT, save_pc);
#endif
  }
  __ st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));

  // do the call
#ifndef _LP64
  __ lui(T9, Assembler::split_high((int)target));
  __ addiu(T9, T9, Assembler::split_low((int)target));
#else
  __ li48(T9, (intptr_t)target);
#endif
  __ jalr(T9);
  __ delayed()->nop();
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);

#ifndef OPT_THREAD
  __ get_thread(thread);
#endif

  __ ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
  __ reset_last_Java_frame(thread, true, true);
  // discard thread arg
  // check for pending exceptions
  {
    Label L, skip;
    //Label no_deopt;
    __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
    __ beq(AT, R0, L);
    __ delayed()->nop();
    // exception pending => remove activation and forward to exception handler

    __ bne(V0, R0, skip);
    __ delayed()->nop();
    __ jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
           relocInfo::runtime_call_type);
    __ delayed()->nop();
    __ bind(skip);

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ ld_ptr(V0, Address(thread, in_bytes(Thread::pending_exception_offset())));
    __ st_ptr(R0, Address(thread, in_bytes(Thread::pending_exception_offset())));

    // check that there is really a valid exception
    __ verify_not_null_oop(V0);

    // load throwing pc: this is the return address of the stub
    __ ld_ptr(V1, Address(SP, return_off * BytesPerWord));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
    __ beq(AT, R0, oop_empty);
    __ delayed()->nop();
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
    __ beq(AT, R0, pc_empty);
    __ delayed()->nop();
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ st_ptr(V0, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
    __ st_ptr(V1, Address(thread, in_bytes(JavaThread::exception_pc_offset())));

    restore_live_registers(sasm);

    __ leave();

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack.  A patch may
    // have values live in registers, so use the entry point with the
    // exception in tls.
    __ jmp(deopt_blob->unpack_with_exception_in_tls(), relocInfo::runtime_call_type);
    __ delayed()->nop();

    __ bind(L);
  }

  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.
  Label reexecuteEntry, cont;

  __ beq(V0, R0, cont);                              // have we deoptimized?
  __ delayed()->nop();

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);

  __ leave();
  __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
  __ delayed()->nop();

  __ bind(cont);
  restore_live_registers(sasm);

  __ leave();
  __ jr(RA);
  __ delayed()->nop();

  return oop_maps;
}
OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called
  // from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;

  switch (id) {
    case forward_exception_id:
      {
        // we're handling an exception in the context of a compiled
        // frame.  The registers have been saved in the standard
        // places.  Perform an exception lookup in the caller and
        // dispatch to the handler if found.  Otherwise unwind and
        // dispatch to the callers exception handler.

        const Register exception_oop = V0;
        const Register exception_pc = V1;
#ifndef OPT_THREAD
        const Register thread = T8;
        __ get_thread(thread);
#else
        const Register thread = TREG;
#endif
        // load pending exception oop into V0
        __ ld_ptr(exception_oop, thread, in_bytes(Thread::pending_exception_offset()));
        // clear pending exception
        __ st_ptr(R0, thread, in_bytes(Thread::pending_exception_offset()));

        // load issuing PC (the return address for this stub) into V1
        __ ld_ptr(exception_pc, FP, 1 * BytesPerWord);

        // make sure that the vm_results are cleared (may be unnecessary)
        __ st_ptr(R0, Address(thread, in_bytes(JavaThread::vm_result_offset())));
        __ st_ptr(R0, Address(thread, in_bytes(JavaThread::vm_result_2_offset())));

        // verify that there is really a valid exception in V0
        __ verify_not_null_oop(exception_oop);

        oop_maps = new OopMapSet();
        OopMap* oop_map = generate_oop_map(sasm, 0);
        generate_handle_exception(id, sasm);
        __ stop("should not reach here");
      }
      break;
    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        // T4 (A4 on LP64) is used as the klass register, V0 as the result register.
        // MUST accord with NewInstanceStub::emit_code.
#ifndef _LP64
        Register klass = T4; // Incoming
#else
        Register klass = A4; // Incoming
#endif
        Register obj   = V0; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id)
            && UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = T0;
          Register t1       = T2;
          Register t2       = T3;
          assert_different_registers(klass, obj, obj_size, t1, t2);
          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ lw(AT, klass, in_bytes(InstanceKlass::init_state_offset()));
            __ move(t1, InstanceKlass::fully_initialized);
            __ bne(AT, t1, slow_path);
            __ delayed()->nop();
          }
#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
            __ blez(obj_size, not_ok);
            __ delayed()->nop();
            __ andi(t1, obj_size, Klass::_lh_instance_slow_path_bit);
            __ beq(t1, R0, ok);
            __ delayed()->nop();
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT
          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.

          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy the klass register

          __ bind(retry_tlab);

          // get the instance size
          __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(try_eden);

          // get the instance size
          __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
          __ eden_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(slow_path);
        }
        __ enter();
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_V0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();

        // V0: new instance
      }
      break;
#ifdef TIERED
    // FIXME: I have no idea which register to use
    case counter_overflow_id:
      {
#ifndef _LP64
        Register bci = T5;
#else
        Register bci = A5;
#endif
        __ enter();
        OopMap* map = save_live_registers(sasm, 0);
        // Retrieve bci
        __ lw(bci, Address(FP, 2 * BytesPerWord)); // FIXME: wuhui. ebp==??
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
#endif // TIERED
  1204     case new_type_array_id:
  1205     case new_object_array_id:
  1207         // i use T2 as length register, T4 as klass register, V0 as result register. 
  1208         // MUST accord with NewTypeArrayStub::emit_code, NewObjectArrayStub::emit_code
  1209         Register length   = T2; // Incoming
  1210 #ifndef _LP64
  1211         Register klass    = T4; // Incoming
  1212 #else
  1213         Register klass    = A4; // Incoming
  1214 #endif
  1215         Register obj      = V0; // Result
  1217         if (id == new_type_array_id) {
  1218           __ set_info("new_type_array", dont_gc_arguments);
  1219         } else {
  1220           __ set_info("new_object_array", dont_gc_arguments);
  1223         if (UseTLAB && FastTLABRefill) {
  1224           Register arr_size = T0;
  1225           Register t1       = T1; 
  1226           Register t2       = T3;
  1227           Label slow_path;
  1228           assert_different_registers(length, klass, obj, arr_size, t1, t2);
  1230           // check that array length is small enough for fast path
  1231           __ move(AT, C1_MacroAssembler::max_array_allocation_length);
  1232           __ sltu(AT, AT, length);
  1233           __ bne(AT, R0, slow_path);
  1234           __ delayed()->nop();
  1236           // if we got here then the TLAB allocation failed, so try
  1237           // refilling the TLAB or allocating directly from eden.
  1238           Label retry_tlab, try_eden;
  1239           //T0,T1,T5,T8 have changed! 
  1240           __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves T2 & T4
  1242           __ bind(retry_tlab);
  1244           // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
  1245           __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));	 
  1246           __ andi(AT, t1, 0x1f);
  1247           __ sllv(arr_size, length, AT);
  1248           __ srl(t1, t1, Klass::_lh_header_size_shift);
  1249           __ andi(t1, t1, Klass::_lh_header_size_mask);
  1250           __ add(arr_size, t1, arr_size);
  1251           __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask);  // align up
  1252           __ move(AT, ~MinObjAlignmentInBytesMask);
  1253           __ andr(arr_size, arr_size, AT);
  1256           __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
  1257           __ initialize_header(obj, klass, length,t1,t2);
  1258           __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) 
  1259                                     + (Klass::_lh_header_size_shift / BitsPerByte)));
  1260           assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
  1261           assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
  1262           __ andi(t1, t1, Klass::_lh_header_size_mask);
  1263           __ sub(arr_size, arr_size, t1);  // body length
  1264           __ add(t1, t1, obj);             // body start
  1265           __ initialize_body(t1, arr_size, 0, t2);
  1266           __ verify_oop(obj);
  1267           __ jr(RA);
  1268           __ delayed()->nop();
          __ bind(try_eden);
          // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
          __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
          __ andi(AT, t1, 0x1f);
          __ sllv(arr_size, length, AT);
          __ srl(t1, t1, Klass::_lh_header_size_shift);
          __ andi(t1, t1, Klass::_lh_header_size_mask);
          __ add(arr_size, t1, arr_size);
          __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask);  // align up
          __ move(AT, ~MinObjAlignmentInBytesMask);
          __ andr(arr_size, arr_size, AT);
          __ eden_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size
          __ initialize_header(obj, klass, length, t1, t2);
          __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
                                    + (Klass::_lh_header_size_shift / BitsPerByte)));
          __ andi(t1, t1, Klass::_lh_header_size_mask);
          __ sub(arr_size, arr_size, t1);  // body length
          __ add(t1, t1, obj);             // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ jr(RA);
          __ delayed()->nop();

          __ bind(slow_path);
        }
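        // slow path: go through the runtime. All live registers are saved and
        // an oop map is recorded at the call site so a GC triggered by the
        // allocation can locate and update oops held in registers.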
        __ enter();
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg,
                                   CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg,
                                   CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_V0(sasm);
        __ verify_oop(obj);
        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
    case new_multi_array_id:
      {
        StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // see c1_LIRGenerator_mips.cpp, do_NewMultiArray:
        // V0: klass
        // T2: rank
        // T0: address of the first dimension
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset = __ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array),
                                     V0, T2, T0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_V0(sasm);
        // V0: the new multi array
        __ verify_oop(V0);
      }
      break;
    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of the stack.
        // see LIRGenerator::do_RegisterFinalizer, call_runtime
        __ move(V0, A0);
        __ verify_oop(V0);
        // load the klass and check the has-finalizer flag
        Label register_finalizer;
#ifndef _LP64
        Register t = T5;
#else
        Register t = A5;
#endif
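        // classes that override finalize() have JVM_ACC_HAS_FINALIZER set in
        // their access flags; only instances of such classes must be handed to
        // SharedRuntime::register_finalizer, everything else returns at once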
        __ load_klass(t, V0);
        __ lw(t, Address(t, Klass::access_flags_offset()));
        __ move(AT, JVM_ACC_HAS_FINALIZER);
        __ andr(AT, AT, t);
        __ bne(AT, R0, register_finalizer);
        __ delayed()->nop();
        __ jr(RA);
        __ delayed()->nop();

        __ bind(register_finalizer);
        __ enter();
        OopMap* map = save_live_registers(sasm, 0 /* num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
                                     SharedRuntime::register_finalizer), V0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
                                            throw_range_check_exception), true);
      }
      break;
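    // For the throw_*_id stubs, generate_exception_throw sets up a frame,
    // saves the live registers and calls the given runtime entry; the trailing
    // boolean states whether that entry takes an argument (such as the failing
    // index) besides the thread.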
    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        // A1 is used as the index register: it is passed as the first
        // argument, see call_RT
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
                                            throw_index_exception), true);
      }
      break;
    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
                                            throw_div0_exception), false);
      }
      break;
    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
                                            throw_null_pointer_exception), false);
      }
      break;
    case handle_exception_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;
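    // handle_exception_nofpu_id is used when no FPU registers are live at the
    // throw site, so their save/restore can be skipped. In the from_callee
    // variant only the exception oop and exception pc are still live when the
    // stub is entered.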
    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
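        // unwind the activation that raised the exception and jump to the
        // exception handler of the caller frame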
        generate_unwind_exception(sasm);
      }
      break;
    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
                                            throw_array_store_exception), false);
      }
      break;
    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        // the object that failed the cast is passed as the exception argument
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
                                            throw_class_cast_exception), true);
      }
      break;
    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_change_error", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm,
                   CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;
    case slow_subtype_check_id:
      {
        // note: this stub is currently not used on MIPS
        // A0: sub klass (klass_RInfo)
        // A1: super klass (k->encoding())
        __ set_info("slow_subtype_check", dont_gc_arguments);
        // preserve T0 and T1
        __ addiu(SP, SP, (-2) * wordSize);
        __ st_ptr(T0, SP, 1 * wordSize);
        __ st_ptr(T1, SP, 0 * wordSize);

        __ ld_ptr(AT, A0, in_bytes(Klass::secondary_supers_offset()));
        __ lw(T1, AT, arrayOopDesc::length_offset_in_bytes());
        __ addiu(AT, AT, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
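        // secondary_supers lists the supertypes that are not on the klass's
        // fixed-depth primary chain (mainly interfaces); a subtype check that
        // misses the primary chain falls back to this linear scan, and a hit
        // is cached in secondary_super_cache below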
        Label miss, hit, loop;
        // T1: count, AT: current element address, A1: candidate super
        __ bind(loop);
        __ beq(T1, R0, miss);
#ifndef _LP64
        __ delayed()->lw(T0, AT, 0);
#else
        __ delayed()->ld(T0, AT, 0);
#endif
        __ beq(T0, A1, hit);
        __ delayed();
        __ addiu(T1, T1, -1);
        __ b(loop);
        __ delayed();
        __ addiu(AT, AT, wordSize);  // pointer-sized stride to match the load above

        __ bind(hit);
        // cache the successful lookup so the next check for this pair
        // succeeds without rescanning
        __ st_ptr(A1, A0, in_bytes(Klass::secondary_super_cache_offset()));
        __ addiu(V0, R0, 1);
        // restore T0 and T1 before releasing the stack slots
        __ ld_ptr(T1, SP, 0 * wordSize);
        __ ld_ptr(T0, SP, 1 * wordSize);
        __ addiu(SP, SP, 2 * wordSize);
        __ jr(RA);
        __ delayed()->nop();

        __ bind(miss);
        __ move(V0, R0);
        __ ld_ptr(T1, SP, 0 * wordSize);
        __ ld_ptr(T0, SP, 1 * wordSize);
        __ addiu(SP, SP, 2 * wordSize);
        __ jr(RA);
        __ delayed()->nop();
      }
      break;
    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
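        // the object and the lock address were stored as stub arguments by the
        // calling code; StubFrame::load_argument reads them back from the
        // caller's frame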
        f.load_argument(1, V0); // V0: object
#ifndef _LP64
        f.load_argument(0, T6); // T6: lock address
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
                                     monitorenter), V0, T6);
#else
        f.load_argument(0, A6); // A6: lock address
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
                                     monitorenter), V0, A6);
#endif

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);

#ifndef _LP64
        f.load_argument(0, T6); // T6: lock address
#else
        f.load_argument(0, A6); // A6: lock address
#endif
        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
#ifndef _LP64
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, monitorexit), T6);
#else
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, monitorexit), A6);
#endif
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
    case access_field_patching_id:
      {
        StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;
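    // The *_patching stubs call into the runtime, which resolves the still
    // unresolved field, klass or mirror and patches the calling code in place;
    // returning from the stub re-executes the now patched instruction.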
    case load_klass_patching_id:
      {
        StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;
/*  case jvmti_exception_throw_id:
      {
        // V0: exception oop
        // V1: exception pc
        StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
        // Preserve all registers across this potentially blocking call
        const int num_rt_args = 2;  // thread, exception oop
        OopMap* map = save_live_registers(sasm, 0);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
                                     Runtime1::post_jvmti_exception_throw), V0);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
*/
    case load_mirror_patching_id:
      {
        StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;
    case dtrace_object_alloc_id:
      {
        // V0: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't gc here so skip the oopmap but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 0);

        __ push_reg(V0);
        __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
                relocInfo::runtime_call_type);
        __ super_pop(V0);

        restore_live_registers(sasm);
      }
      break;
    case fpu2long_stub_id:
      // FIXME: not yet ported to MIPS; falls through to the unimplemented entry
    default:
      {
        StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ move(A1, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), A1);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}
