src/cpu/mips/vm/c1_Runtime1_mips.cpp

author: aoqi
date: Fri, 29 Apr 2016 00:06:10 +0800
changeset: 1 (2d8a650513c2)
child: 373 (3a34fc828b4a)
permissions: -rw-r--r--

Added MIPS 64-bit port.

aoqi@1 1 /*
aoqi@1 2 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@1 3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
aoqi@1 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@1 5 *
aoqi@1 6 * This code is free software; you can redistribute it and/or modify it
aoqi@1 7 * under the terms of the GNU General Public License version 2 only, as
aoqi@1 8 * published by the Free Software Foundation.
aoqi@1 9 *
aoqi@1 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@1 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@1 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@1 13 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@1 14 * accompanied this code).
aoqi@1 15 *
aoqi@1 16 * You should have received a copy of the GNU General Public License version
aoqi@1 17 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@1 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@1 19 *
aoqi@1 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@1 21 * or visit www.oracle.com if you need additional information or have any
aoqi@1 22 * questions.
aoqi@1 23 *
aoqi@1 24 */
aoqi@1 25
aoqi@1 26 #include "precompiled.hpp"
aoqi@1 27 #include "asm/assembler.hpp"
aoqi@1 28 #include "c1/c1_Defs.hpp"
aoqi@1 29 #include "c1/c1_MacroAssembler.hpp"
aoqi@1 30 #include "c1/c1_Runtime1.hpp"
aoqi@1 31 #include "interpreter/interpreter.hpp"
aoqi@1 32 #include "nativeInst_mips.hpp"
aoqi@1 33 #include "oops/compiledICHolder.hpp"
aoqi@1 34 #include "oops/oop.inline.hpp"
aoqi@1 35 #include "prims/jvmtiExport.hpp"
aoqi@1 36 #include "register_mips.hpp"
aoqi@1 37 #include "runtime/sharedRuntime.hpp"
aoqi@1 38 #include "runtime/signature.hpp"
aoqi@1 39 #include "runtime/vframeArray.hpp"
aoqi@1 40 #include "vmreg_mips.inline.hpp"
aoqi@1 41
aoqi@1 42
aoqi@1 43 // Implementation of StubAssembler
aoqi@1 44 // This method reserves stack space for arguments as indicated by args_size.
aoqi@1 45 // For stack alignment reasons, you cannot call this with arguments on the stack;
aoqi@1 46 // if you need more than 3 arguments, you must implement the call yourself.
aoqi@1 47 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
aoqi@1 48 // S7 is used here in place of x86's edi.
aoqi@1 49 // setup registers
aoqi@1 50 const Register thread = TREG; // is callee-saved register (Visual C++ calling conventions)
aoqi@1 51 assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
aoqi@1 52 assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
aoqi@1 53 assert(args_size >= 0, "illegal args_size");
aoqi@1 54
aoqi@1 55 set_num_rt_args(1 + args_size);
aoqi@1 56
aoqi@1 57
aoqi@1 58 // push java thread (becomes first argument of C function)
aoqi@1 59 #ifndef OPT_THREAD
aoqi@1 60 get_thread(thread);
aoqi@1 61 #endif
aoqi@1 62 move(A0, thread);
aoqi@1 63
aoqi@1 64 set_last_Java_frame(thread, NOREG, FP, NULL);
aoqi@1 65 NOT_LP64(addi(SP, SP, - wordSize * (1+args_size)));
aoqi@1 66 move(AT, -(StackAlignmentInBytes));
aoqi@1 67 andr(SP, SP, AT);
aoqi@1 68
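// The block below computes the address that the jalr further down will return
// to, and records it (via AT) as the thread's last_Java_pc. The offsets added
// to pc() are intended to cover the instructions emitted between this point
// and the call's return address.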
aoqi@1 69 relocate(relocInfo::internal_pc_type);
aoqi@1 70 {
aoqi@1 71 #ifndef _LP64
aoqi@1 72 int save_pc = (int)pc() + 12 + NativeCall::return_address_offset;
aoqi@1 73 lui(AT, Assembler::split_high(save_pc));
aoqi@1 74 addiu(AT, AT, Assembler::split_low(save_pc));
aoqi@1 75 #else
aoqi@1 76 uintptr_t save_pc = (uintptr_t)pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset;
aoqi@1 77 li48(AT, save_pc);
aoqi@1 78 #endif
aoqi@1 79 }
aoqi@1 80 st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
aoqi@1 81
aoqi@1 82 // do the call
aoqi@1 83 //#define aoqi_test
aoqi@1 84 #ifdef aoqi_test
aoqi@1 85 tty->print_cr("StubRuntime::%s:%d entry: %lx", __func__, __LINE__, entry);
aoqi@1 86 #endif
aoqi@1 87 #ifndef _LP64
aoqi@1 88 lui(T9, Assembler::split_high((int)entry));
aoqi@1 89 addiu(T9, T9, Assembler::split_low((int)entry));
aoqi@1 90 #else
aoqi@1 91 li48(T9, (intptr_t)entry);
aoqi@1 92 #endif
aoqi@1 93 jalr(T9);
aoqi@1 94 delayed()->nop();
aoqi@1 95 int call_offset = offset();
aoqi@1 96
aoqi@1 97 // verify callee-saved register
aoqi@1 98 #ifdef ASSERT
aoqi@1 99 guarantee(thread != V0, "change this code");
aoqi@1 100 push(V0);
aoqi@1 101 {
aoqi@1 102 Label L;
aoqi@1 103 get_thread(V0);
aoqi@1 104 beq(thread, V0, L);
aoqi@1 105 delayed()->nop();
aoqi@1 106 int3();
aoqi@1 107 stop("StubAssembler::call_RT: edi not callee saved?");
aoqi@1 108 bind(L);
aoqi@1 109 }
aoqi@1 110 super_pop(V0);
aoqi@1 111 #endif
aoqi@1 112 // discard thread and arguments
aoqi@1 113 ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); //by yyq
aoqi@1 114 //FIXME: in the x86 version the second parameter is false, why true here? @jerome, 12/31/06
aoqi@1 115 // reset_last_Java_frame(thread, true);
aoqi@1 116 reset_last_Java_frame(thread, true, false);
aoqi@1 117 // check for pending exceptions
aoqi@1 118 {
aoqi@1 119 Label L;
aoqi@1 120 ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
aoqi@1 121 beq(AT, R0, L);
aoqi@1 122 delayed()->nop();
aoqi@1 123 // exception pending => remove activation and forward to exception handler
aoqi@1 124 // make sure that the vm_results are cleared
aoqi@1 125 if (oop_result1->is_valid()) {
aoqi@1 126 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
aoqi@1 127 }
aoqi@1 128 if (oop_result2->is_valid()) {
aoqi@1 129 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
aoqi@1 130 }
aoqi@1 131 // the leave() on x86 just pops ebp and leaves the return address on the top
aoqi@1 132 // of the stack
aoqi@1 133 // the return address will be needed by forward_exception_entry()
aoqi@1 134 if (frame_size() == no_frame_size) {
aoqi@1 135 addiu(SP, FP, wordSize);
aoqi@1 136 ld_ptr(FP, SP, (-1) * wordSize);
aoqi@1 137 jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
aoqi@1 138 delayed()->nop();
aoqi@1 139 } else if (_stub_id == Runtime1::forward_exception_id) {
aoqi@1 140 should_not_reach_here();
aoqi@1 141 } else {
aoqi@1 142 jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
aoqi@1 143 relocInfo::runtime_call_type);
aoqi@1 144 delayed()->nop();
aoqi@1 145 }
aoqi@1 146 bind(L);
aoqi@1 147 }
aoqi@1 148 // get oop results if there are any and reset the values in the thread
aoqi@1 149 if (oop_result1->is_valid()) {
aoqi@1 150 ld_ptr(oop_result1, thread, in_bytes(JavaThread::vm_result_offset()));
aoqi@1 151 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
aoqi@1 152 verify_oop(oop_result1);
aoqi@1 153 }
aoqi@1 154 if (oop_result2->is_valid()) {
aoqi@1 155 ld_ptr(oop_result2, thread, in_bytes(JavaThread::vm_result_2_offset()));
aoqi@1 156 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
aoqi@1 157 verify_oop(oop_result2);
aoqi@1 158 }
aoqi@1 159 return call_offset;
aoqi@1 160 }
aoqi@1 161
aoqi@1 162
aoqi@1 163 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
aoqi@1 164 if (arg1 != A1) move(A1, arg1);
aoqi@1 165 return call_RT(oop_result1, oop_result2, entry, 1);
aoqi@1 166 }
aoqi@1 167
aoqi@1 168
aoqi@1 169 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
aoqi@1 170 if (arg1!=A1) move(A1, arg1);
aoqi@1 171 if (arg2!=A2) move(A2, arg2); assert(arg2 != A1, "smashed argument");
aoqi@1 172 return call_RT(oop_result1, oop_result2, entry, 2);
aoqi@1 173 }
aoqi@1 174
aoqi@1 175
aoqi@1 176 int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
aoqi@1 177 if (arg1!=A1) move(A1, arg1);
aoqi@1 178 if (arg2!=A2) move(A2, arg2); assert(arg2 != A1, "smashed argument");
aoqi@1 179 if (arg3!=A3) move(A3, arg3); assert(arg3 != A1 && arg3 != A2, "smashed argument");
aoqi@1 180 return call_RT(oop_result1, oop_result2, entry, 3);
aoqi@1 181 }
aoqi@1 182
aoqi@1 183
aoqi@1 184 // Implementation of StubFrame
aoqi@1 185
aoqi@1 186 class StubFrame: public StackObj {
aoqi@1 187 private:
aoqi@1 188 StubAssembler* _sasm;
aoqi@1 189
aoqi@1 190 public:
aoqi@1 191 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
aoqi@1 192 void load_argument(int offset_in_words, Register reg);
aoqi@1 193 ~StubFrame();
aoqi@1 194 };
aoqi@1 195
aoqi@1 196
aoqi@1 197 #define __ _sasm->
aoqi@1 198
aoqi@1 199 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
aoqi@1 200 _sasm = sasm;
aoqi@1 201 __ set_info(name, must_gc_arguments);
aoqi@1 202 __ enter();
aoqi@1 203 }
aoqi@1 204
aoqi@1 205
aoqi@1 206 //FIXME: I have no idea about the frame architecture of MIPS
aoqi@1 207 // load parameters that were stored with LIR_Assembler::store_parameter
aoqi@1 208 // Note: offsets for store_parameter and load_argument must match
aoqi@1 209 void StubFrame::load_argument(int offset_in_words, Register reg) {
aoqi@1 210 //ebp + 0: link
aoqi@1 211 // + 1: return address
aoqi@1 212 // + 2: argument with offset 0
aoqi@1 213 // + 3: argument with offset 1
aoqi@1 214 // + 4: ...
aoqi@1 215 //__ movl(reg, Address(ebp, (offset_in_words + 2) * BytesPerWord));
aoqi@1 216 __ ld_ptr(reg, Address(FP, (offset_in_words + 2) * BytesPerWord));
aoqi@1 217 }
aoqi@1 218 StubFrame::~StubFrame() {
aoqi@1 219 __ leave();
aoqi@1 220 __ jr(RA);
aoqi@1 221 __ delayed()->nop();
aoqi@1 222 }
aoqi@1 223
aoqi@1 224 #undef __
aoqi@1 225
aoqi@1 226
aoqi@1 227 // Implementation of Runtime1
aoqi@1 228
aoqi@1 229 #define __ sasm->
aoqi@1 230
aoqi@1 231 //static OopMap* save_live_registers(MacroAssembler* sasm, int num_rt_args);
aoqi@1 232 //static void restore_live_registers(MacroAssembler* sasm);
aoqi@1 233 //DeoptimizationBlob* SharedRuntime::_deopt_blob = NULL;
aoqi@1 234 /*
aoqi@1 235 const int fpu_stack_as_doubles_size_in_words = 16;
aoqi@1 236 const int fpu_stack_as_doubles_size = 64;
aoqi@1 237 */
aoqi@1 238 const int float_regs_as_doubles_size_in_words = 16;
aoqi@1 239
aoqi@1 240 //FIXME,
aoqi@1 241 // Stack layout for saving/restoring all the registers needed during a runtime
aoqi@1 242 // call (this includes deoptimization)
aoqi@1 243 // Note: note that users of this frame may well have arguments to some runtime
aoqi@1 244 // while these values are on the stack. These positions neglect those arguments
aoqi@1 245 // but the code in save_live_registers will take the argument count into
aoqi@1 246 // account.
aoqi@1 247 //
aoqi@1 248 #ifdef _LP64
aoqi@1 249 #define SLOT2(x) x,
aoqi@1 250 #define SLOT_PER_WORD 2
aoqi@1 251 #else
aoqi@1 252 #define SLOT2(x)
aoqi@1 253 #define SLOT_PER_WORD 1
aoqi@1 254 #endif // _LP64
aoqi@1 255
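// The offsets below are expressed in VMReg stack slots. On LP64 every saved
// word occupies SLOT_PER_WORD (= 2) slots, and the SLOT2(...) entries name the
// high halves of those words.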
aoqi@1 256 enum reg_save_layout {
aoqi@1 257 #ifndef _LP64
aoqi@1 258 T0_off = 0,
aoqi@1 259 S0_off = T0_off + SLOT_PER_WORD * 8,
aoqi@1 260 #else
aoqi@1 261 A4_off = 0,
aoqi@1 262 S0_off = A4_off + SLOT_PER_WORD * 8,
aoqi@1 263 #endif
aoqi@1 264 FP_off = S0_off + SLOT_PER_WORD * 8, SLOT2(FPH_off)
aoqi@1 265 T8_off, SLOT2(T8H_off)
aoqi@1 266 T9_off, SLOT2(T9H_off)
aoqi@1 267 SP_off, SLOT2(SPH_off)
aoqi@1 268 V0_off, SLOT2(V0H_off)
aoqi@1 269 V1_off, SLOT2(V1H_off)
aoqi@1 270 A0_off, SLOT2(A0H_off)
aoqi@1 271 A1_off, SLOT2(A1H_off)
aoqi@1 272 A2_off, SLOT2(A2H_off)
aoqi@1 273 A3_off, SLOT2(A3H_off)
aoqi@1 274
aoqi@1 275 // Float registers
aoqi@1 276 /* FIXME: Jin: In MIPS64, F0~23 are all caller-saved registers */
aoqi@1 277 #if 1
aoqi@1 278 F0_off, SLOT2( F0H_off)
aoqi@1 279 F1_off, SLOT2( F1H_off)
aoqi@1 280 F2_off, SLOT2( F2H_off)
aoqi@1 281 F3_off, SLOT2( F3H_off)
aoqi@1 282 F4_off, SLOT2( F4H_off)
aoqi@1 283 F5_off, SLOT2( F5H_off)
aoqi@1 284 F6_off, SLOT2( F6H_off)
aoqi@1 285 F7_off, SLOT2( F7H_off)
aoqi@1 286 F8_off, SLOT2( F8H_off)
aoqi@1 287 F9_off, SLOT2( F9H_off)
aoqi@1 288 F10_off, SLOT2( F10H_off)
aoqi@1 289 F11_off, SLOT2( F11H_off)
aoqi@1 290 F12_off, SLOT2( F12H_off)
aoqi@1 291 F13_off, SLOT2( F13H_off)
aoqi@1 292 F14_off, SLOT2( F14H_off)
aoqi@1 293 F15_off, SLOT2( F15H_off)
aoqi@1 294 F16_off, SLOT2( F16H_off)
aoqi@1 295 F17_off, SLOT2( F17H_off)
aoqi@1 296 F18_off, SLOT2( F18H_off)
aoqi@1 297 F19_off, SLOT2( F19H_off)
aoqi@1 298 #endif
aoqi@1 299
aoqi@1 300 GP_off, SLOT2( GPH_off)
aoqi@1 301 //temp_2_off,
aoqi@1 302 temp_1_off, SLOT2(temp_1H_off)
aoqi@1 303 saved_fp_off, SLOT2(saved_fpH_off)
aoqi@1 304 return_off, SLOT2(returnH_off)
aoqi@1 305
aoqi@1 306 reg_save_frame_size,
aoqi@1 307
aoqi@1 308 // illegal instruction handler
aoqi@1 309 continue_dest_off = temp_1_off,
aoqi@1 310
aoqi@1 311 // deoptimization equates
aoqi@1 312 //deopt_type = temp_2_off, // slot for type of deopt in progress
aoqi@1 313 ret_type = temp_1_off // slot for return type
aoqi@1 314 };
aoqi@1 315
aoqi@1 316 // Save off registers which might be killed by calls into the runtime.
aoqi@1 317 // Tries to be smart about FP registers. In particular we separate
aoqi@1 318 // saving and describing the FPU registers for deoptimization since we
aoqi@1 319 // have to save the FPU registers twice if we describe them and on P4
aoqi@1 320 // saving FPU registers which don't contain anything appears
aoqi@1 321 // expensive. The deopt blob is the only thing which needs to
aoqi@1 322 // describe FPU registers. In all other cases it should be sufficient
aoqi@1 323 // to simply save their current value.
aoqi@1 324 //FIXME: I have no idea which registers should be saved. @jerome
aoqi@1 325 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
aoqi@1 326 bool save_fpu_registers = true, bool describe_fpu_registers = false) {
aoqi@1 327
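// Note: this routine only describes where save_live_registers() will have
// stored each register; the slot indices are biased by num_rt_args because
// the runtime arguments (including the thread) sit below the save area.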
aoqi@1 328 /* Jin: num_rt_args is counted in 8-byte words. */
aoqi@1 329 int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / SLOT_PER_WORD; // args + thread
aoqi@1 330 sasm->set_frame_size(frame_size_in_slots / SLOT_PER_WORD);
aoqi@1 331
aoqi@1 332 // record saved value locations in an OopMap
aoqi@1 333 // locations are offsets from sp after runtime call; num_rt_args is number of arguments
aoqi@1 334 // in call, including thread
aoqi@1 335 OopMap* map = new OopMap(reg_save_frame_size, 0);
aoqi@1 336
aoqi@1 337 map->set_callee_saved(VMRegImpl::stack2reg(V0_off + num_rt_args), V0->as_VMReg());
aoqi@1 338 map->set_callee_saved(VMRegImpl::stack2reg(V1_off + num_rt_args), V1->as_VMReg());
aoqi@1 339 #ifdef _LP64
aoqi@1 340 map->set_callee_saved(VMRegImpl::stack2reg(V0H_off + num_rt_args), V0->as_VMReg()->next());
aoqi@1 341 map->set_callee_saved(VMRegImpl::stack2reg(V1H_off + num_rt_args), V1->as_VMReg()->next());
aoqi@1 342 #endif
aoqi@1 343
aoqi@1 344 int i = 0;
aoqi@1 345 #ifndef _LP64
aoqi@1 346 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 347 map->set_callee_saved(VMRegImpl::stack2reg(T0_off + num_rt_args + i++), r->as_VMReg());
aoqi@1 348 }
aoqi@1 349 #else
aoqi@1 350 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 351 map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg());
aoqi@1 352 map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg()->next());
aoqi@1 353 }
aoqi@1 354 #endif
aoqi@1 355
aoqi@1 356 i = 0;
aoqi@1 357 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 358 map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg());
aoqi@1 359 #ifdef _LP64
aoqi@1 360 map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg()->next());
aoqi@1 361 #endif
aoqi@1 362 }
aoqi@1 363
aoqi@1 364 map->set_callee_saved(VMRegImpl::stack2reg(FP_off + num_rt_args), FP->as_VMReg());
aoqi@1 365 map->set_callee_saved(VMRegImpl::stack2reg(GP_off + num_rt_args), GP->as_VMReg());
aoqi@1 366 map->set_callee_saved(VMRegImpl::stack2reg(T8_off + num_rt_args), T8->as_VMReg());
aoqi@1 367 map->set_callee_saved(VMRegImpl::stack2reg(T9_off + num_rt_args), T9->as_VMReg());
aoqi@1 368 map->set_callee_saved(VMRegImpl::stack2reg(A0_off + num_rt_args), A0->as_VMReg());
aoqi@1 369 map->set_callee_saved(VMRegImpl::stack2reg(A1_off + num_rt_args), A1->as_VMReg());
aoqi@1 370 map->set_callee_saved(VMRegImpl::stack2reg(A2_off + num_rt_args), A2->as_VMReg());
aoqi@1 371 map->set_callee_saved(VMRegImpl::stack2reg(A3_off + num_rt_args), A3->as_VMReg());
aoqi@1 372
aoqi@1 373 #if 1
aoqi@1 374 map->set_callee_saved(VMRegImpl::stack2reg(F0_off + num_rt_args), F0->as_VMReg());
aoqi@1 375 map->set_callee_saved(VMRegImpl::stack2reg(F1_off + num_rt_args), F1->as_VMReg());
aoqi@1 376 map->set_callee_saved(VMRegImpl::stack2reg(F2_off + num_rt_args), F2->as_VMReg());
aoqi@1 377 map->set_callee_saved(VMRegImpl::stack2reg(F3_off + num_rt_args), F3->as_VMReg());
aoqi@1 378 map->set_callee_saved(VMRegImpl::stack2reg(F4_off + num_rt_args), F4->as_VMReg());
aoqi@1 379 map->set_callee_saved(VMRegImpl::stack2reg(F5_off + num_rt_args), F5->as_VMReg());
aoqi@1 380 map->set_callee_saved(VMRegImpl::stack2reg(F6_off + num_rt_args), F6->as_VMReg());
aoqi@1 381 map->set_callee_saved(VMRegImpl::stack2reg(F7_off + num_rt_args), F7->as_VMReg());
aoqi@1 382 map->set_callee_saved(VMRegImpl::stack2reg(F8_off + num_rt_args), F8->as_VMReg());
aoqi@1 383 map->set_callee_saved(VMRegImpl::stack2reg(F9_off + num_rt_args), F9->as_VMReg());
aoqi@1 384 map->set_callee_saved(VMRegImpl::stack2reg(F10_off + num_rt_args), F10->as_VMReg());
aoqi@1 385 map->set_callee_saved(VMRegImpl::stack2reg(F11_off + num_rt_args), F11->as_VMReg());
aoqi@1 386 map->set_callee_saved(VMRegImpl::stack2reg(F12_off + num_rt_args), F12->as_VMReg());
aoqi@1 387 map->set_callee_saved(VMRegImpl::stack2reg(F13_off + num_rt_args), F13->as_VMReg());
aoqi@1 388 map->set_callee_saved(VMRegImpl::stack2reg(F14_off + num_rt_args), F14->as_VMReg());
aoqi@1 389 map->set_callee_saved(VMRegImpl::stack2reg(F15_off + num_rt_args), F15->as_VMReg());
aoqi@1 390 map->set_callee_saved(VMRegImpl::stack2reg(F16_off + num_rt_args), F16->as_VMReg());
aoqi@1 391 map->set_callee_saved(VMRegImpl::stack2reg(F17_off + num_rt_args), F17->as_VMReg());
aoqi@1 392 map->set_callee_saved(VMRegImpl::stack2reg(F18_off + num_rt_args), F18->as_VMReg());
aoqi@1 393 map->set_callee_saved(VMRegImpl::stack2reg(F19_off + num_rt_args), F19->as_VMReg());
aoqi@1 394 #endif
aoqi@1 395
aoqi@1 396 #ifdef _LP64
aoqi@1 397 map->set_callee_saved(VMRegImpl::stack2reg(FPH_off + num_rt_args), FP->as_VMReg()->next());
aoqi@1 398 map->set_callee_saved(VMRegImpl::stack2reg(GPH_off + num_rt_args), GP->as_VMReg()->next());
aoqi@1 399 map->set_callee_saved(VMRegImpl::stack2reg(T8H_off + num_rt_args), T8->as_VMReg()->next());
aoqi@1 400 map->set_callee_saved(VMRegImpl::stack2reg(T9H_off + num_rt_args), T9->as_VMReg()->next());
aoqi@1 401 map->set_callee_saved(VMRegImpl::stack2reg(A0H_off + num_rt_args), A0->as_VMReg()->next());
aoqi@1 402 map->set_callee_saved(VMRegImpl::stack2reg(A1H_off + num_rt_args), A1->as_VMReg()->next());
aoqi@1 403 map->set_callee_saved(VMRegImpl::stack2reg(A2H_off + num_rt_args), A2->as_VMReg()->next());
aoqi@1 404 map->set_callee_saved(VMRegImpl::stack2reg(A3H_off + num_rt_args), A3->as_VMReg()->next());
aoqi@1 405 #endif
aoqi@1 406 return map;
aoqi@1 407 }
aoqi@1 408
aoqi@1 409 //FIXME: Is it enough to save these registers? (by yyq)
aoqi@1 410 static OopMap* save_live_registers(StubAssembler* sasm,
aoqi@1 411 int num_rt_args,
aoqi@1 412 bool save_fpu_registers = true,
aoqi@1 413 bool describe_fpu_registers = false) {
aoqi@1 414 //const int reg_save_frame_size = return_off + 1 + num_rt_args;
aoqi@1 415 __ block_comment("save_live_registers");
aoqi@1 416
aoqi@1 417 // save all register state - int, fpu
aoqi@1 418 __ addi(SP, SP, -(reg_save_frame_size / SLOT_PER_WORD - 2)* wordSize);
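// (A note on the intent: the "- 2" presumably accounts for the two words,
//  saved FP and return address, that the stub's enter() has already pushed.)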
aoqi@1 419
aoqi@1 420 #ifndef _LP64
aoqi@1 421 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 422 __ sw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 423 #else
aoqi@1 424 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 425 __ sd(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1 426 #endif
aoqi@1 427 }
aoqi@1 428 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 429 __ st_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 430 }
aoqi@1 431 __ st_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1 432 __ st_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
aoqi@1 433 __ st_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
aoqi@1 434 __ st_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
aoqi@1 435 __ st_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
aoqi@1 436 __ st_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
aoqi@1 437 __ st_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
aoqi@1 438 __ st_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
aoqi@1 439 __ st_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
aoqi@1 440 __ st_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);
aoqi@1 441
aoqi@1 442 #if 1
aoqi@1 443 __ sdc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
aoqi@1 444 __ sdc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
aoqi@1 445 __ sdc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
aoqi@1 446 __ sdc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
aoqi@1 447 __ sdc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
aoqi@1 448 __ sdc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
aoqi@1 449 __ sdc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
aoqi@1 450 __ sdc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
aoqi@1 451 __ sdc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
aoqi@1 452 __ sdc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
aoqi@1 453 __ sdc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
aoqi@1 454 __ sdc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
aoqi@1 455 __ sdc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
aoqi@1 456 __ sdc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
aoqi@1 457 __ sdc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
aoqi@1 458 __ sdc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
aoqi@1 459 __ sdc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
aoqi@1 460 __ sdc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
aoqi@1 461 __ sdc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
aoqi@1 462 __ sdc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
aoqi@1 463 #endif
aoqi@1 464
aoqi@1 465 return generate_oop_map(sasm, num_rt_args, save_fpu_registers, describe_fpu_registers);
aoqi@1 466 }
aoqi@1 467
aoqi@1 468 static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
aoqi@1 469 //static void restore_live_registers(MacroAssembler* sasm) {
aoqi@1 470 #ifndef _LP64
aoqi@1 471 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 472 __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 473 #else
aoqi@1 474 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 475 __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1 476 #endif
aoqi@1 477 }
aoqi@1 478 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 479 __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 480 }
aoqi@1 481 __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1 482 __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
aoqi@1 483
aoqi@1 484 __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
aoqi@1 485 __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
aoqi@1 486 __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
aoqi@1 487 __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
aoqi@1 488 __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
aoqi@1 489 __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
aoqi@1 490
aoqi@1 491 __ ld_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
aoqi@1 492 __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);
aoqi@1 493
aoqi@1 494 #if 1
aoqi@1 495 __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
aoqi@1 496 __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
aoqi@1 497 __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
aoqi@1 498 __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
aoqi@1 499 __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
aoqi@1 500 __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
aoqi@1 501 __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
aoqi@1 502 __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
aoqi@1 503 __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
aoqi@1 504 __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
aoqi@1 505 __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
aoqi@1 506 __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
aoqi@1 507 __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
aoqi@1 508 __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
aoqi@1 509 __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
aoqi@1 510 __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
aoqi@1 511 __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
aoqi@1 512 __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
aoqi@1 513 __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
aoqi@1 514 __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
aoqi@1 515 #endif
aoqi@1 516
aoqi@1 517 __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
aoqi@1 518 }
aoqi@1 519
aoqi@1 520 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
aoqi@1 521 __ block_comment("restore_live_registers");
aoqi@1 522 restore_fpu(sasm, restore_fpu_registers);
aoqi@1 523 }
aoqi@1 524
aoqi@1 525 static void restore_live_registers_except_V0(StubAssembler* sasm, bool restore_fpu_registers = true) {
aoqi@1 526 //static void restore_live_registers(MacroAssembler* sasm) {
aoqi@1 527 //FIXME: maybe V1 needs to be saved too
aoqi@1 528 __ block_comment("restore_live_registers except V0");
aoqi@1 529 #ifndef _LP64
aoqi@1 530 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 531 __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 532 #else
aoqi@1 533 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 534 __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1 535 #endif
aoqi@1 536 }
aoqi@1 537 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 538 __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 539 }
aoqi@1 540 __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1 541 __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
aoqi@1 542
aoqi@1 543 __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
aoqi@1 544 __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
aoqi@1 545 __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
aoqi@1 546 __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
aoqi@1 547 __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
aoqi@1 548 __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
aoqi@1 549
aoqi@1 550 #if 1
aoqi@1 551 __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
aoqi@1 552 __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
aoqi@1 553 __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
aoqi@1 554 __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
aoqi@1 555 __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
aoqi@1 556 __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
aoqi@1 557 __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
aoqi@1 558 __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
aoqi@1 559 __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
aoqi@1 560 __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
aoqi@1 561 __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
aoqi@1 562 __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
aoqi@1 563 __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
aoqi@1 564 __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
aoqi@1 565 __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
aoqi@1 566 __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
aoqi@1 567 __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
aoqi@1 568 __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
aoqi@1 569 __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
aoqi@1 570 __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
aoqi@1 571 #endif
aoqi@1 572
aoqi@1 573 __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);
aoqi@1 574
aoqi@1 575 __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
aoqi@1 576 }
aoqi@1 577
aoqi@1 578 void Runtime1::initialize_pd() {
aoqi@1 579 // nothing to do
aoqi@1 580 }
aoqi@1 581
aoqi@1 582 // target: the entry point of the method that creates and posts the exception oop
aoqi@1 583 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
aoqi@1 584 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
aoqi@1 585 // preserve all registers
aoqi@1 586 OopMap* oop_map = save_live_registers(sasm, 0);
aoqi@1 587
aoqi@1 588 // now all registers are saved and can be used freely
aoqi@1 589 // verify that no old value is used accidentally
aoqi@1 590 // all registers are saved; I think MIPS does not need this
aoqi@1 591
aoqi@1 592 // registers used by this stub
aoqi@1 593 const Register temp_reg = T3;
aoqi@1 594 // load argument for exception that is passed as an argument into the stub
aoqi@1 595 if (has_argument) {
aoqi@1 596 __ ld_ptr(temp_reg, Address(FP, 2*BytesPerWord));
aoqi@1 597 }
aoqi@1 598 int call_offset;
aoqi@1 599 if (has_argument)
aoqi@1 600 call_offset = __ call_RT(noreg, noreg, target, temp_reg);
aoqi@1 601 else
aoqi@1 602 call_offset = __ call_RT(noreg, noreg, target);
aoqi@1 603
aoqi@1 604 OopMapSet* oop_maps = new OopMapSet();
aoqi@1 605 oop_maps->add_gc_map(call_offset, oop_map);
aoqi@1 606
aoqi@1 607 __ stop("should not reach here");
aoqi@1 608
aoqi@1 609 return oop_maps;
aoqi@1 610 }
aoqi@1 611
aoqi@1 612 //FIXME: I do not know which register to use; should use T3 as real_return_addr @jerome
aoqi@1 613 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
aoqi@1 614 __ block_comment("generate_handle_exception");
aoqi@1 615 // incoming parameters
aoqi@1 616 const Register exception_oop = V0;
aoqi@1 617 const Register exception_pc = V1;
aoqi@1 618 // other registers used in this stub
aoqi@1 619 // const Register real_return_addr = T3;
aoqi@1 620 const Register thread = T8;
aoqi@1 621 // Save registers, if required.
aoqi@1 622 OopMapSet* oop_maps = new OopMapSet();
aoqi@1 623 OopMap* oop_map = NULL;
aoqi@1 624 switch (id) {
aoqi@1 625 case forward_exception_id:
aoqi@1 626 // We're handling an exception in the context of a compiled frame.
aoqi@1 627 // The registers have been saved in the standard places. Perform
aoqi@1 628 // an exception lookup in the caller and dispatch to the handler
aoqi@1 629 // if found. Otherwise unwind and dispatch to the callers
aoqi@1 630 // exception handler.
aoqi@1 631 oop_map = generate_oop_map(sasm, 1 /*thread*/);
aoqi@1 632
aoqi@1 633 // load and clear pending exception oop into V0 (exception_oop)
aoqi@1 634 __ ld(exception_oop, Address(thread, Thread::pending_exception_offset()));
aoqi@1 635 __ sw(R0,Address(thread, Thread::pending_exception_offset()));
aoqi@1 636
aoqi@1 637 // load issuing PC (the return address for this stub) into V1 (exception_pc)
aoqi@1 638 __ ld(exception_pc, Address(FP, 1*BytesPerWord));
aoqi@1 639
aoqi@1 640 // make sure that the vm_results are cleared (may be unnecessary)
aoqi@1 641 __ sw(R0,Address(thread, JavaThread::vm_result_offset()));
aoqi@1 642 __ sw(R0,Address(thread, JavaThread::vm_result_2_offset()));
aoqi@1 643 break;
aoqi@1 644 case handle_exception_nofpu_id:
aoqi@1 645 case handle_exception_id:
aoqi@1 646 // At this point all registers MAY be live.
aoqi@1 647 oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
aoqi@1 648 break;
aoqi@1 649 case handle_exception_from_callee_id: {
aoqi@1 650 // At this point all registers except exception oop (V0) and
aoqi@1 651 // exception pc (V1) are dead.
aoqi@1 652 const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
aoqi@1 653 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
aoqi@1 654 sasm->set_frame_size(frame_size);
aoqi@1 655 WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
aoqi@1 656 break;
aoqi@1 657 }
aoqi@1 658 default: ShouldNotReachHere();
aoqi@1 659 }
aoqi@1 660
aoqi@1 661 #ifdef TIERED
aoqi@1 662 // C2 can leave the fpu stack dirty
aoqi@1 663 __ empty_FPU_stack();
aoqi@1 664 //}
aoqi@1 665 #endif // TIERED
aoqi@1 666
aoqi@1 667 // verify that only V0 and V1 are valid at this time
aoqi@1 668 // verify that V0 contains a valid exception
aoqi@1 669 __ verify_not_null_oop(exception_oop);
aoqi@1 670
aoqi@1 671 // load address of JavaThread object for thread-local data
aoqi@1 672 __ get_thread(thread);
aoqi@1 673
aoqi@1 674 #ifdef ASSERT
aoqi@1 675 // check that fields in JavaThread for exception oop and issuing pc are
aoqi@1 676 // empty before writing to them
aoqi@1 677 Label oop_empty;
aoqi@1 678 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@1 679 __ beq(AT, R0, oop_empty);
aoqi@1 680 __ delayed()->nop();
aoqi@1 681 __ stop("exception oop already set");
aoqi@1 682 __ bind(oop_empty);
aoqi@1 683 Label pc_empty;
aoqi@1 684 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@1 685 __ beq(AT, R0, pc_empty);
aoqi@1 686 __ delayed()->nop();
aoqi@1 687 __ stop("exception pc already set");
aoqi@1 688 __ bind(pc_empty);
aoqi@1 689 #endif
aoqi@1 690
aoqi@1 691 // save exception oop and issuing pc into JavaThread
aoqi@1 692 // (exception handler will load it from here)
aoqi@1 693 __ st_ptr(exception_oop, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@1 694 __ st_ptr(exception_pc, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@1 695
aoqi@1 696 // save real return address (pc that called this stub)
aoqi@1 697 // __ ld_ptr(real_return_addr, FP, 1*BytesPerWord);
aoqi@1 698 // __ st_ptr(real_return_addr, SP, temp_1_off * BytesPerWord / SLOT_PER_WORD);
aoqi@1 699
aoqi@1 700 // patch throwing pc into return address (has bci & oop map)
aoqi@1 701 __ st_ptr(exception_pc, FP, 1*BytesPerWord);
aoqi@1 702 // compute the exception handler.
aoqi@1 703 // the exception oop and the throwing pc are read from the fields in JavaThread
aoqi@1 704 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@1 705 exception_handler_for_pc));
aoqi@1 706 oop_maps->add_gc_map(call_offset, oop_map);
aoqi@1 707 // V0: handler address or NULL if no handler exists
aoqi@1 708 // will be the deopt blob if nmethod was deoptimized while we looked up
aoqi@1 709 // handler regardless of whether handler existed in the nmethod.
aoqi@1 710
aoqi@1 711 // only V0 is valid at this time, all other registers have been destroyed by the
aoqi@1 712 // runtime call
aoqi@1 713
aoqi@1 714 // Do we have an exception handler in the nmethod?
aoqi@1 715 /*Label no_handler;
aoqi@1 716 Label done;
aoqi@1 717 __ beq(V0, R0, no_handler);
aoqi@1 718 __ delayed()->nop(); */
aoqi@1 719 // exception handler found
aoqi@1 720 // patch the return address -> the stub will directly return to the exception handler
aoqi@1 721 __ st_ptr(V0, FP, 1 * BytesPerWord);
aoqi@1 722
aoqi@1 723 // restore registers
aoqi@1 724 // restore_live_registers(sasm, save_fpu_registers);
aoqi@1 725
aoqi@1 726 // return to exception handler
aoqi@1 727 // __ leave();
aoqi@1 728 // __ jr(RA);
aoqi@1 729 // __ delayed()->nop();
aoqi@1 730 // __ bind(no_handler);
aoqi@1 731 // no exception handler found in this method, so the exception is
aoqi@1 732 // forwarded to the caller (using the unwind code of the nmethod)
aoqi@1 733 // there is no need to restore the registers
aoqi@1 734
aoqi@1 735 // restore the real return address that was saved before the RT-call
aoqi@1 736 // __ ld_ptr(real_return_addr, SP, temp_1_off * BytesPerWord / SLOT_PER_WORD);
aoqi@1 737 // __ st_ptr(real_return_addr, FP, 1 * BytesPerWord);
aoqi@1 738 // load address of JavaThread object for thread-local data
aoqi@1 739 // __ get_thread(thread);
aoqi@1 740 // restore exception oop into eax (convention for unwind code)
aoqi@1 741 // __ ld_ptr(exception_oop, thread, in_bytes(JavaThread::exception_oop_offset()));
aoqi@1 742
aoqi@1 743 // clear exception fields in JavaThread because they are no longer needed
aoqi@1 744 // (fields must be cleared because they are processed by GC otherwise)
aoqi@1 745 // __ st_ptr(R0, thread, in_bytes(JavaThread::exception_oop_offset()));
aoqi@1 746 // __ st_ptr(R0,thread, in_bytes(JavaThread::exception_pc_offset()));
aoqi@1 747 // pop the stub frame off
aoqi@1 748 // __ leave();
aoqi@1 749 // generate_unwind_exception(sasm);
aoqi@1 750 // __ stop("should not reach here");
aoqi@1 751 //}
aoqi@1 752 switch (id) {
aoqi@1 753 case forward_exception_id:
aoqi@1 754 case handle_exception_nofpu_id:
aoqi@1 755 case handle_exception_id:
aoqi@1 756 // Restore the registers that were saved at the beginning.
aoqi@1 757 restore_live_registers(sasm, id == handle_exception_nofpu_id);
aoqi@1 758 break;
aoqi@1 759 case handle_exception_from_callee_id:
aoqi@1 760 // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
aoqi@1 761 // since we do a leave anyway.
aoqi@1 762
aoqi@1 763 // Pop the return address since we are possibly changing SP (restoring from BP).
aoqi@1 764 __ leave();
aoqi@1 765 // Restore SP from BP if the exception PC is a method handle call site.
aoqi@1 766 NOT_LP64(__ get_thread(thread);)
aoqi@1 767 /*__ ld(AT, Address(thread, JavaThread::is_method_handle_return_offset()));
aoqi@1 768 __ beq(AT, R0, done);
aoqi@1 769 __ move(SP, rbp_mh_SP_save);
aoqi@1 770 __ bind(done);
aoqi@1 771 __ jr(RA); // jump to exception handler
aoqi@1 772 __ delayed()->nop();*/
aoqi@1 773 // 759 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
aoqi@1 774 // 760 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
aoqi@1 775 // 761 __ jmp(rcx); // jump to exception handler
aoqi@1 776
aoqi@1 777 break;
aoqi@1 778 default: ShouldNotReachHere();
aoqi@1 779 }
aoqi@1 780
aoqi@1 781 return oop_maps;
aoqi@1 782 }
aoqi@1 783
aoqi@1 784
aoqi@1 785
aoqi@1 786
aoqi@1 787
aoqi@1 788 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
aoqi@1 789 // incoming parameters
aoqi@1 790 const Register exception_oop = V0;
aoqi@1 791 // other registers used in this stub
aoqi@1 792 const Register exception_pc = V1;
aoqi@1 793 const Register handler_addr = T3;
aoqi@1 794 const Register thread = T8;
aoqi@1 795
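// Overall flow: pop this nmethod's activation, ask the runtime for the
// caller's exception handler (keyed by the return address), then jump to that
// handler with the exception oop in V0 and the throwing pc in V1.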
aoqi@1 796 // verify that only V0 is valid at this time
aoqi@1 797 // __ invalidate_registers(false, true, true, true, true, true);
aoqi@1 798
aoqi@1 799 #ifdef ASSERT
aoqi@1 800 // check that fields in JavaThread for exception oop and issuing pc are empty
aoqi@1 801 __ get_thread(thread);
aoqi@1 802 Label oop_empty;
aoqi@1 803 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset()));
aoqi@1 804 __ beq(AT, R0, oop_empty);
aoqi@1 805 __ delayed()->nop();
aoqi@1 806 __ stop("exception oop must be empty");
aoqi@1 807 __ bind(oop_empty);
aoqi@1 808
aoqi@1 809 Label pc_empty;
aoqi@1 810 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_pc_offset()));
aoqi@1 811 __ beq(AT,R0, pc_empty);
aoqi@1 812 __ delayed()->nop();
aoqi@1 813 __ stop("exception pc must be empty");
aoqi@1 814 __ bind(pc_empty);
aoqi@1 815 #endif
aoqi@1 816 // clear the FPU stack in case any FPU results are left behind
aoqi@1 817 __ empty_FPU_stack();
aoqi@1 818
aoqi@1 819 // leave activation of nmethod
aoqi@1 820 __ addi(SP, FP, wordSize);
aoqi@1 821 __ ld_ptr(FP, SP, - wordSize);
aoqi@1 822 // store return address (is on top of stack after leave)
aoqi@1 823 __ ld_ptr(exception_pc, SP, 0);
aoqi@1 824 __ verify_oop(exception_oop);
aoqi@1 825
aoqi@1 826 // save exception oop (V0) to the stack before the call
aoqi@1 827 __ push(exception_oop);
aoqi@1 828 // search the exception handler address of the caller (using the return address)
aoqi@1 829 __ call_VM_leaf(CAST_FROM_FN_PTR(address,
aoqi@1 830 SharedRuntime::exception_handler_for_return_address), exception_pc);
aoqi@1 831 // V0: exception handler address of the caller
aoqi@1 832
aoqi@1 833 // only V0 is valid at this time, all other registers have been destroyed by the call
aoqi@1 834
aoqi@1 835 // move result of call into correct register
aoqi@1 836 __ move(handler_addr, V0);
aoqi@1 837 // restore exception oop into V0 (required convention of the exception handler)
aoqi@1 838 __ super_pop(exception_oop);
aoqi@1 839
aoqi@1 840 __ verify_oop(exception_oop);
aoqi@1 841
aoqi@1 842 // get throwing pc (= return address).
aoqi@1 843 // V1 has been destroyed by the call, so it must be set again
aoqi@1 844 // the pop is also necessary to simulate the effect of a ret(0)
aoqi@1 845 __ super_pop(exception_pc);
aoqi@1 846 // verify that there is really a valid exception in V0
aoqi@1 847 __ verify_not_null_oop(exception_oop);
aoqi@1 848
aoqi@1 849 // continue at exception handler (return address removed)
aoqi@1 850 // note: do *not* remove arguments when unwinding the
aoqi@1 851 // activation since the caller assumes having
aoqi@1 852 // all arguments on the stack when entering the
aoqi@1 853 // runtime to determine the exception handler
aoqi@1 854 // (GC happens at call site with arguments!)
aoqi@1 855 // V0: exception oop
aoqi@1 856 // V1: throwing pc
aoqi@1 857 // T3: exception handler
aoqi@1 858 __ jr(handler_addr);
aoqi@1 859 __ delayed()->nop();
aoqi@1 860 }
aoqi@1 861
aoqi@1 862
aoqi@1 863
aoqi@1 864
aoqi@1 865 //static address deopt_with_exception_entry_for_patch = NULL;
aoqi@1 866
aoqi@1 867 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
aoqi@1 868
aoqi@1 869 // use the maximum number of runtime-arguments here because it is difficult to
aoqi@1 870 // distinguish each RT-Call.
aoqi@1 871 // Note: This number affects also the RT-Call in generate_handle_exception because
aoqi@1 872 // the oop-map is shared for all calls.
aoqi@1 873
aoqi@1 874
aoqi@1 875
aoqi@1 876
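// Rough outline of the patching stub: save all live registers, call the
// patching routine in the runtime with the current thread as the argument, then
// either forward a pending exception, hand off to the deoptimization blob if the
// runtime reported that the nmethod was deoptimized while patching, or simply
// return to (and re-execute) the now-patched code.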
aoqi@1 877 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
aoqi@1 878 assert(deopt_blob != NULL, "deoptimization blob must have been created");
aoqi@1 879 // assert(deopt_with_exception_entry_for_patch != NULL,
aoqi@1 880 // "deoptimization blob must have been created");
aoqi@1 881
aoqi@1 882 //OopMap* oop_map = save_live_registers(sasm, num_rt_args);
aoqi@1 883 OopMap* oop_map = save_live_registers(sasm, 0);
aoqi@1 884 #ifndef OPT_THREAD
aoqi@1 885 const Register thread = T8;
aoqi@1 886 // push java thread (becomes first argument of C function)
aoqi@1 887 __ get_thread(thread);
aoqi@1 888 #else
aoqi@1 889 const Register thread = TREG;
aoqi@1 890 #endif
aoqi@1 891 __ move(A0, thread);
aoqi@1 892
aoqi@1 893
aoqi@1 894 /*
aoqi@1 895 * NOTE: this frame should be a compiled frame, but at this point the pc in the frame anchor
aoqi@1 896 * still points into the interpreter. That is wrong and should be cleared, but it is not.
aoqi@1 897 * Even if we cleared the wrong pc in the anchor, the default way to get the caller pc for this
aoqi@1 898 * class of frame would still be wrong: it assumes the caller pc is stored at *(sp - 1), which is not the case here.
aoqi@1 899 */
aoqi@1 900 __ set_last_Java_frame(thread, NOREG, FP, NULL);
aoqi@1 901 NOT_LP64(__ addiu(SP, SP, (-1) * wordSize));
aoqi@1 902 __ move(AT, -(StackAlignmentInBytes));
aoqi@1 903 __ andr(SP, SP, AT);
aoqi@1 904 __ relocate(relocInfo::internal_pc_type);
aoqi@1 905 {
aoqi@1 906 #ifndef _LP64
aoqi@1 907 int save_pc = (int)__ pc() + 12 + NativeCall::return_address_offset;
aoqi@1 908 __ lui(AT, Assembler::split_high(save_pc));
aoqi@1 909 __ addiu(AT, AT, Assembler::split_low(save_pc));
aoqi@1 910 #else
aoqi@1 911 uintptr_t save_pc = (uintptr_t)__ pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset;
aoqi@1 912 __ li48(AT, save_pc);
aoqi@1 913 #endif
aoqi@1 914 }
aoqi@1 915 __ st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
aoqi@1 916
aoqi@1 917 // do the call
aoqi@1 918 #ifndef _LP64
aoqi@1 919 __ lui(T9, Assembler::split_high((int)target));
aoqi@1 920 __ addiu(T9, T9, Assembler::split_low((int)target));
aoqi@1 921 #else
aoqi@1 922 __ li48(T9, (intptr_t)target);
aoqi@1 923 #endif
aoqi@1 924 __ jalr(T9);
aoqi@1 925 __ delayed()->nop();
aoqi@1 926 OopMapSet* oop_maps = new OopMapSet();
aoqi@1 927 oop_maps->add_gc_map(__ offset(), oop_map);
aoqi@1 928
aoqi@1 929 #ifndef OPT_THREAD
aoqi@1 930 __ get_thread(thread);
aoqi@1 931 #endif
aoqi@1 932
aoqi@1 933 __ ld_ptr (SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
aoqi@1 934 __ reset_last_Java_frame(thread, true,true);
aoqi@1 935 // discard thread arg
aoqi@1 936 // check for pending exceptions
aoqi@1 937 {
aoqi@1 938 Label L, skip;
aoqi@1 939 //Label no_deopt;
aoqi@1 940 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
aoqi@1 941 __ beq(AT, R0, L);
aoqi@1 942 __ delayed()->nop();
aoqi@1 943 // exception pending => remove activation and forward to exception handler
aoqi@1 944
aoqi@1 945 __ bne(V0,R0, skip);
aoqi@1 946 __ delayed()->nop();
aoqi@1 947 // relocInfo::runtime_call_type);
aoqi@1 948 __ jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
aoqi@1 949 relocInfo::runtime_call_type);
aoqi@1 950 __ delayed()->nop();
aoqi@1 951 __ bind(skip);
aoqi@1 952
aoqi@1 953 // the deopt blob expects exceptions in the special fields of
aoqi@1 954 // JavaThread, so copy and clear pending exception.
aoqi@1 955
aoqi@1 956 // load and clear pending exception
aoqi@1 957 __ ld_ptr(V0, Address(thread,in_bytes(Thread::pending_exception_offset())));
aoqi@1 958 __ st_ptr(R0, Address(thread, in_bytes(Thread::pending_exception_offset())));
aoqi@1 959
aoqi@1 960 // check that there is really a valid exception
aoqi@1 961 __ verify_not_null_oop(V0);
aoqi@1 962
aoqi@1 963 // load throwing pc: this is the return address of the stub
aoqi@1 964 __ ld_ptr(V1, Address(SP, return_off * BytesPerWord));
aoqi@1 965
aoqi@1 966
aoqi@1 967 #ifdef ASSERT
aoqi@1 968 // check that fields in JavaThread for exception oop and issuing pc are empty
aoqi@1 969 Label oop_empty;
aoqi@1 970 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@1 971 __ beq(AT,R0,oop_empty);
aoqi@1 972 __ delayed()->nop();
aoqi@1 973 __ stop("exception oop must be empty");
aoqi@1 974 __ bind(oop_empty);
aoqi@1 975
aoqi@1 976 Label pc_empty;
aoqi@1 977 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@1 978 __ beq(AT,R0,pc_empty);
aoqi@1 979 __ delayed()->nop();
aoqi@1 980 __ stop("exception pc must be empty");
aoqi@1 981 __ bind(pc_empty);
aoqi@1 982 #endif
aoqi@1 983
aoqi@1 984 // store exception oop and throwing pc to JavaThread
aoqi@1 985 __ st_ptr(V0,Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@1 986 __ st_ptr(V1,Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@1 987
aoqi@1 988 restore_live_registers(sasm);
aoqi@1 989
aoqi@1 990 __ leave();
aoqi@1 991
aoqi@1 992 // Forward the exception directly to deopt blob. We can blow no
aoqi@1 993 // registers and must leave throwing pc on the stack. A patch may
aoqi@1 994 // have values live in registers, so we use the entry point that takes the
aoqi@1 995 // exception in tls.
aoqi@1 996 __ jmp(deopt_blob->unpack_with_exception_in_tls(), relocInfo::runtime_call_type);
aoqi@1 997 __ delayed()->nop();
aoqi@1 998
aoqi@1 999 __ bind(L);
aoqi@1 1000 }
aoqi@1 1001
aoqi@1 1002 // Runtime will return true if the nmethod has been deoptimized during
aoqi@1 1003 // the patching process. In that case we must do a deopt reexecute instead.
aoqi@1 1004
aoqi@1 1005 Label reexecuteEntry, cont;
aoqi@1 1006
aoqi@1 1007 __ beq(V0, R0, cont); // have we deoptimized?
aoqi@1 1008 __ delayed()->nop();
aoqi@1 1009
aoqi@1 1010 // Will reexecute. The proper return address is already on the stack; we just restore
aoqi@1 1011 // registers, pop all of our frame except the return address, and jump to the deopt blob
aoqi@1 1012 restore_live_registers(sasm);
aoqi@1 1013
aoqi@1 1014 __ leave();
aoqi@1 1015 __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
aoqi@1 1016 __ delayed()->nop();
aoqi@1 1017
aoqi@1 1018 __ bind(cont);
aoqi@1 1019 restore_live_registers(sasm);
aoqi@1 1020
aoqi@1 1021 __ leave();
aoqi@1 1022 __ jr(RA);
aoqi@1 1023 __ delayed()->nop();
aoqi@1 1024
aoqi@1 1025 return oop_maps;
aoqi@1 1026 }
aoqi@1 1027
aoqi@1 1028
aoqi@1 1029 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
aoqi@1 1030 // for better readability
aoqi@1 1031 const bool must_gc_arguments = true;
aoqi@1 1032 const bool dont_gc_arguments = false;
aoqi@1 1033
aoqi@1 1034
aoqi@1 1035 // default value; overwritten for some optimized stubs that are called
aoqi@1 1036 // from methods that do not use the fpu
aoqi@1 1037 bool save_fpu_registers = true;
aoqi@1 1038
aoqi@1 1039
aoqi@1 1040 // stub code & info for the different stubs
aoqi@1 1041 OopMapSet* oop_maps = NULL;
aoqi@1 1042
aoqi@1 1043 switch (id) {
aoqi@1 1044 case forward_exception_id:
aoqi@1 1045 {
aoqi@1 1046 // we're handling an exception in the context of a compiled
aoqi@1 1047 // frame. The registers have been saved in the standard
aoqi@1 1048 // places. Perform an exception lookup in the caller and
aoqi@1 1049 // dispatch to the handler if found. Otherwise unwind and
aoqi@1 1050 // dispatch to the callers exception handler.
aoqi@1 1051
aoqi@1 1052 const Register exception_oop = V0;
aoqi@1 1053 const Register exception_pc = V1;
aoqi@1 1054 #ifndef OPT_THREAD
aoqi@1 1055 const Register thread = T8;
aoqi@1 1056 __ get_thread(thread);
aoqi@1 1057 #else
aoqi@1 1058 const Register thread = TREG;
aoqi@1 1059 #endif
aoqi@1 1060 // load pending exception oop into V0
aoqi@1 1061 __ ld_ptr(exception_oop, thread, in_bytes(Thread::pending_exception_offset()));
aoqi@1 1062 // clear pending exception
aoqi@1 1063 __ st_ptr(R0, thread, in_bytes(Thread::pending_exception_offset()));
aoqi@1 1064
aoqi@1 1065 // load issuing PC (the return address for this stub) into V1
aoqi@1 1066 __ ld_ptr(exception_pc, FP, 1*BytesPerWord);
aoqi@1 1067
aoqi@1 1068 // make sure that the vm_results are cleared (may be unnecessary)
aoqi@1 1069 __ st_ptr(R0, Address(thread, in_bytes(JavaThread::vm_result_offset())));
aoqi@1 1070 __ st_ptr(R0, Address(thread, in_bytes(JavaThread::vm_result_2_offset())));
aoqi@1 1071
aoqi@1 1072 // verify that there is really a valid exception in V0
aoqi@1 1073 __ verify_not_null_oop(exception_oop);
aoqi@1 1074
aoqi@1 1075
aoqi@1 1076 oop_maps = new OopMapSet();
aoqi@1 1077 OopMap* oop_map = generate_oop_map(sasm, 0);
aoqi@1 1078 generate_handle_exception(id, sasm);
aoqi@1 1079 __ stop("should not reach here");
aoqi@1 1080 }
aoqi@1 1081 break;
aoqi@1 1082
aoqi@1 1083 case new_instance_id:
aoqi@1 1084 case fast_new_instance_id:
aoqi@1 1085 case fast_new_instance_init_check_id:
aoqi@1 1086 {
aoqi@1 1087 // T4 (A4 on LP64) is used as the klass register, V0 as the result register. MUST accord with NewInstanceStub::emit_code
aoqi@1 1088 #ifndef _LP64
aoqi@1 1089 Register klass = T4; // Incoming
aoqi@1 1090 #else
aoqi@1 1091 Register klass = A4; // Incoming
aoqi@1 1092 #endif
aoqi@1 1093 Register obj = V0; // Result
aoqi@1 1094
aoqi@1 1095 if (id == new_instance_id) {
aoqi@1 1096 __ set_info("new_instance", dont_gc_arguments);
aoqi@1 1097 } else if (id == fast_new_instance_id) {
aoqi@1 1098 __ set_info("fast new_instance", dont_gc_arguments);
aoqi@1 1099 } else {
aoqi@1 1100 assert(id == fast_new_instance_init_check_id, "bad StubID");
aoqi@1 1101 __ set_info("fast new_instance init check", dont_gc_arguments);
aoqi@1 1102 }
aoqi@1 1103
aoqi@1 1104 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id)
aoqi@1 1105 && UseTLAB && FastTLABRefill) {
aoqi@1 1106 Label slow_path;
aoqi@1 1107 Register obj_size = T0;
aoqi@1 1108 Register t1 = T2;
aoqi@1 1109 Register t2 = T3;
aoqi@1 1110 assert_different_registers(klass, obj, obj_size, t1, t2);
aoqi@1 1111 if (id == fast_new_instance_init_check_id) {
aoqi@1 1112 // make sure the klass is initialized
aoqi@1 1113 __ lw(AT, klass, in_bytes(InstanceKlass::init_state_offset()));
aoqi@1 1114 __ move(t1, InstanceKlass::fully_initialized);
aoqi@1 1115 __ bne(AT, t1, slow_path);
aoqi@1 1116 __ delayed()->nop();
aoqi@1 1117 }
aoqi@1 1118 #ifdef ASSERT
aoqi@1 1119 // assert object can be fast path allocated
aoqi@1 1120 {
aoqi@1 1121 Label ok, not_ok;
aoqi@1 1122 __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1123 __ blez(obj_size, not_ok);
aoqi@1 1124 __ delayed()->nop();
aoqi@1 1125 __ andi(t1 , obj_size, Klass::_lh_instance_slow_path_bit);
aoqi@1 1126 __ beq(t1, R0, ok);
aoqi@1 1127 __ delayed()->nop();
aoqi@1 1128 __ bind(not_ok);
aoqi@1 1129 __ stop("assert(can be fast path allocated)");
aoqi@1 1130 __ should_not_reach_here();
aoqi@1 1131 __ bind(ok);
aoqi@1 1132 }
aoqi@1 1133 #endif // ASSERT
aoqi@1 1134 // if we got here then the TLAB allocation failed, so try
aoqi@1 1135 // refilling the TLAB or allocating directly from eden.
aoqi@1 1136
aoqi@1 1137 Label retry_tlab, try_eden;
aoqi@1 1138 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy the klass register
aoqi@1 1139
aoqi@1 1140 __ bind(retry_tlab);
aoqi@1 1141
aoqi@1 1142 // get the instance size
aoqi@1 1143 __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1144 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
aoqi@1 1145 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
aoqi@1 1146 __ verify_oop(obj);
aoqi@1 1147 __ jr(RA);
aoqi@1 1148 __ delayed()->nop();
aoqi@1 1149
aoqi@1 1150 __ bind(try_eden);
aoqi@1 1151
aoqi@1 1152 // get the instance size
aoqi@1 1153 __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1154 __ eden_allocate(obj, obj_size, 0, t1, t2, slow_path);
aoqi@1 1155 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
aoqi@1 1156 __ verify_oop(obj);
aoqi@1 1157 __ jr(RA);
aoqi@1 1158 __ delayed()->nop();
aoqi@1 1159
aoqi@1 1160 __ bind(slow_path);
aoqi@1 1161 }
aoqi@1 1162 __ enter();
aoqi@1 1163 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1164 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
aoqi@1 1165 oop_maps = new OopMapSet();
aoqi@1 1166 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1167 restore_live_registers_except_V0(sasm);
aoqi@1 1168 __ verify_oop(obj);
aoqi@1 1169 __ leave();
aoqi@1 1170 __ jr(RA);
aoqi@1 1171 __ delayed()->nop();
aoqi@1 1172
aoqi@1 1173 // V0: new instance
aoqi@1 1174 }
aoqi@1 1175 break;
aoqi@1 1176
aoqi@1 1177
aoqi@1 1178 #ifdef TIERED
aoqi@1 1179 //FIXME, I have no idea which register to use
aoqi@1 1180 case counter_overflow_id:
aoqi@1 1181 {
aoqi@1 1182 #ifndef _LP64
aoqi@1 1183 Register bci = T5;
aoqi@1 1184 #else
aoqi@1 1185 Register bci = A5;
aoqi@1 1186 #endif
aoqi@1 1187 __ enter();
aoqi@1 1188 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1189 // Retrieve bci
aoqi@1 1190 __ lw(bci, Address(FP, 2*BytesPerWord));// FIXME:wuhui.ebp==??
aoqi@1 1191 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
aoqi@1 1192 oop_maps = new OopMapSet();
aoqi@1 1193 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1194 restore_live_registers(sasm);
aoqi@1 1195 __ leave();
aoqi@1 1196 __ jr(RA);
aoqi@1 1197 __ delayed()->nop();
aoqi@1 1198 }
aoqi@1 1199 break;
aoqi@1 1200 #endif // TIERED
aoqi@1 1201
aoqi@1 1202
aoqi@1 1203
aoqi@1 1204 case new_type_array_id:
aoqi@1 1205 case new_object_array_id:
aoqi@1 1206 {
aoqi@1 1207 // T2 is used as the length register, T4 (A4 on LP64) as the klass register, V0 as the result register.
aoqi@1 1208 // MUST accord with NewTypeArrayStub::emit_code, NewObjectArrayStub::emit_code
aoqi@1 1209 Register length = T2; // Incoming
aoqi@1 1210 #ifndef _LP64
aoqi@1 1211 Register klass = T4; // Incoming
aoqi@1 1212 #else
aoqi@1 1213 Register klass = A4; // Incoming
aoqi@1 1214 #endif
aoqi@1 1215 Register obj = V0; // Result
aoqi@1 1216
aoqi@1 1217 if (id == new_type_array_id) {
aoqi@1 1218 __ set_info("new_type_array", dont_gc_arguments);
aoqi@1 1219 } else {
aoqi@1 1220 __ set_info("new_object_array", dont_gc_arguments);
aoqi@1 1221 }
aoqi@1 1222
aoqi@1 1223 if (UseTLAB && FastTLABRefill) {
aoqi@1 1224 Register arr_size = T0;
aoqi@1 1225 Register t1 = T1;
aoqi@1 1226 Register t2 = T3;
aoqi@1 1227 Label slow_path;
aoqi@1 1228 assert_different_registers(length, klass, obj, arr_size, t1, t2);
aoqi@1 1229
aoqi@1 1230 // check that array length is small enough for fast path
aoqi@1 1231 __ move(AT, C1_MacroAssembler::max_array_allocation_length);
aoqi@1 1232 __ sltu(AT, AT, length);
aoqi@1 1233 __ bne(AT, R0, slow_path);
aoqi@1 1234 __ delayed()->nop();
aoqi@1 1235
aoqi@1 1236 // if we got here then the TLAB allocation failed, so try
aoqi@1 1237 // refilling the TLAB or allocating directly from eden.
aoqi@1 1238 Label retry_tlab, try_eden;
aoqi@1 1239 // Note: tlab_refill clobbers T0, T1, T5 and T8.
aoqi@1 1240 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves length (T2) and klass (T4/A4)
aoqi@1 1241
aoqi@1 1242 __ bind(retry_tlab);
aoqi@1 1243
aoqi@1 1244 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
aoqi@1 1245 __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1246 __ andi(AT, t1, 0x1f);
aoqi@1 1247 __ sllv(arr_size, length, AT);
aoqi@1 1248 __ srl(t1, t1, Klass::_lh_header_size_shift);
aoqi@1 1249 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1250 __ add(arr_size, t1, arr_size);
aoqi@1 1251 __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
aoqi@1 1252 __ move(AT, ~MinObjAlignmentInBytesMask);
aoqi@1 1253 __ andr(arr_size, arr_size, AT);
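// arr_size is now rounded up to MinObjAlignmentInBytes: (size + mask) & ~mask.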
aoqi@1 1254
aoqi@1 1255
aoqi@1 1256 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
aoqi@1 1257 __ initialize_header(obj, klass, length,t1,t2);
aoqi@1 1258 __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
aoqi@1 1259 + (Klass::_lh_header_size_shift / BitsPerByte)));
aoqi@1 1260 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
aoqi@1 1261 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
aoqi@1 1262 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1263 __ sub(arr_size, arr_size, t1); // body length
aoqi@1 1264 __ add(t1, t1, obj); // body start
aoqi@1 1265 __ initialize_body(t1, arr_size, 0, t2);
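// The array header and body are now fully initialized; return the new array in obj.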
aoqi@1 1266 __ verify_oop(obj);
aoqi@1 1267 __ jr(RA);
aoqi@1 1268 __ delayed()->nop();
aoqi@1 1269
aoqi@1 1270 __ bind(try_eden);
aoqi@1 1271 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
aoqi@1 1272 __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1273 __ andi(AT, t1, 0x1f);
aoqi@1 1274 __ sllv(arr_size, length, AT);
aoqi@1 1275 __ srl(t1, t1, Klass::_lh_header_size_shift);
aoqi@1 1276 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1277 __ add(arr_size, t1, arr_size);
aoqi@1 1278 __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
aoqi@1 1279 __ move(AT, ~MinObjAlignmentInBytesMask);
aoqi@1 1280 __ andr(arr_size, arr_size, AT);
aoqi@1 1281 __ eden_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
aoqi@1 1282 __ initialize_header(obj, klass, length,t1,t2);
aoqi@1 1283 __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
aoqi@1 1284 + (Klass::_lh_header_size_shift / BitsPerByte)));
aoqi@1 1285 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1286 __ sub(arr_size, arr_size, t1); // body length
aoqi@1 1287 __ add(t1, t1, obj); // body start
aoqi@1 1288
aoqi@1 1289 __ initialize_body(t1, arr_size, 0, t2);
aoqi@1 1290 __ verify_oop(obj);
aoqi@1 1291 __ jr(RA);
aoqi@1 1292 __ delayed()->nop();
aoqi@1 1293 __ bind(slow_path);
aoqi@1 1294 }
aoqi@1 1295
aoqi@1 1296
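// Common slow path: allocate the array via a runtime call to new_type_array or new_object_array.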
aoqi@1 1297 __ enter();
aoqi@1 1298 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1299 int call_offset;
aoqi@1 1300 if (id == new_type_array_id) {
aoqi@1 1301 call_offset = __ call_RT(obj, noreg,
aoqi@1 1302 CAST_FROM_FN_PTR(address, new_type_array), klass, length);
aoqi@1 1303 } else {
aoqi@1 1304 call_offset = __ call_RT(obj, noreg,
aoqi@1 1305 CAST_FROM_FN_PTR(address, new_object_array), klass, length);
aoqi@1 1306 }
aoqi@1 1307
aoqi@1 1308 oop_maps = new OopMapSet();
aoqi@1 1309 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1310 restore_live_registers_except_V0(sasm);
aoqi@1 1311 __ verify_oop(obj);
aoqi@1 1312 __ leave();
aoqi@1 1313 __ jr(RA);
aoqi@1 1314 __ delayed()->nop();
aoqi@1 1315 }
aoqi@1 1316 break;
aoqi@1 1317
aoqi@1 1318 case new_multi_array_id:
aoqi@1 1319 {
aoqi@1 1320 StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
aoqi@1 1321 // See do_NewMultiArray in c1_LIRGenerator_mips.cpp.
aoqi@1 1322 // V0: klass
aoqi@1 1323 // T2: rank
aoqi@1 1324 // T0: address of 1st dimension
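// These three registers are passed through unchanged as the arguments of the
// new_multi_array runtime call below.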
aoqi@1 1325 //__ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), A1, A2, A3);
aoqi@1 1326 //OopMap* map = save_live_registers(sasm, 4);
aoqi@1 1327 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1328 int call_offset = __ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array),
aoqi@1 1329 V0,T2,T0);
aoqi@1 1330 oop_maps = new OopMapSet();
aoqi@1 1331 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1332 //FIXME
aoqi@1 1333 restore_live_registers_except_V0(sasm);
aoqi@1 1334 // V0: new multi array
aoqi@1 1335 __ verify_oop(V0);
aoqi@1 1336 }
aoqi@1 1337 break;
aoqi@1 1338
aoqi@1 1339
aoqi@1 1340 case register_finalizer_id:
aoqi@1 1341 {
aoqi@1 1342 __ set_info("register_finalizer", dont_gc_arguments);
aoqi@1 1343
aoqi@1 1344 // This stub is reached via call_runtime, so the object arrives in the
aoqi@1 1345 // first C ABI argument register (A0).
aoqi@1 1346 // See LIRGenerator::do_RegisterFinalizer and call_runtime.
aoqi@1 1347 __ move(V0, A0);
aoqi@1 1348 __ verify_oop(V0);
aoqi@1 1349 // load the klass and check the has-finalizer flag
aoqi@1 1350 Label register_finalizer;
aoqi@1 1351 #ifndef _LP64
aoqi@1 1352 Register t = T5;
aoqi@1 1353 #else
aoqi@1 1354 Register t = A5;
aoqi@1 1355 #endif
aoqi@1 1356 //__ ld_ptr(t, Address(V0, oopDesc::klass_offset_in_bytes()));
aoqi@1 1357 __ load_klass(t, V0);
aoqi@1 1358 __ lw(t, Address(t, Klass::access_flags_offset()));
aoqi@1 1359 __ move(AT, JVM_ACC_HAS_FINALIZER);
aoqi@1 1360 __ andr(AT, AT, t);
aoqi@1 1361
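// Branch to the runtime call if the klass has a finalizer; otherwise return to the caller immediately.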
aoqi@1 1362 __ bne(AT, R0, register_finalizer);
aoqi@1 1363 __ delayed()->nop();
aoqi@1 1364 __ jr(RA);
aoqi@1 1365 __ delayed()->nop();
aoqi@1 1366 __ bind(register_finalizer);
aoqi@1 1367 __ enter();
aoqi@1 1368 OopMap* map = save_live_registers(sasm, 0 /*num_rt_args */);
aoqi@1 1369
aoqi@1 1370 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@1 1371 SharedRuntime::register_finalizer), V0);
aoqi@1 1372 oop_maps = new OopMapSet();
aoqi@1 1373 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1374
aoqi@1 1375 // Now restore all the live registers
aoqi@1 1376 restore_live_registers(sasm);
aoqi@1 1377
aoqi@1 1378 __ leave();
aoqi@1 1379 __ jr(RA);
aoqi@1 1380 __ delayed()->nop();
aoqi@1 1381 }
aoqi@1 1382 break;
aoqi@1 1383
aoqi@1 1384 // case range_check_failed_id:
aoqi@1 1385 case throw_range_check_failed_id:
aoqi@1 1386 { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
aoqi@1 1387 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1388 throw_range_check_exception),true);
aoqi@1 1389 }
aoqi@1 1390 break;
aoqi@1 1391
aoqi@1 1392 case throw_index_exception_id:
aoqi@1 1393 {
aoqi@1 1394 // A1 is used as the index register because it becomes the first runtime-call argument; see call_RT.
aoqi@1 1395 StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
aoqi@1 1396 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1397 throw_index_exception), true);
aoqi@1 1398 }
aoqi@1 1399 break;
aoqi@1 1400
aoqi@1 1401 case throw_div0_exception_id:
aoqi@1 1402 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
aoqi@1 1403 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1404 throw_div0_exception), false);
aoqi@1 1405 }
aoqi@1 1406 break;
aoqi@1 1407
aoqi@1 1408 case throw_null_pointer_exception_id:
aoqi@1 1409 { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
aoqi@1 1410 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1411 throw_null_pointer_exception),false);
aoqi@1 1412 }
aoqi@1 1413 break;
aoqi@1 1414
aoqi@1 1415 case handle_exception_nofpu_id:
aoqi@1 1416 save_fpu_registers = false;
aoqi@1 1417 // fall through
aoqi@1 1418 case handle_exception_id:
aoqi@1 1419 {
aoqi@1 1420
aoqi@1 1421
aoqi@1 1422 StubFrame f(sasm, "handle_exception", dont_gc_arguments);
aoqi@1 1423
aoqi@1 1424 //OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
aoqi@1 1425 oop_maps = generate_handle_exception(id, sasm);
aoqi@1 1426 }
aoqi@1 1427 break;
aoqi@1 1428 case handle_exception_from_callee_id:
aoqi@1 1429 {
aoqi@1 1430 StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
aoqi@1 1431 oop_maps = generate_handle_exception(id, sasm);
aoqi@1 1432 }
aoqi@1 1433 break;
aoqi@1 1434 case unwind_exception_id:
aoqi@1 1435 {
aoqi@1 1436 __ set_info("unwind_exception", dont_gc_arguments);
aoqi@1 1437
aoqi@1 1438 generate_unwind_exception(sasm);
aoqi@1 1439 }
aoqi@1 1440 break;
aoqi@1 1441
aoqi@1 1442
aoqi@1 1443 case throw_array_store_exception_id:
aoqi@1 1444 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
aoqi@1 1445 // tos + 0: link
aoqi@1 1446 // + 1: return address
aoqi@1 1447 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1448 throw_array_store_exception), false);
aoqi@1 1449 }
aoqi@1 1450 break;
aoqi@1 1451
aoqi@1 1452 case throw_class_cast_exception_id:
aoqi@1 1453 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
aoqi@1 1454 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1455 throw_class_cast_exception), V0);
aoqi@1 1456 }
aoqi@1 1457 break;
aoqi@1 1458
aoqi@1 1459 case throw_incompatible_class_change_error_id:
aoqi@1 1460 {
aoqi@1 1461 StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
aoqi@1 1462 oop_maps = generate_exception_throw(sasm,
aoqi@1 1463 CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
aoqi@1 1464 }
aoqi@1 1465 break;
aoqi@1 1466
aoqi@1 1467 case slow_subtype_check_id:
aoqi@1 1468 {
aoqi@1 1469 // Note: this stub is not actually used on MIPS.
aoqi@1 1470 // A0: klass_RInfo (subklass)
aoqi@1 1471 // A1: k->encoding() (superklass)
aoqi@1 1472 __ set_info("slow_subtype_check", dont_gc_arguments);
aoqi@1 1473 __ st_ptr(T0, SP, (-1) * wordSize);
aoqi@1 1474 __ st_ptr(T1, SP, (-2) * wordSize);
aoqi@1 1475 __ addiu(SP, SP, (-2) * wordSize);
aoqi@1 1476
aoqi@1 1477 //+ Klass::secondary_supers_offset_in_bytes()));
aoqi@1 1478 __ ld_ptr(AT, A0, in_bytes( Klass::secondary_supers_offset()));
aoqi@1 1479 __ lw(T1, AT, arrayOopDesc::length_offset_in_bytes());
aoqi@1 1480 __ addiu(AT, AT, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
aoqi@1 1481
aoqi@1 1482 Label miss, hit, loop;
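// Linear scan of the secondary supers array: on a hit, record A1 in the
// secondary super cache and return 1 in V0; on a miss return 0.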
aoqi@1 1483 // T1: remaining count, AT: current element address, A1: klass we are looking for (the super)
aoqi@1 1484 __ bind(loop);
aoqi@1 1485 __ beq(T1, R0, miss);
aoqi@1 1486 #ifndef _LP64
aoqi@1 1487 __ delayed()->lw(T0, AT, 0);
aoqi@1 1488 #else
aoqi@1 1489 __ delayed()->ld(T0, AT, 0);
aoqi@1 1490 #endif
aoqi@1 1491 __ beq(T0, A1, hit);
aoqi@1 1492 __ delayed();
aoqi@1 1493 __ addiu(T1, T1, -1);
aoqi@1 1494 __ b(loop);
aoqi@1 1495 __ delayed();
aoqi@1 1496 __ addiu(AT, AT, 4);
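// FIXME: each element is a Klass* (8 bytes on LP64, loaded with ld above), so stepping AT
// by 4 looks wrong there; harmless only because this stub is unused (see the note at the
// top of this case).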
aoqi@1 1497
aoqi@1 1498 __ bind(hit);
aoqi@1 1499 //+ Klass::secondary_super_cache_offset_in_bytes()), eax);
aoqi@1 1500 __ st_ptr(A1, A0,
aoqi@1 1501 in_bytes( Klass::secondary_super_cache_offset()));
aoqi@1 1502 __ addiu(V0, R0, 1);
aoqi@1 1503 __ addiu(SP, SP, 2 * wordSize);
aoqi@1 1504 __ ld_ptr(T0, SP, (-1) * wordSize);
aoqi@1 1505 __ ld_ptr(T1, SP, (-2) * wordSize);
aoqi@1 1506 __ jr(RA);
aoqi@1 1507 __ delayed()->nop();
aoqi@1 1508
aoqi@1 1509
aoqi@1 1510 __ bind(miss);
aoqi@1 1511 __ move(V0, R0);
aoqi@1 1512 __ addiu(SP, SP, 2 * wordSize);
aoqi@1 1513 __ ld_ptr(T0, SP, (-1) * wordSize);
aoqi@1 1514 __ ld_ptr(T1, SP, (-2) * wordSize);
aoqi@1 1515 __ jr(RA);
aoqi@1 1516 __ delayed()->nop();
aoqi@1 1517 }
aoqi@1 1518 break;
aoqi@1 1519
aoqi@1 1520 case monitorenter_nofpu_id:
aoqi@1 1521 save_fpu_registers = false;// fall through
aoqi@1 1522
aoqi@1 1523 case monitorenter_id:
aoqi@1 1524 {
aoqi@1 1525 StubFrame f(sasm, "monitorenter", dont_gc_arguments);
aoqi@1 1526 OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
aoqi@1 1527
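// Load the stub arguments: slot 1 holds the object, slot 0 the BasicObjectLock address.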
aoqi@1 1528 f.load_argument(1, V0); // V0: object
aoqi@1 1529 #ifndef _LP64
aoqi@1 1530 f.load_argument(0, T6); // T6: lock address
aoqi@1 1531 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@1 1532 monitorenter), V0, T6);
aoqi@1 1533 #else
aoqi@1 1534 f.load_argument(0, A6); // A6: lock address
aoqi@1 1535 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@1 1536 monitorenter), V0, A6);
aoqi@1 1537 #endif
aoqi@1 1538
aoqi@1 1539 oop_maps = new OopMapSet();
aoqi@1 1540 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1541 restore_live_registers(sasm, save_fpu_registers);
aoqi@1 1542 }
aoqi@1 1543 break;
aoqi@1 1544
aoqi@1 1545 case monitorexit_nofpu_id:
aoqi@1 1546 save_fpu_registers = false;
aoqi@1 1547 // fall through
aoqi@1 1548 case monitorexit_id:
aoqi@1 1549 {
aoqi@1 1550 StubFrame f(sasm, "monitorexit", dont_gc_arguments);
aoqi@1 1551 OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
aoqi@1 1552
aoqi@1 1553 #ifndef _LP64
aoqi@1 1554 f.load_argument(0, T6); // T6: lock address
aoqi@1 1555 #else
aoqi@1 1556 f.load_argument(0, A6); // A6: lock address
aoqi@1 1557 #endif
aoqi@1 1558 // note: really a leaf routine but must set up the last Java sp
aoqi@1 1559 // => use call_RT for now (speed could be improved by
aoqi@1 1560 // doing the last Java sp setup manually)
aoqi@1 1561 #ifndef _LP64
aoqi@1 1562 int call_offset = __ call_RT(noreg, noreg,
aoqi@1 1563 CAST_FROM_FN_PTR(address, monitorexit), T6);
aoqi@1 1564 #else
aoqi@1 1565 int call_offset = __ call_RT(noreg, noreg,
aoqi@1 1566 CAST_FROM_FN_PTR(address, monitorexit), A6);
aoqi@1 1567 #endif
aoqi@1 1568 oop_maps = new OopMapSet();
aoqi@1 1569 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1570 restore_live_registers(sasm, save_fpu_registers);
aoqi@1 1571
aoqi@1 1572 }
aoqi@1 1573 break;
aoqi@1 1574 // case init_check_patching_id:
aoqi@1 1575 case access_field_patching_id:
aoqi@1 1576 {
aoqi@1 1577 StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
aoqi@1 1578 // we should set up register map
aoqi@1 1579 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
aoqi@1 1580
aoqi@1 1581 }
aoqi@1 1582 break;
aoqi@1 1583
aoqi@1 1584 case load_klass_patching_id:
aoqi@1 1585 {
aoqi@1 1586 StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
aoqi@1 1587 // we should set up register map
aoqi@1 1588 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1589 move_klass_patching));
aoqi@1 1590 }
aoqi@1 1591 break;
aoqi@1 1592 /* case jvmti_exception_throw_id:
aoqi@1 1593 {
aoqi@1 1594 // V0: exception oop
aoqi@1 1595 // V1: exception pc
aoqi@1 1596 StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
aoqi@1 1597 // Preserve all registers across this potentially blocking call
aoqi@1 1598 const int num_rt_args = 2; // thread, exception oop
aoqi@1 1599 //OopMap* map = save_live_registers(sasm, num_rt_args);
aoqi@1 1600 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1601 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@1 1602 Runtime1::post_jvmti_exception_throw), V0);
aoqi@1 1603 oop_maps = new OopMapSet();
aoqi@1 1604 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1605 restore_live_registers(sasm);
aoqi@1 1606 }*/
aoqi@1 1607 case load_mirror_patching_id:
aoqi@1 1608 {
aoqi@1 1609 StubFrame f(sasm, "load_mirror_patching" , dont_gc_arguments);
aoqi@1 1610 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
aoqi@1 1611 }
aoqi@1 1612 break;
aoqi@1 1613 case dtrace_object_alloc_id:
aoqi@1 1614 {
aoqi@1 1615 // V0:object
aoqi@1 1616 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
aoqi@1 1617 // we can't gc here so skip the oopmap but make sure that all
aoqi@1 1618 // the live registers get saved.
aoqi@1 1619 save_live_registers(sasm, 0);
aoqi@1 1620
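// V0 holds the newly allocated object; preserve it across the call to the dtrace hook.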
aoqi@1 1621 __ push_reg(V0);
aoqi@1 1622 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
aoqi@1 1623 relocInfo::runtime_call_type);
aoqi@1 1624 __ super_pop(V0);
aoqi@1 1625
aoqi@1 1626 restore_live_registers(sasm);
aoqi@1 1627 }
aoqi@1 1628 break;
aoqi@1 1629 case fpu2long_stub_id:
aoqi@1 1630 {
aoqi@1 1631 // FIXME: not ported to MIPS yet; this case falls through to the unimplemented entry below.
aoqi@1 1632 }
aoqi@1 1633 default:
aoqi@1 1634 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
aoqi@1 1635 __ move(A1, (int)id);
aoqi@1 1636 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), A1);
aoqi@1 1637 __ should_not_reach_here();
aoqi@1 1638 }
aoqi@1 1639 break;
aoqi@1 1640 }
aoqi@1 1641 return oop_maps;
aoqi@1 1642 }
aoqi@1 1643
aoqi@1 1644 #undef __
aoqi@1 1645
aoqi@1 1646 const char *Runtime1::pd_name_for_address(address entry) {
aoqi@1 1647 return "<unknown function>";
aoqi@1 1648 }
aoqi@1 1649
