src/cpu/mips/vm/c1_Runtime1_mips.cpp

author      wangxue
date        Fri, 27 Jul 2018 15:17:45 +0800
changeset   9205:cce12244eb8c
parent      9171:c67c94f5b85d
child       9207:874b8588c4ae
permissions -rw-r--r--

#7376 Implement MacroAssembler::incr_allocated_bytes
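
The eden fast paths in this file now call the new MacroAssembler::incr_allocated_bytes after a successful eden_allocate. As a rough, non-authoritative sketch of the expected shape of that helper (the signature is inferred from the call sites below; JavaThread::allocated_bytes_offset(), the AT scratch register, and the exact instruction selection are assumptions, and a 64-bit build is assumed):

    void MacroAssembler::incr_allocated_bytes(Register thread,
                                              Register var_size_in_bytes,
                                              int con_size_in_bytes) {
      // Bump the per-thread allocated-bytes counter by the size of the new allocation.
      ld_ptr(AT, thread, in_bytes(JavaThread::allocated_bytes_offset()));
      if (var_size_in_bytes != noreg) {
        daddu(AT, AT, var_size_in_bytes);    // size computed at run time (obj_size / arr_size)
      } else {
        daddiu(AT, AT, con_size_in_bytes);   // compile-time constant size
      }
      st_ptr(AT, thread, in_bytes(JavaThread::allocated_bytes_offset()));
    }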

aoqi@1 1 /*
aoqi@1 2 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@1 3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
aoqi@1 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@1 5 *
aoqi@1 6 * This code is free software; you can redistribute it and/or modify it
aoqi@1 7 * under the terms of the GNU General Public License version 2 only, as
aoqi@1 8 * published by the Free Software Foundation.
aoqi@1 9 *
aoqi@1 10 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@1 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@1 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@1 13 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@1 14 * accompanied this code).
aoqi@1 15 *
aoqi@1 16 * You should have received a copy of the GNU General Public License version
aoqi@1 17 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@1 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@1 19 *
aoqi@1 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@1 21 * or visit www.oracle.com if you need additional information or have any
aoqi@1 22 * questions.
aoqi@1 23 *
aoqi@1 24 */
aoqi@1 25
aoqi@1 26 #include "precompiled.hpp"
aoqi@1 27 #include "asm/assembler.hpp"
aoqi@1 28 #include "c1/c1_Defs.hpp"
aoqi@1 29 #include "c1/c1_MacroAssembler.hpp"
aoqi@1 30 #include "c1/c1_Runtime1.hpp"
aoqi@1 31 #include "interpreter/interpreter.hpp"
aoqi@1 32 #include "nativeInst_mips.hpp"
aoqi@1 33 #include "oops/compiledICHolder.hpp"
aoqi@1 34 #include "oops/oop.inline.hpp"
aoqi@1 35 #include "prims/jvmtiExport.hpp"
aoqi@1 36 #include "register_mips.hpp"
aoqi@1 37 #include "runtime/sharedRuntime.hpp"
aoqi@1 38 #include "runtime/signature.hpp"
aoqi@1 39 #include "runtime/vframeArray.hpp"
aoqi@6880 40 #include "utilities/macros.hpp"
aoqi@1 41 #include "vmreg_mips.inline.hpp"
aoqi@6880 42 #if INCLUDE_ALL_GCS
aoqi@6880 43 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
aoqi@6880 44 #endif
aoqi@1 45
aoqi@1 46
aoqi@1 47 // Implementation of StubAssembler
aoqi@1 48 // This method preserves stack space for arguments as indicated by args_size.
aoqi@1 49 // For stack alignment reasons, you cannot call this with arguments passed on the stack;
aoqi@1 50 // if you need more than 3 arguments, you must implement the call yourself.
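// A typical use from a stub in this file (see e.g. the new_instance case below):
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);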
fujie@9153 51 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
aoqi@6880 52 // S7 is used here where the x86 code uses edi.
aoqi@6880 53 // setup registers
aoqi@6880 54 const Register thread = TREG; // a callee-saved register
fujie@9153 55 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
fujie@9153 56 assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
aoqi@6880 57 assert(args_size >= 0, "illegal args_size");
fujie@9153 58 bool align_stack = false;
fujie@9153 59 #ifdef _LP64
fujie@9153 60 // At a method handle call, the stack may not be properly aligned
fujie@9153 61 // when returning with an exception.
fujie@9153 62 align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
fujie@9153 63 #endif
aoqi@1 64
aoqi@6880 65 set_num_rt_args(1 + args_size);
aoqi@1 66
aoqi@1 67
aoqi@6880 68 // push java thread (becomes first argument of C function)
aoqi@6880 69 get_thread(thread);
aoqi@6880 70 move(A0, thread);
aoqi@1 71
fujie@9153 72 if(!align_stack) {
fujie@9153 73 set_last_Java_frame(thread, NOREG, FP, NULL);
fujie@9153 74 } else {
fujie@9153 75 address the_pc = pc();
fujie@9153 76 set_last_Java_frame(thread, NOREG, FP, the_pc);
fujie@9153 77 move(AT, -(StackAlignmentInBytes));
fujie@9153 78 andr(SP, SP, AT);
fujie@9153 79 }
aoqi@1 80
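// Compute the return address of the jalr below and record it as last_Java_pc,
// so the runtime can walk this (last Java) frame during the upcoming call.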
aoqi@6880 81 relocate(relocInfo::internal_pc_type);
aoqi@6880 82 {
aoqi@1 83 #ifndef _LP64
aoqi@6880 84 int save_pc = (int)pc() + 12 + NativeCall::return_address_offset;
aoqi@6880 85 lui(AT, Assembler::split_high(save_pc));
aoqi@6880 86 addiu(AT, AT, Assembler::split_low(save_pc));
aoqi@1 87 #else
aoqi@6880 88 uintptr_t save_pc = (uintptr_t)pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset_long;
aoqi@6880 89 li48(AT, save_pc);
aoqi@1 90 #endif
aoqi@6880 91 }
aoqi@6880 92 st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
aoqi@1 93
aoqi@6880 94 // do the call
aoqi@6880 95 #ifndef _LP64
aoqi@6880 96 lui(T9, Assembler::split_high((int)entry));
aoqi@6880 97 addiu(T9, T9, Assembler::split_low((int)entry));
aoqi@6880 98 #else
aoqi@6880 99 li48(T9, (intptr_t)entry);
aoqi@1 100 #endif
aoqi@6880 101 jalr(T9);
aoqi@6880 102 delayed()->nop();
fujie@9161 103
fujie@9161 104 int call_offset = offset();
aoqi@6880 105
aoqi@6880 106 // verify callee-saved register
aoqi@6880 107 #ifdef ASSERT
aoqi@6880 108 guarantee(thread != V0, "change this code");
aoqi@6880 109 push(V0);
aoqi@6880 110 {
aoqi@6880 111 Label L;
aoqi@6880 112 get_thread(V0);
aoqi@6880 113 beq(thread, V0, L);
aoqi@6880 114 delayed()->nop();
aoqi@6880 115 int3();
aoqi@6880 116 stop("StubAssembler::call_RT: edi not callee saved?");
aoqi@6880 117 bind(L);
aoqi@6880 118 }
aoqi@6880 119 super_pop(V0);
aoqi@1 120 #endif
aoqi@6880 121 // discard thread and arguments
aoqi@6880 122 ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset())); //by yyq
fujie@9171 123 reset_last_Java_frame(thread, true);
aoqi@6880 124 // check for pending exceptions
aoqi@6880 125 {
aoqi@6880 126 Label L;
aoqi@6880 127 ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
aoqi@6880 128 beq(AT, R0, L);
aoqi@6880 129 delayed()->nop();
aoqi@6880 130 // exception pending => remove activation and forward to exception handler
aoqi@6880 131 // make sure that the vm_results are cleared
aoqi@6880 132 if (oop_result1->is_valid()) {
aoqi@6880 133 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
aoqi@6880 134 }
fujie@9153 135 if (metadata_result->is_valid()) {
aoqi@6880 136 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
aoqi@6880 137 }
aoqi@6880 138 // the leave() on x86 just pops ebp and leaves the return address on the top
aoqi@6880 139 // of the stack
aoqi@6880 140 // the return address will be needed by forward_exception_entry()
aoqi@6880 141 if (frame_size() == no_frame_size) {
aoqi@6880 142 addiu(SP, FP, wordSize);
aoqi@6880 143 ld_ptr(FP, SP, (-1) * wordSize);
aoqi@6880 144 jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
aoqi@6880 145 delayed()->nop();
aoqi@6880 146 } else if (_stub_id == Runtime1::forward_exception_id) {
aoqi@6880 147 should_not_reach_here();
aoqi@6880 148 } else {
aoqi@8865 149 jmp(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type);
aoqi@6880 150 delayed()->nop();
aoqi@6880 151 }
aoqi@6880 152 bind(L);
aoqi@6880 153 }
aoqi@6880 154 // get oop results if there are any and reset the values in the thread
aoqi@6880 155 if (oop_result1->is_valid()) {
aoqi@6880 156 ld_ptr(oop_result1, thread, in_bytes(JavaThread::vm_result_offset()));
aoqi@6880 157 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
aoqi@6880 158 verify_oop(oop_result1);
aoqi@6880 159 }
fujie@9153 160 if (metadata_result->is_valid()) {
fujie@9153 161 ld_ptr(metadata_result, thread, in_bytes(JavaThread::vm_result_2_offset()));
aoqi@6880 162 st_ptr(R0, thread, in_bytes(JavaThread::vm_result_2_offset()));
fujie@9153 163 verify_oop(metadata_result);
aoqi@6880 164 }
aoqi@6880 165 return call_offset;
aoqi@1 166 }
aoqi@1 167
aoqi@1 168
fujie@9153 169 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
aoqi@6880 170 if (arg1 != A1) move(A1, arg1);
fujie@9153 171 return call_RT(oop_result1, metadata_result, entry, 1);
aoqi@1 172 }
aoqi@1 173
aoqi@1 174
fujie@9153 175 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
aoqi@6880 176 if (arg1!=A1) move(A1, arg1);
aoqi@6880 177 if (arg2!=A2) move(A2, arg2); assert(arg2 != A1, "smashed argument");
fujie@9153 178 return call_RT(oop_result1, metadata_result, entry, 2);
aoqi@1 179 }
aoqi@1 180
aoqi@1 181
fujie@9153 182 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
aoqi@6880 183 if (arg1!=A1) move(A1, arg1);
aoqi@6880 184 if (arg2!=A2) move(A2, arg2); assert(arg2 != A1, "smashed argument");
aoqi@6880 185 if (arg3!=A3) move(A3, arg3); assert(arg3 != A1 && arg3 != A2, "smashed argument");
fujie@9153 186 return call_RT(oop_result1, metadata_result, entry, 3);
aoqi@1 187 }
aoqi@1 188
aoqi@1 189
aoqi@1 190 // Implementation of StubFrame
aoqi@1 191
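// A StubFrame emits the stub prologue (set_info + enter) in its constructor and the
// epilogue (leave + return through RA) in its destructor, so a stub body only needs
// to declare one at the top of its scope.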
aoqi@1 192 class StubFrame: public StackObj {
aoqi@6880 193 private:
aoqi@6880 194 StubAssembler* _sasm;
aoqi@1 195
aoqi@6880 196 public:
aoqi@6880 197 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
aoqi@6880 198 void load_argument(int offset_in_words, Register reg);
aoqi@6880 199
aoqi@6880 200 ~StubFrame();
aoqi@1 201 };
aoqi@1 202
aoqi@1 203
aoqi@1 204 #define __ _sasm->
aoqi@1 205
aoqi@1 206 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
aoqi@6880 207 _sasm = sasm;
aoqi@6880 208 __ set_info(name, must_gc_arguments);
aoqi@6880 209 __ enter();
aoqi@1 210 }
aoqi@1 211
aoqi@1 212
aoqi@1 213 //FIXME, I have no idea about the frame layout on MIPS
aoqi@1 214 // load parameters that were stored with LIR_Assembler::store_parameter
aoqi@6880 215 // Note: offsets for store_parameter and load_argument must match
aoqi@1 216 void StubFrame::load_argument(int offset_in_words, Register reg) {
aoqi@6880 217 //ebp + 0: link
aoqi@6880 218 // + 1: return address
aoqi@6880 219 // + 2: argument with offset 0
aoqi@6880 220 // + 3: argument with offset 1
aoqi@6880 221 // + 4: ...
aoqi@6880 222 __ ld_ptr(reg, Address(FP, (offset_in_words + 2) * BytesPerWord));
aoqi@1 223 }
aoqi@6880 224
aoqi@6880 225
aoqi@1 226 StubFrame::~StubFrame() {
aoqi@6880 227 __ leave();
aoqi@6880 228 __ jr(RA);
aoqi@6880 229 __ delayed()->nop();
aoqi@1 230 }
aoqi@1 231
aoqi@1 232 #undef __
aoqi@1 233
aoqi@1 234
aoqi@1 235 // Implementation of Runtime1
aoqi@1 236
aoqi@1 237 #define __ sasm->
aoqi@1 238
aoqi@1 239 //static OopMap* save_live_registers(MacroAssembler* sasm, int num_rt_args);
aoqi@1 240 //static void restore_live_registers(MacroAssembler* sasm);
aoqi@1 241 //DeoptimizationBlob* SharedRuntime::_deopt_blob = NULL;
aoqi@1 242 /*
aoqi@1 243 const int fpu_stack_as_doubles_size_in_words = 16;
aoqi@1 244 const int fpu_stack_as_doubles_size = 64;
aoqi@1 245 */
aoqi@1 246 const int float_regs_as_doubles_size_in_words = 16;
aoqi@1 247
aoqi@6880 248 //FIXME,
aoqi@1 249 // Stack layout for saving/restoring all the registers needed during a runtime
aoqi@1 250 // call (this includes deoptimization)
aoqi@1 251 // Note: note that users of this frame may well have arguments to some runtime
aoqi@1 252 // while these values are on the stack. These positions neglect those arguments
aoqi@1 253 // but the code in save_live_registers will take the argument count into
aoqi@1 254 // account.
aoqi@1 255 //
aoqi@1 256 #ifdef _LP64
aoqi@1 257 #define SLOT2(x) x,
aoqi@1 258 #define SLOT_PER_WORD 2
aoqi@1 259 #else
aoqi@1 260 #define SLOT2(x)
aoqi@1 261 #define SLOT_PER_WORD 1
aoqi@1 262 #endif // _LP64
aoqi@1 263
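// Each saved register takes SLOT_PER_WORD 32-bit VMReg slots: on 64-bit builds SLOT2(x)
// names the high half of the slot pair, on 32-bit builds it expands to nothing.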
aoqi@1 264 enum reg_save_layout {
aoqi@1 265 #ifndef _LP64
aoqi@1 266 T0_off = 0,
aoqi@1 267 S0_off = T0_off + SLOT_PER_WORD * 8,
aoqi@1 268 #else
aoqi@1 269 A4_off = 0,
aoqi@1 270 S0_off = A4_off + SLOT_PER_WORD * 8,
aoqi@1 271 #endif
aoqi@1 272 FP_off = S0_off + SLOT_PER_WORD * 8, SLOT2(FPH_off)
aoqi@1 273 T8_off, SLOT2(T8H_off)
aoqi@1 274 T9_off, SLOT2(T9H_off)
aoqi@1 275 SP_off, SLOT2(SPH_off)
aoqi@1 276 V0_off, SLOT2(V0H_off)
aoqi@1 277 V1_off, SLOT2(V1H_off)
aoqi@1 278 A0_off, SLOT2(A0H_off)
aoqi@1 279 A1_off, SLOT2(A1H_off)
aoqi@1 280 A2_off, SLOT2(A2H_off)
aoqi@1 281 A3_off, SLOT2(A3H_off)
aoqi@1 282
aoqi@1 283 // Float registers
aoqi@1 284 /* FIXME: Jin: In MIPS64, F0~23 are all caller-saved registers */
aoqi@1 285 F0_off, SLOT2( F0H_off)
aoqi@1 286 F1_off, SLOT2( F1H_off)
aoqi@1 287 F2_off, SLOT2( F2H_off)
aoqi@1 288 F3_off, SLOT2( F3H_off)
aoqi@1 289 F4_off, SLOT2( F4H_off)
aoqi@1 290 F5_off, SLOT2( F5H_off)
aoqi@1 291 F6_off, SLOT2( F6H_off)
aoqi@1 292 F7_off, SLOT2( F7H_off)
aoqi@1 293 F8_off, SLOT2( F8H_off)
aoqi@1 294 F9_off, SLOT2( F9H_off)
aoqi@1 295 F10_off, SLOT2( F10H_off)
aoqi@1 296 F11_off, SLOT2( F11H_off)
aoqi@1 297 F12_off, SLOT2( F12H_off)
aoqi@1 298 F13_off, SLOT2( F13H_off)
aoqi@1 299 F14_off, SLOT2( F14H_off)
aoqi@1 300 F15_off, SLOT2( F15H_off)
aoqi@1 301 F16_off, SLOT2( F16H_off)
aoqi@1 302 F17_off, SLOT2( F17H_off)
aoqi@1 303 F18_off, SLOT2( F18H_off)
aoqi@1 304 F19_off, SLOT2( F19H_off)
aoqi@1 305
aoqi@1 306 GP_off, SLOT2( GPH_off)
aoqi@1 307 //temp_2_off,
aoqi@1 308 temp_1_off, SLOT2(temp_1H_off)
aoqi@1 309 saved_fp_off, SLOT2(saved_fpH_off)
aoqi@1 310 return_off, SLOT2(returnH_off)
aoqi@1 311
aoqi@1 312 reg_save_frame_size,
aoqi@1 313
aoqi@1 314 // illegal instruction handler
aoqi@1 315 continue_dest_off = temp_1_off,
aoqi@1 316
aoqi@1 317 // deoptimization equates
aoqi@1 318 //deopt_type = temp_2_off, // slot for type of deopt in progress
aoqi@1 319 ret_type = temp_1_off // slot for return type
aoqi@1 320 };
aoqi@1 321
aoqi@6880 322
aoqi@6880 323
aoqi@1 324 // Save off registers which might be killed by calls into the runtime.
aoqi@1 325 // Tries to be smart about FP registers. In particular we separate
aoqi@1 326 // saving and describing the FPU registers for deoptimization since we
aoqi@1 327 // have to save the FPU registers twice if we describe them and on P4
aoqi@1 328 // saving FPU registers which don't contain anything appears
aoqi@1 329 // expensive. The deopt blob is the only thing which needs to
aoqi@1 330 // describe FPU registers. In all other cases it should be sufficient
aoqi@1 331 // to simply save their current value.
aoqi@1 332 //FIXME, I have no idea which registers should be saved. @jerome
aoqi@1 333 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
aoqi@6880 334 bool save_fpu_registers = true, bool describe_fpu_registers = false) {
aoqi@1 335
aoqi@8865 336 LP64_ONLY(num_rt_args = 0);
aoqi@8865 337 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
aoqi@8865 338 int frame_size_in_slots = reg_save_frame_size + num_rt_args * wordSize / VMRegImpl::slots_per_word; // args + thread
aoqi@8865 339 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
aoqi@1 340
aoqi@1 341 // record saved value locations in an OopMap
aoqi@6880 342 // locations are offsets from sp after runtime call; num_rt_args is number of arguments
aoqi@1 343 // in call, including thread
aoqi@1 344 OopMap* map = new OopMap(reg_save_frame_size, 0);
aoqi@6880 345
aoqi@1 346 map->set_callee_saved(VMRegImpl::stack2reg(V0_off + num_rt_args), V0->as_VMReg());
aoqi@1 347 map->set_callee_saved(VMRegImpl::stack2reg(V1_off + num_rt_args), V1->as_VMReg());
aoqi@1 348 #ifdef _LP64
aoqi@1 349 map->set_callee_saved(VMRegImpl::stack2reg(V0H_off + num_rt_args), V0->as_VMReg()->next());
aoqi@1 350 map->set_callee_saved(VMRegImpl::stack2reg(V1H_off + num_rt_args), V1->as_VMReg()->next());
aoqi@1 351 #endif
aoqi@1 352
aoqi@1 353 int i = 0;
aoqi@1 354 #ifndef _LP64
aoqi@1 355 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 356 map->set_callee_saved(VMRegImpl::stack2reg(T0_off + num_rt_args + i++), r->as_VMReg());
aoqi@1 357 }
aoqi@1 358 #else
aoqi@1 359 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 360 map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg());
aoqi@1 361 map->set_callee_saved(VMRegImpl::stack2reg(A4_off + num_rt_args + i++), r->as_VMReg()->next());
aoqi@1 362 }
aoqi@1 363 #endif
aoqi@1 364
aoqi@1 365 i = 0;
aoqi@1 366 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 367 map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg());
aoqi@1 368 #ifdef _LP64
aoqi@1 369 map->set_callee_saved(VMRegImpl::stack2reg(S0_off + num_rt_args + i++), r->as_VMReg()->next());
aoqi@1 370 #endif
aoqi@1 371 }
aoqi@1 372
aoqi@1 373 map->set_callee_saved(VMRegImpl::stack2reg(FP_off + num_rt_args), FP->as_VMReg());
aoqi@1 374 map->set_callee_saved(VMRegImpl::stack2reg(GP_off + num_rt_args), GP->as_VMReg());
aoqi@1 375 map->set_callee_saved(VMRegImpl::stack2reg(T8_off + num_rt_args), T8->as_VMReg());
aoqi@1 376 map->set_callee_saved(VMRegImpl::stack2reg(T9_off + num_rt_args), T9->as_VMReg());
aoqi@1 377 map->set_callee_saved(VMRegImpl::stack2reg(A0_off + num_rt_args), A0->as_VMReg());
aoqi@1 378 map->set_callee_saved(VMRegImpl::stack2reg(A1_off + num_rt_args), A1->as_VMReg());
aoqi@1 379 map->set_callee_saved(VMRegImpl::stack2reg(A2_off + num_rt_args), A2->as_VMReg());
aoqi@1 380 map->set_callee_saved(VMRegImpl::stack2reg(A3_off + num_rt_args), A3->as_VMReg());
aoqi@1 381
aoqi@1 382 map->set_callee_saved(VMRegImpl::stack2reg(F0_off + num_rt_args), F0->as_VMReg());
aoqi@1 383 map->set_callee_saved(VMRegImpl::stack2reg(F1_off + num_rt_args), F1->as_VMReg());
aoqi@1 384 map->set_callee_saved(VMRegImpl::stack2reg(F2_off + num_rt_args), F2->as_VMReg());
aoqi@1 385 map->set_callee_saved(VMRegImpl::stack2reg(F3_off + num_rt_args), F3->as_VMReg());
aoqi@1 386 map->set_callee_saved(VMRegImpl::stack2reg(F4_off + num_rt_args), F4->as_VMReg());
aoqi@1 387 map->set_callee_saved(VMRegImpl::stack2reg(F5_off + num_rt_args), F5->as_VMReg());
aoqi@1 388 map->set_callee_saved(VMRegImpl::stack2reg(F6_off + num_rt_args), F6->as_VMReg());
aoqi@1 389 map->set_callee_saved(VMRegImpl::stack2reg(F7_off + num_rt_args), F7->as_VMReg());
aoqi@1 390 map->set_callee_saved(VMRegImpl::stack2reg(F8_off + num_rt_args), F8->as_VMReg());
aoqi@1 391 map->set_callee_saved(VMRegImpl::stack2reg(F9_off + num_rt_args), F9->as_VMReg());
aoqi@1 392 map->set_callee_saved(VMRegImpl::stack2reg(F10_off + num_rt_args), F10->as_VMReg());
aoqi@1 393 map->set_callee_saved(VMRegImpl::stack2reg(F11_off + num_rt_args), F11->as_VMReg());
aoqi@1 394 map->set_callee_saved(VMRegImpl::stack2reg(F12_off + num_rt_args), F12->as_VMReg());
aoqi@1 395 map->set_callee_saved(VMRegImpl::stack2reg(F13_off + num_rt_args), F13->as_VMReg());
aoqi@1 396 map->set_callee_saved(VMRegImpl::stack2reg(F14_off + num_rt_args), F14->as_VMReg());
aoqi@1 397 map->set_callee_saved(VMRegImpl::stack2reg(F15_off + num_rt_args), F15->as_VMReg());
aoqi@1 398 map->set_callee_saved(VMRegImpl::stack2reg(F16_off + num_rt_args), F16->as_VMReg());
aoqi@1 399 map->set_callee_saved(VMRegImpl::stack2reg(F17_off + num_rt_args), F17->as_VMReg());
aoqi@1 400 map->set_callee_saved(VMRegImpl::stack2reg(F18_off + num_rt_args), F18->as_VMReg());
aoqi@1 401 map->set_callee_saved(VMRegImpl::stack2reg(F19_off + num_rt_args), F19->as_VMReg());
aoqi@1 402
aoqi@1 403 #ifdef _LP64
aoqi@1 404 map->set_callee_saved(VMRegImpl::stack2reg(FPH_off + num_rt_args), FP->as_VMReg()->next());
aoqi@1 405 map->set_callee_saved(VMRegImpl::stack2reg(GPH_off + num_rt_args), GP->as_VMReg()->next());
aoqi@1 406 map->set_callee_saved(VMRegImpl::stack2reg(T8H_off + num_rt_args), T8->as_VMReg()->next());
aoqi@1 407 map->set_callee_saved(VMRegImpl::stack2reg(T9H_off + num_rt_args), T9->as_VMReg()->next());
aoqi@1 408 map->set_callee_saved(VMRegImpl::stack2reg(A0H_off + num_rt_args), A0->as_VMReg()->next());
aoqi@1 409 map->set_callee_saved(VMRegImpl::stack2reg(A1H_off + num_rt_args), A1->as_VMReg()->next());
aoqi@1 410 map->set_callee_saved(VMRegImpl::stack2reg(A2H_off + num_rt_args), A2->as_VMReg()->next());
aoqi@1 411 map->set_callee_saved(VMRegImpl::stack2reg(A3H_off + num_rt_args), A3->as_VMReg()->next());
aoqi@1 412 #endif
aoqi@1 413 return map;
aoqi@1 414 }
aoqi@1 415
aoqi@1 416 //FIXME, Is it enough to save these registers? by yyq
aoqi@8865 417 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
aoqi@8865 418 bool save_fpu_registers = true,
aoqi@1 419 bool describe_fpu_registers = false) {
aoqi@1 420 //const int reg_save_frame_size = return_off + 1 + num_rt_args;
aoqi@1 421 __ block_comment("save_live_registers");
aoqi@1 422
aoqi@6880 423 // save all register state - int, fpu
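// The last two word slots of the layout (saved_fp_off / return_off) were already pushed
// by the caller's enter(), hence the "- 2" in the SP adjustment below.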
aoqi@1 424 __ addi(SP, SP, -(reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
aoqi@6880 425
aoqi@1 426 #ifndef _LP64
aoqi@1 427 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 428 __ sw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 429 #else
aoqi@1 430 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 431 __ sd(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1 432 #endif
aoqi@1 433 }
aoqi@1 434 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 435 __ st_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 436 }
aoqi@1 437 __ st_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1 438 __ st_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
aoqi@1 439 __ st_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
aoqi@1 440 __ st_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
aoqi@1 441 __ st_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
aoqi@1 442 __ st_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
aoqi@1 443 __ st_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
aoqi@1 444 __ st_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
aoqi@1 445 __ st_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
aoqi@6880 446 __ st_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);
aoqi@1 447
aoqi@6880 448 __ sdc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
aoqi@6880 449 __ sdc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
aoqi@6880 450 __ sdc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
aoqi@6880 451 __ sdc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
aoqi@6880 452 __ sdc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
aoqi@6880 453 __ sdc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
aoqi@6880 454 __ sdc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
aoqi@6880 455 __ sdc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
aoqi@6880 456 __ sdc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
aoqi@6880 457 __ sdc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
aoqi@6880 458 __ sdc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
aoqi@6880 459 __ sdc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
aoqi@6880 460 __ sdc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
aoqi@6880 461 __ sdc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
aoqi@6880 462 __ sdc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
aoqi@6880 463 __ sdc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
aoqi@6880 464 __ sdc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
aoqi@6880 465 __ sdc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
aoqi@6880 466 __ sdc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
aoqi@6880 467 __ sdc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
aoqi@1 468
aoqi@1 469 return generate_oop_map(sasm, num_rt_args, save_fpu_registers, describe_fpu_registers);
aoqi@1 470 }
aoqi@1 471
aoqi@1 472 static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
aoqi@1 473 //static void restore_live_registers(MacroAssembler* sasm) {
aoqi@1 474 #ifndef _LP64
aoqi@1 475 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@1 476 __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 477 #else
aoqi@1 478 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@1 479 __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1 480 #endif
aoqi@1 481 }
aoqi@1 482 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@1 483 __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 484 }
aoqi@1 485 __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1 486 __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
aoqi@1 487
aoqi@1 488 __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
aoqi@1 489 __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
aoqi@1 490 __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
aoqi@1 491 __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
aoqi@1 492 __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
aoqi@1 493 __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
aoqi@1 494
aoqi@1 495 __ ld_ptr(V0, SP, V0_off * wordSize / SLOT_PER_WORD);
aoqi@6880 496 __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);
aoqi@1 497
aoqi@1 498 __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
aoqi@1 499 __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
aoqi@1 500 __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
aoqi@1 501 __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
aoqi@1 502 __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
aoqi@1 503 __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
aoqi@1 504 __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
aoqi@1 505 __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
aoqi@1 506 __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
aoqi@1 507 __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
aoqi@1 508 __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
aoqi@1 509 __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
aoqi@1 510 __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
aoqi@1 511 __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
aoqi@1 512 __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
aoqi@1 513 __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
aoqi@1 514 __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
aoqi@1 515 __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
aoqi@1 516 __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
aoqi@1 517 __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
aoqi@1 518
aoqi@1 519 __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
aoqi@1 520 }
aoqi@1 521
aoqi@1 522 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
aoqi@1 523 __ block_comment("restore_live_registers");
aoqi@1 524 restore_fpu(sasm, restore_fpu_registers);
aoqi@1 525 }
aoqi@1 526
aoqi@6880 527 static void restore_live_registers_except_V0(StubAssembler* sasm, bool restore_fpu_registers = true) {
aoqi@1 528 //static void restore_live_registers(MacroAssembler* sasm) {
aoqi@1 529 //FIXME: maybe V1 needs to be saved too
aoqi@1 530 __ block_comment("restore_live_registers except V0");
aoqi@1 531 #ifndef _LP64
aoqi@1 532 for (Register r = T0; r != T7->successor(); r = r->successor() ) {
aoqi@6880 533 __ lw(r, SP, (r->encoding() - T0->encoding() + T0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 534 #else
aoqi@1 535 for (Register r = A4; r != T3->successor(); r = r->successor() ) {
aoqi@6880 536 __ ld(r, SP, (r->encoding() - A4->encoding() + A4_off / SLOT_PER_WORD) * wordSize);
aoqi@1 537 #endif
aoqi@1 538 }
aoqi@1 539 for (Register r = S0; r != S7->successor(); r = r->successor() ) {
aoqi@6880 540 __ ld_ptr(r, SP, (r->encoding() - S0->encoding() + S0_off / SLOT_PER_WORD) * wordSize);
aoqi@1 541 }
aoqi@1 542 __ ld_ptr(FP, SP, FP_off * wordSize / SLOT_PER_WORD);
aoqi@1 543 __ ld_ptr(GP, SP, GP_off * wordSize / SLOT_PER_WORD);
aoqi@1 544
aoqi@1 545 __ ld_ptr(T8, SP, T8_off * wordSize / SLOT_PER_WORD);
aoqi@1 546 __ ld_ptr(T9, SP, T9_off * wordSize / SLOT_PER_WORD);
aoqi@1 547 __ ld_ptr(A0, SP, A0_off * wordSize / SLOT_PER_WORD);
aoqi@1 548 __ ld_ptr(A1, SP, A1_off * wordSize / SLOT_PER_WORD);
aoqi@1 549 __ ld_ptr(A2, SP, A2_off * wordSize / SLOT_PER_WORD);
aoqi@1 550 __ ld_ptr(A3, SP, A3_off * wordSize / SLOT_PER_WORD);
aoqi@1 551
aoqi@1 552 #if 1
aoqi@1 553 __ ldc1(F0, SP, F0_off * wordSize / SLOT_PER_WORD);
aoqi@1 554 __ ldc1(F1, SP, F1_off * wordSize / SLOT_PER_WORD);
aoqi@1 555 __ ldc1(F2, SP, F2_off * wordSize / SLOT_PER_WORD);
aoqi@1 556 __ ldc1(F3, SP, F3_off * wordSize / SLOT_PER_WORD);
aoqi@1 557 __ ldc1(F4, SP, F4_off * wordSize / SLOT_PER_WORD);
aoqi@1 558 __ ldc1(F5, SP, F5_off * wordSize / SLOT_PER_WORD);
aoqi@1 559 __ ldc1(F6, SP, F6_off * wordSize / SLOT_PER_WORD);
aoqi@1 560 __ ldc1(F7, SP, F7_off * wordSize / SLOT_PER_WORD);
aoqi@1 561 __ ldc1(F8, SP, F8_off * wordSize / SLOT_PER_WORD);
aoqi@1 562 __ ldc1(F9, SP, F9_off * wordSize / SLOT_PER_WORD);
aoqi@1 563 __ ldc1(F10, SP, F10_off * wordSize / SLOT_PER_WORD);
aoqi@1 564 __ ldc1(F11, SP, F11_off * wordSize / SLOT_PER_WORD);
aoqi@1 565 __ ldc1(F12, SP, F12_off * wordSize / SLOT_PER_WORD);
aoqi@1 566 __ ldc1(F13, SP, F13_off * wordSize / SLOT_PER_WORD);
aoqi@1 567 __ ldc1(F14, SP, F14_off * wordSize / SLOT_PER_WORD);
aoqi@1 568 __ ldc1(F15, SP, F15_off * wordSize / SLOT_PER_WORD);
aoqi@1 569 __ ldc1(F16, SP, F16_off * wordSize / SLOT_PER_WORD);
aoqi@1 570 __ ldc1(F17, SP, F17_off * wordSize / SLOT_PER_WORD);
aoqi@1 571 __ ldc1(F18, SP, F18_off * wordSize / SLOT_PER_WORD);
aoqi@1 572 __ ldc1(F19, SP, F19_off * wordSize / SLOT_PER_WORD);
aoqi@1 573 #endif
aoqi@1 574
aoqi@6880 575 __ ld_ptr(V1, SP, V1_off * wordSize / SLOT_PER_WORD);
aoqi@1 576
aoqi@1 577 __ addiu(SP, SP, (reg_save_frame_size / SLOT_PER_WORD - 2) * wordSize);
aoqi@1 578 }
aoqi@1 579
aoqi@1 580 void Runtime1::initialize_pd() {
aoqi@1 581 // nothing to do
aoqi@1 582 }
aoqi@1 583
aoqi@1 584 // target: the entry point of the method that creates and posts the exception oop
aoqi@1 585 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
aoqi@1 586 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
aoqi@6880 587 // preserve all registers
aoqi@6880 588 OopMap* oop_map = save_live_registers(sasm, 0);
aoqi@1 589
aoqi@6880 590 // now all registers are saved and can be used freely
aoqi@6880 591 // verify that no old value is used accidentally
aoqi@6880 592 // all registers are saved; I think MIPS does not need this
aoqi@1 593
aoqi@6880 594 // registers used by this stub
aoqi@6880 595 const Register temp_reg = T3;
aoqi@6880 596 // load argument for exception that is passed as an argument into the stub
aoqi@6880 597 if (has_argument) {
aoqi@6880 598 __ ld_ptr(temp_reg, Address(FP, 2*BytesPerWord));
aoqi@6880 599 }
aoqi@6880 600 int call_offset;
aoqi@6880 601 if (has_argument)
aoqi@6880 602 call_offset = __ call_RT(noreg, noreg, target, temp_reg);
aoqi@1 603 else
aoqi@6880 604 call_offset = __ call_RT(noreg, noreg, target);
aoqi@1 605
aoqi@6880 606 OopMapSet* oop_maps = new OopMapSet();
aoqi@6880 607 oop_maps->add_gc_map(call_offset, oop_map);
aoqi@1 608
aoqi@6880 609 __ stop("should not reach here");
aoqi@6880 610
aoqi@6880 611 return oop_maps;
aoqi@1 612 }
aoqi@1 613
aoqi@1 614 //FIXME: I do not know which register to use; should use T3 as real_return_addr @jerome
aoqi@1 615 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
aoqi@8865 616 __ block_comment("generate_handle_exception");
aoqi@8865 617
aoqi@8865 618 // incoming parameters
aoqi@6880 619 const Register exception_oop = V0;
aoqi@6880 620 const Register exception_pc = V1;
aoqi@6880 621 // other registers used in this stub
aoqi@8865 622 // const Register real_return_addr = T3;
aoqi@8865 623 const Register thread = TREG;
aoqi@8865 624 #ifndef OPT_THREAD
aoqi@8865 625 __ get_thread(thread);
aoqi@8865 626 #endif
aoqi@1 627 // Save registers, if required.
aoqi@8865 628 OopMapSet* oop_maps = new OopMapSet();
aoqi@8865 629 OopMap* oop_map = NULL;
aoqi@8865 630 switch (id) {
aoqi@8865 631 case forward_exception_id:
aoqi@8865 632 // We're handling an exception in the context of a compiled frame.
aoqi@8865 633 // The registers have been saved in the standard places. Perform
aoqi@8865 634 // an exception lookup in the caller and dispatch to the handler
aoqi@8865 635 // if found. Otherwise unwind and dispatch to the callers
aoqi@8865 636 // exception handler.
aoqi@8865 637 oop_map = generate_oop_map(sasm, 1 /*thread*/);
aoqi@6880 638
aoqi@8865 639 // load and clear pending exception oop into V0 (exception_oop)
aoqi@8865 640 __ ld_ptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
aoqi@8865 641 __ st_ptr(R0, Address(thread, Thread::pending_exception_offset()));
aoqi@6880 642
aoqi@8865 643 // load issuing PC (the return address for this stub) into V1 (exception_pc)
aoqi@8865 644 __ ld_ptr(exception_pc, Address(FP, 1*BytesPerWord));
aoqi@6880 645
aoqi@8865 646 // make sure that the vm_results are cleared (may be unnecessary)
aoqi@8865 647 __ st_ptr(R0, Address(thread, JavaThread::vm_result_offset()));
aoqi@8865 648 __ st_ptr(R0, Address(thread, JavaThread::vm_result_2_offset()));
aoqi@8865 649 break;
aoqi@8865 650 case handle_exception_nofpu_id:
aoqi@8865 651 case handle_exception_id:
aoqi@8865 652 // At this point all registers MAY be live.
aoqi@8865 653 oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
aoqi@8865 654 break;
aoqi@8865 655 case handle_exception_from_callee_id: {
aoqi@8865 656 // At this point all registers except the exception oop (V0) and
aoqi@8865 657 // the exception pc (V1) are dead.
aoqi@8865 658 const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/);
aoqi@8865 659 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
aoqi@8865 660 sasm->set_frame_size(frame_size);
aoqi@8865 661 break;
aoqi@8865 662 }
aoqi@8865 663 default: ShouldNotReachHere();
aoqi@8865 664 }
aoqi@1 665
aoqi@1 666 #ifdef TIERED
aoqi@6880 667 // C2 can leave the fpu stack dirty
aoqi@6880 668 __ empty_FPU_stack();
aoqi@1 669 #endif // TIERED
aoqi@1 670
aoqi@6880 671 // verify that only V0 and V1 are valid at this time
aoqi@6880 672 // verify that V0 contains a valid exception
aoqi@6880 673 __ verify_not_null_oop(exception_oop);
aoqi@1 674
aoqi@6880 675 // load address of JavaThread object for thread-local data
aoqi@6880 676 __ get_thread(thread);
aoqi@1 677
aoqi@1 678 #ifdef ASSERT
aoqi@6880 679 // check that fields in JavaThread for exception oop and issuing pc are
aoqi@6880 680 // empty before writing to them
aoqi@6880 681 Label oop_empty;
aoqi@6880 682 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@6880 683 __ beq(AT, R0, oop_empty);
aoqi@6880 684 __ delayed()->nop();
aoqi@6880 685 __ stop("exception oop already set");
aoqi@6880 686 __ bind(oop_empty);
aoqi@6880 687 Label pc_empty;
aoqi@6880 688 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@6880 689 __ beq(AT, R0, pc_empty);
aoqi@6880 690 __ delayed()->nop();
aoqi@6880 691 __ stop("exception pc already set");
aoqi@6880 692 __ bind(pc_empty);
aoqi@1 693 #endif
aoqi@1 694
aoqi@6880 695 // save exception oop and issuing pc into JavaThread
aoqi@6880 696 // (exception handler will load it from here)
aoqi@6880 697 __ st_ptr(exception_oop, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@6880 698 __ st_ptr(exception_pc, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@1 699
aoqi@8865 700 // patch throwing pc into return address (has bci & oop map)
aoqi@8865 701 __ st_ptr(exception_pc, Address(FP, 1*BytesPerWord));
aoqi@1 702
aoqi@6880 703 // compute the exception handler.
aoqi@6880 704 // the exception oop and the throwing pc are read from the fields in JavaThread
fujie@9161 705 __ block_comment(";; will call_RT exception_handler_for_pc");
aoqi@8865 706 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
aoqi@6880 707 oop_maps->add_gc_map(call_offset, oop_map);
fujie@9161 708 __ block_comment(";; end of call_RT exception_handler_for_pc");
aoqi@6880 709 // V0: handler address or NULL if no handler exists
aoqi@6880 710 // will be the deopt blob if nmethod was deoptimized while we looked up
aoqi@6880 711 // handler regardless of whether handler existed in the nmethod.
aoqi@1 712
aoqi@6880 713 // only V0 is valid at this time, all other registers have been destroyed by the
aoqi@6880 714 // runtime call
aoqi@1 715
aoqi@6880 716 // patch the return address -> the stub will directly return to the exception handler
aoqi@8865 717 __ st_ptr(V0, Address(FP, 1 * BytesPerWord));
aoqi@1 718
aoqi@8865 719 switch (id) {
aoqi@8865 720 case forward_exception_id:
aoqi@8865 721 case handle_exception_nofpu_id:
aoqi@8865 722 case handle_exception_id:
aoqi@8865 723 // Restore the registers that were saved at the beginning.
aoqi@8865 724 restore_live_registers(sasm, id != handle_exception_nofpu_id);
aoqi@8865 725 break;
aoqi@8865 726 case handle_exception_from_callee_id:
aoqi@8865 727 // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
aoqi@8865 728 // since we do a leave anyway.
aoqi@1 729
aoqi@8865 730 // Pop the return address since we are possibly changing SP (restoring from BP).
aoqi@8865 731 __ leave();
aoqi@8865 732 // Restore SP from BP if the exception PC is a method handle call site.
aoqi@8865 733 {
aoqi@8865 734 Label done;
aoqi@8865 735 __ ld(AT, Address(thread, JavaThread::is_method_handle_return_offset()));
aoqi@8865 736 __ beq(AT, R0, done);
aoqi@8865 737 __ delayed()->nop();
aoqi@8865 738 __ bind(done);
aoqi@8865 739 }
aoqi@8865 740 __ jr(RA); // jump to exception handler
aoqi@8865 741 __ delayed()->nop();
aoqi@8865 742 break;
aoqi@8865 743 default: ShouldNotReachHere();
aoqi@8865 744 }
aoqi@1 745
aoqi@8865 746 return oop_maps;
aoqi@8865 747 }
aoqi@1 748
aoqi@1 749
aoqi@1 750
aoqi@1 751
aoqi@1 752
aoqi@1 753 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
aoqi@6880 754 // incoming parameters
aoqi@6880 755 const Register exception_oop = V0;
aoqi@8865 756 // callee-saved copy of exception_oop during runtime call
aoqi@8865 757 const Register exception_oop_callee_saved = S0;
aoqi@6880 758 // other registers used in this stub
aoqi@6880 759 const Register exception_pc = V1;
aoqi@6880 760 const Register handler_addr = T3;
aoqi@8865 761 const Register thread = TREG;
aoqi@1 762
aoqi@6880 763 // verify that only V0 (exception_oop) is valid at this time
aoqi@6880 764 // __ invalidate_registers(false, true, true, true, true, true);
aoqi@1 765
aoqi@1 766 #ifdef ASSERT
aoqi@6880 767 // check that fields in JavaThread for exception oop and issuing pc are empty
aoqi@6880 768 __ get_thread(thread);
aoqi@6880 769 Label oop_empty;
aoqi@6880 770 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset()));
aoqi@6880 771 __ beq(AT, R0, oop_empty);
aoqi@6880 772 __ delayed()->nop();
aoqi@6880 773 __ stop("exception oop must be empty");
aoqi@6880 774 __ bind(oop_empty);
aoqi@1 775
aoqi@6880 776 Label pc_empty;
aoqi@6880 777 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_pc_offset()));
aoqi@8865 778 __ beq(AT, R0, pc_empty);
aoqi@6880 779 __ delayed()->nop();
aoqi@6880 780 __ stop("exception pc must be empty");
aoqi@6880 781 __ bind(pc_empty);
aoqi@1 782 #endif
aoqi@6880 783 // clear the FPU stack in case any FPU results are left behind
aoqi@6880 784 __ empty_FPU_stack();
aoqi@1 785
aoqi@8865 786 // save exception_oop in callee-saved register to preserve it during runtime calls
aoqi@8865 787 __ verify_not_null_oop(exception_oop);
aoqi@8865 788 __ move(exception_oop_callee_saved, exception_oop);
aoqi@8865 789
aoqi@8865 790 #ifndef OPT_THREAD
aoqi@8865 791 __ get_thread(thread);
aoqi@8865 792 #endif
aoqi@8865 793 // Get the return address (it is on top of the stack after leave()).
aoqi@8865 795
aoqi@6880 796 __ ld_ptr(exception_pc, SP, 0);
aoqi@1 797
aoqi@6880 798 // search the exception handler address of the caller (using the return address)
aoqi@8865 799 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
aoqi@8865 800 // V0: exception handler address of the caller
aoqi@1 801
aoqi@6880 802 // only V0 is valid at this time; all other registers have been destroyed by the call
aoqi@1 803
aoqi@6880 804 // move result of call into correct register
aoqi@6880 805 __ move(handler_addr, V0);
aoqi@1 806
aoqi@8865 807 // Restore exception oop to V0 (required convention of exception handler).
aoqi@8865 808 __ move(exception_oop, exception_oop_callee_saved);
aoqi@8865 809
aoqi@8865 810 // verify that there is really a valid exception in V0
aoqi@6880 811 __ verify_oop(exception_oop);
aoqi@1 812
aoqi@6880 813 // get throwing pc (= return address).
aoqi@8865 814 // V1 has been destroyed by the call, so it must be set again
aoqi@6880 815 // the pop is also necessary to simulate the effect of a ret(0)
aoqi@6880 816 __ super_pop(exception_pc);
aoqi@1 817
aoqi@6880 818 // continue at exception handler (return address removed)
aoqi@6880 819 // note: do *not* remove arguments when unwinding the
aoqi@6880 820 // activation since the caller assumes having
aoqi@6880 821 // all arguments on the stack when entering the
aoqi@6880 822 // runtime to determine the exception handler
aoqi@6880 823 // (GC happens at call site with arguments!)
aoqi@8865 824 // V0: exception oop
aoqi@8865 825 // V1: throwing pc
aoqi@8865 826 // T3: exception handler
aoqi@6880 827 __ jr(handler_addr);
aoqi@6880 828 __ delayed()->nop();
aoqi@1 829 }
aoqi@1 830
aoqi@1 831
aoqi@1 832
aoqi@1 833
aoqi@1 834 //static address deopt_with_exception_entry_for_patch = NULL;
aoqi@1 835
aoqi@1 836 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
aoqi@1 837
aoqi@6880 838 // use the maximum number of runtime-arguments here because it is difficult to
aoqi@6880 839 // distinguish each RT-Call.
aoqi@6880 840 // Note: This number affects also the RT-Call in generate_handle_exception because
aoqi@6880 841 // the oop-map is shared for all calls.
aoqi@1 842
aoqi@6880 843 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
aoqi@6880 844 assert(deopt_blob != NULL, "deoptimization blob must have been created");
aoqi@6880 845 // assert(deopt_with_exception_entry_for_patch != NULL,
aoqi@6880 846 // "deoptimization blob must have been created");
aoqi@6880 847
aoqi@6880 848 //OopMap* oop_map = save_live_registers(sasm, num_rt_args);
aoqi@6880 849 OopMap* oop_map = save_live_registers(sasm, 0);
aoqi@6880 850 const Register thread = T8;
aoqi@6880 851 // push java thread (becomes first argument of C function)
aoqi@6880 852 __ get_thread(thread);
aoqi@6880 853 __ move(A0, thread);
aoqi@1 854
aoqi@1 855
aoqi@6880 856 /*
aoqi@6880 857 * NOTE: this frame should be a compiled frame, but at this point the pc in the frame anchor
aoqi@6880 858 * still points into the interpreter. That is wrong and should be cleared, but it is not.
aoqi@6880 859 * Even if we cleared the wrong pc in the anchor, the default way of getting the caller pc
aoqi@6880 860 * for this frame would still be wrong: it assumes the caller pc is stored at *(sp - 1), which is not the case here.
aoqi@1 861 */
aoqi@6880 862 __ set_last_Java_frame(thread, NOREG, FP, NULL);
aoqi@6880 863 NOT_LP64(__ addiu(SP, SP, (-1) * wordSize));
aoqi@6880 864 __ move(AT, -(StackAlignmentInBytes));
aoqi@6880 865 __ andr(SP, SP, AT);
aoqi@6880 866 __ relocate(relocInfo::internal_pc_type);
aoqi@6880 867 {
aoqi@1 868 #ifndef _LP64
aoqi@6880 869 int save_pc = (int)__ pc() + 12 + NativeCall::return_address_offset;
aoqi@6880 870 __ lui(AT, Assembler::split_high(save_pc));
aoqi@6880 871 __ addiu(AT, AT, Assembler::split_low(save_pc));
aoqi@1 872 #else
aoqi@6880 873 uintptr_t save_pc = (uintptr_t)__ pc() + NativeMovConstReg::instruction_size + 1 * BytesPerInstWord + NativeCall::return_address_offset_long;
aoqi@6880 874 __ li48(AT, save_pc);
aoqi@1 875 #endif
aoqi@6880 876 }
aoqi@6880 877 __ st_ptr(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
aoqi@1 878
aoqi@6880 879 // do the call
aoqi@1 880 #ifndef _LP64
aoqi@6880 881 __ lui(T9, Assembler::split_high((int)target));
aoqi@6880 882 __ addiu(T9, T9, Assembler::split_low((int)target));
aoqi@1 883 #else
aoqi@6880 884 __ li48(T9, (intptr_t)target);
aoqi@1 885 #endif
aoqi@6880 886 __ jalr(T9);
aoqi@6880 887 __ delayed()->nop();
aoqi@6880 888 OopMapSet* oop_maps = new OopMapSet();
aoqi@6880 889 oop_maps->add_gc_map(__ offset(), oop_map);
aoqi@1 890
aoqi@6880 891 __ get_thread(thread);
aoqi@1 892
aoqi@6880 893 __ ld_ptr (SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
fujie@9171 894 __ reset_last_Java_frame(thread, true);
aoqi@6880 895 // discard thread arg
aoqi@6880 896 // check for pending exceptions
aoqi@6880 897 {
aoqi@6880 898 Label L, skip;
aoqi@6880 899 //Label no_deopt;
aoqi@6880 900 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
aoqi@6880 901 __ beq(AT, R0, L);
aoqi@6880 902 __ delayed()->nop();
aoqi@6880 903 // exception pending => remove activation and forward to exception handler
aoqi@1 904
aoqi@8865 905 __ bne(V0, R0, skip);
aoqi@6880 906 __ delayed()->nop();
aoqi@6880 907 __ jmp(Runtime1::entry_for(Runtime1::forward_exception_id),
aoqi@6880 908 relocInfo::runtime_call_type);
aoqi@6880 909 __ delayed()->nop();
aoqi@6880 910 __ bind(skip);
aoqi@1 911
aoqi@6880 912 // the deopt blob expects exceptions in the special fields of
aoqi@6880 913 // JavaThread, so copy and clear pending exception.
aoqi@1 914
aoqi@6880 915 // load and clear pending exception
aoqi@6880 916 __ ld_ptr(V0, Address(thread,in_bytes(Thread::pending_exception_offset())));
aoqi@6880 917 __ st_ptr(R0, Address(thread, in_bytes(Thread::pending_exception_offset())));
aoqi@1 918
aoqi@6880 919 // check that there is really a valid exception
aoqi@6880 920 __ verify_not_null_oop(V0);
aoqi@6880 921
aoqi@6880 922 // load throwing pc: this is the return address of the stub
aoqi@6880 923 __ ld_ptr(V1, Address(SP, return_off * BytesPerWord));
aoqi@1 924
aoqi@1 925
aoqi@1 926 #ifdef ASSERT
aoqi@6880 927 // check that fields in JavaThread for exception oop and issuing pc are empty
aoqi@6880 928 Label oop_empty;
aoqi@6880 929 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@6880 930 __ beq(AT,R0,oop_empty);
aoqi@6880 931 __ delayed()->nop();
aoqi@6880 932 __ stop("exception oop must be empty");
aoqi@6880 933 __ bind(oop_empty);
aoqi@1 934
aoqi@6880 935 Label pc_empty;
aoqi@6880 936 __ ld_ptr(AT, Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@6880 937 __ beq(AT,R0,pc_empty);
aoqi@6880 938 __ delayed()->nop();
aoqi@6880 939 __ stop("exception pc must be empty");
aoqi@6880 940 __ bind(pc_empty);
aoqi@1 941 #endif
aoqi@1 942
aoqi@6880 943 // store exception oop and throwing pc to JavaThread
aoqi@6880 944 __ st_ptr(V0,Address(thread, in_bytes(JavaThread::exception_oop_offset())));
aoqi@6880 945 __ st_ptr(V1,Address(thread, in_bytes(JavaThread::exception_pc_offset())));
aoqi@1 946
aoqi@6880 947 restore_live_registers(sasm);
aoqi@1 948
aoqi@6880 949 __ leave();
aoqi@1 950
aoqi@6880 951 // Forward the exception directly to deopt blob. We can blow no
aoqi@6880 952 // registers and must leave throwing pc on the stack. A patch may
aoqi@6880 953 // have values live in registers, so we use the entry point that takes the
aoqi@6880 954 // exception in TLS.
aoqi@6880 955 __ jmp(deopt_blob->unpack_with_exception_in_tls(), relocInfo::runtime_call_type);
aoqi@6880 956 __ delayed()->nop();
aoqi@1 957
aoqi@6880 958 __ bind(L);
aoqi@6880 959 }
aoqi@1 960
aoqi@6880 961 // Runtime will return true if the nmethod has been deoptimized during
aoqi@6880 962 // the patching process. In that case we must do a deopt reexecute instead.
aoqi@1 963
aoqi@6880 964 Label reexecuteEntry, cont;
aoqi@1 965
aoqi@6880 966 __ beq(V0, R0, cont); // have we deoptimized?
aoqi@6880 967 __ delayed()->nop();
aoqi@1 968
aoqi@6880 969 // Will reexecute. The proper return address is already on the stack; we just restore
aoqi@6880 970 // registers, pop all of our frame except the return address, and jump to the deopt blob.
aoqi@6880 971 restore_live_registers(sasm);
aoqi@1 972
aoqi@6880 973 __ leave();
aoqi@6880 974 __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
aoqi@6880 975 __ delayed()->nop();
aoqi@1 976
aoqi@6880 977 __ bind(cont);
aoqi@6880 978 restore_live_registers(sasm);
aoqi@1 979
aoqi@6880 980 __ leave();
aoqi@6880 981 __ jr(RA);
aoqi@6880 982 __ delayed()->nop();
aoqi@6880 983
aoqi@6880 984 return oop_maps;
aoqi@1 985 }
aoqi@1 986
aoqi@1 987
aoqi@1 988 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
aoqi@6880 989 // for better readability
aoqi@6880 990 const bool must_gc_arguments = true;
aoqi@6880 991 const bool dont_gc_arguments = false;
aoqi@1 992
aoqi@1 993
aoqi@6880 994 // default value; overwritten for some optimized stubs that are called
aoqi@6880 995 // from methods that do not use the fpu
aoqi@6880 996 bool save_fpu_registers = true;
aoqi@1 997
aoqi@1 998
aoqi@6880 999 // stub code & info for the different stubs
aoqi@6880 1000 OopMapSet* oop_maps = NULL;
aoqi@1 1001
aoqi@1 1002 switch (id) {
aoqi@1 1003 case forward_exception_id:
aoqi@1 1004 {
aoqi@8865 1005 oop_maps = generate_handle_exception(id, sasm);
aoqi@8865 1006 __ leave();
aoqi@8865 1007 __ jr(RA);
aoqi@8865 1008 __ delayed()->nop();
aoqi@1 1009 }
aoqi@1 1010 break;
aoqi@1 1011
aoqi@1 1012 case new_instance_id:
aoqi@1 1013 case fast_new_instance_id:
aoqi@1 1014 case fast_new_instance_init_check_id:
aoqi@1 1015 {
aoqi@1 1016 Register klass = A4; // Incoming
aoqi@1 1017 Register obj = V0; // Result
aoqi@1 1018
aoqi@1 1019 if (id == new_instance_id) {
aoqi@1 1020 __ set_info("new_instance", dont_gc_arguments);
aoqi@1 1021 } else if (id == fast_new_instance_id) {
aoqi@1 1022 __ set_info("fast new_instance", dont_gc_arguments);
aoqi@1 1023 } else {
aoqi@1 1024 assert(id == fast_new_instance_init_check_id, "bad StubID");
aoqi@1 1025 __ set_info("fast new_instance init check", dont_gc_arguments);
aoqi@1 1026 }
aoqi@1 1027
aoqi@6880 1028 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id)
aoqi@1 1029 && UseTLAB && FastTLABRefill) {
aoqi@1 1030 Label slow_path;
aoqi@1 1031 Register obj_size = T0;
aoqi@1 1032 Register t1 = T2;
aoqi@1 1033 Register t2 = T3;
aoqi@1 1034 assert_different_registers(klass, obj, obj_size, t1, t2);
aoqi@1 1035 if (id == fast_new_instance_init_check_id) {
aoqi@1 1036 // make sure the klass is initialized
aoqi@8865 1037 __ ld_ptr(AT, Address(klass, in_bytes(InstanceKlass::init_state_offset())));
aoqi@1 1038 __ move(t1, InstanceKlass::fully_initialized);
aoqi@1 1039 __ bne(AT, t1, slow_path);
aoqi@1 1040 __ delayed()->nop();
aoqi@1 1041 }
aoqi@1 1042 #ifdef ASSERT
aoqi@1 1043 // assert object can be fast path allocated
aoqi@1 1044 {
aoqi@1 1045 Label ok, not_ok;
aoqi@1 1046 __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1047 __ blez(obj_size, not_ok);
aoqi@1 1048 __ delayed()->nop();
aoqi@1 1049 __ andi(t1 , obj_size, Klass::_lh_instance_slow_path_bit);
aoqi@1 1050 __ beq(t1, R0, ok);
aoqi@1 1051 __ delayed()->nop();
aoqi@1 1052 __ bind(not_ok);
aoqi@1 1053 __ stop("assert(can be fast path allocated)");
aoqi@1 1054 __ should_not_reach_here();
aoqi@1 1055 __ bind(ok);
aoqi@1 1056 }
aoqi@1 1057 #endif // ASSERT
aoqi@1 1058 // if we got here then the TLAB allocation failed, so try
aoqi@1 1059 // refilling the TLAB or allocating directly from eden.
aoqi@6880 1060
aoqi@1 1061 Label retry_tlab, try_eden;
aoqi@1 1062 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy klass (A4)
aoqi@6880 1063
aoqi@1 1064 __ bind(retry_tlab);
aoqi@6880 1065
aoqi@1 1066 // get the instance size
aoqi@1 1067 __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1068 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
aoqi@1 1069 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
aoqi@1 1070 __ verify_oop(obj);
aoqi@1 1071 __ jr(RA);
aoqi@1 1072 __ delayed()->nop();
aoqi@6880 1073
wangxue@9205 1074 #ifndef OPT_THREAD
wangxue@9205 1075 const Register thread = T8;
wangxue@9205 1076 __ get_thread(thread);
wangxue@9205 1077 #else
wangxue@9205 1078 const Register thread = TREG;
wangxue@9205 1079 #endif
wangxue@9205 1080
aoqi@1 1081 __ bind(try_eden);
aoqi@1 1082
aoqi@6880 1083 // get the instance size
aoqi@1 1084 __ lw(obj_size, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1085 __ eden_allocate(obj, obj_size, 0, t1, t2, slow_path);
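// Eden allocations are not covered by the TLAB accounting, so bump the thread's
// allocated-bytes counter explicitly (MacroAssembler::incr_allocated_bytes).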
wangxue@9205 1086 __ incr_allocated_bytes(thread, obj_size, 0);
wangxue@9205 1087
aoqi@1 1088 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
aoqi@1 1089 __ verify_oop(obj);
aoqi@1 1090 __ jr(RA);
aoqi@1 1091 __ delayed()->nop();
aoqi@6880 1092
aoqi@1 1093 __ bind(slow_path);
aoqi@1 1094 }
aoqi@1 1095 __ enter();
aoqi@1 1096 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1097 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
aoqi@1 1098 oop_maps = new OopMapSet();
aoqi@1 1099 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1100 restore_live_registers_except_V0(sasm);
aoqi@1 1101 __ verify_oop(obj);
aoqi@1 1102 __ leave();
aoqi@1 1103 __ jr(RA);
aoqi@1 1104 __ delayed()->nop();
aoqi@6880 1105
aoqi@1 1106 // V0: new instance
aoqi@1 1107 }
aoqi@1 1108 break;
aoqi@1 1109
aoqi@1 1110
aoqi@1 1111 #ifdef TIERED
aoqi@1 1112 //FIXME, I have no idea which register to use
aoqi@1 1113 case counter_overflow_id:
aoqi@1 1114 {
aoqi@1 1115 #ifndef _LP64
aoqi@1 1116 Register bci = T5;
aoqi@1 1117 #else
aoqi@1 1118 Register bci = A5;
aoqi@1 1119 #endif
aoqi@8865 1120 Register method = AT;
aoqi@1 1121 __ enter();
aoqi@1 1122 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1123 // Retrieve bci
aoqi@1 1124 __ lw(bci, Address(FP, 2*BytesPerWord)); // FIXME (wuhui): does this FP offset match the x86 ebp layout?
aoqi@8865 1125 __ ld(method, Address(FP, 3*BytesPerWord));
aoqi@8865 1126 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
aoqi@1 1127 oop_maps = new OopMapSet();
aoqi@1 1128 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1129 restore_live_registers(sasm);
aoqi@1 1130 __ leave();
aoqi@6880 1131 __ jr(RA);
aoqi@6880 1132 __ delayed()->nop();
aoqi@1 1133 }
aoqi@1 1134 break;
aoqi@1 1135 #endif // TIERED
aoqi@1 1136
aoqi@1 1137
aoqi@1 1138
aoqi@1 1139 case new_type_array_id:
aoqi@1 1140 case new_object_array_id:
aoqi@6880 1141 {
aoqi@6880 1142 // T2 is used as the length register, T4 as the klass register, and V0 as the result register.
aoqi@1 1143 // MUST accord with NewTypeArrayStub::emit_code, NewObjectArrayStub::emit_code
aoqi@1 1144 Register length = T2; // Incoming
aoqi@1 1145 #ifndef _LP64
aoqi@1 1146 Register klass = T4; // Incoming
aoqi@1 1147 #else
aoqi@1 1148 Register klass = A4; // Incoming
aoqi@1 1149 #endif
aoqi@1 1150 Register obj = V0; // Result
aoqi@6880 1151
aoqi@1 1152 if (id == new_type_array_id) {
aoqi@1 1153 __ set_info("new_type_array", dont_gc_arguments);
aoqi@1 1154 } else {
aoqi@1 1155 __ set_info("new_object_array", dont_gc_arguments);
aoqi@1 1156 }
aoqi@6880 1157
aoqi@1 1158 if (UseTLAB && FastTLABRefill) {
aoqi@1 1159 Register arr_size = T0;
aoqi@6880 1160 Register t1 = T1;
aoqi@1 1161 Register t2 = T3;
aoqi@1 1162 Label slow_path;
aoqi@1 1163 assert_different_registers(length, klass, obj, arr_size, t1, t2);
aoqi@6880 1164
aoqi@1 1165 // check that array length is small enough for fast path
aoqi@1 1166 __ move(AT, C1_MacroAssembler::max_array_allocation_length);
aoqi@1 1167 __ sltu(AT, AT, length);
aoqi@1 1168 __ bne(AT, R0, slow_path);
aoqi@1 1169 __ delayed()->nop();
aoqi@1 1170
aoqi@1 1171 // if we got here then the TLAB allocation failed, so try
aoqi@1 1172 // refilling the TLAB or allocating directly from eden.
aoqi@1 1173 Label retry_tlab, try_eden;
aoqi@6880 1174 //T0,T1,T5,T8 have changed!
aoqi@1 1175 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves T2 & T4
aoqi@6880 1176
aoqi@1 1177 __ bind(retry_tlab);
aoqi@6880 1178
aoqi@1 1179 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
aoqi@6880 1180 __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1181 __ andi(AT, t1, 0x1f);
aoqi@1 1182 __ sllv(arr_size, length, AT);
aoqi@1 1183 __ srl(t1, t1, Klass::_lh_header_size_shift);
aoqi@1 1184 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1185 __ add(arr_size, t1, arr_size);
aoqi@1 1186 __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
aoqi@1 1187 __ move(AT, ~MinObjAlignmentInBytesMask);
aoqi@1 1188 __ andr(arr_size, arr_size, AT);
aoqi@6880 1189
aoqi@6880 1190
aoqi@1 1191 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
aoqi@1 1192 __ initialize_header(obj, klass, length,t1,t2);
aoqi@6880 1193 __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
aoqi@1 1194 + (Klass::_lh_header_size_shift / BitsPerByte)));
aoqi@1 1195 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
aoqi@1 1196 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
aoqi@1 1197 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1198 __ sub(arr_size, arr_size, t1); // body length
aoqi@1 1199 __ add(t1, t1, obj); // body start
aoqi@1 1200 __ initialize_body(t1, arr_size, 0, t2);
aoqi@1 1201 __ verify_oop(obj);
aoqi@1 1202 __ jr(RA);
aoqi@1 1203 __ delayed()->nop();
aoqi@6880 1204
wangxue@9205 1205 #ifndef OPT_THREAD
wangxue@9205 1206 const Register thread = T8;
wangxue@9205 1207 __ get_thread(thread);
wangxue@9205 1208 #else
wangxue@9205 1209 const Register thread = TREG;
wangxue@9205 1210 #endif
wangxue@9205 1211
aoqi@1 1212 __ bind(try_eden);
aoqi@1 1213 // get the allocation size: (length << (layout_helper & 0x1F)) + header_size
aoqi@6880 1214 __ lw(t1, klass, in_bytes(Klass::layout_helper_offset()));
aoqi@1 1215 __ andi(AT, t1, 0x1f);
aoqi@1 1216 __ sllv(arr_size, length, AT);
aoqi@1 1217 __ srl(t1, t1, Klass::_lh_header_size_shift);
aoqi@1 1218 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1219 __ add(arr_size, t1, arr_size);
aoqi@1 1220 __ addi(arr_size, arr_size, MinObjAlignmentInBytesMask); // align up
aoqi@1 1221 __ move(AT, ~MinObjAlignmentInBytesMask);
aoqi@1 1222 __ andr(arr_size, arr_size, AT);
aoqi@1 1223 __ eden_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
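        // Unlike the TLAB path (where allocated bytes are accounted when the TLAB is
        // retired and refilled), a direct eden allocation must update the thread's
        // allocated-bytes counter itself, hence the incr_allocated_bytes call below.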
wangxue@9205 1224 __ incr_allocated_bytes(thread, arr_size, 0);
wangxue@9205 1225
aoqi@1 1226 __ initialize_header(obj, klass, length, t1, t2);
aoqi@1 1227 __ lbu(t1, Address(klass, in_bytes(Klass::layout_helper_offset())
aoqi@1 1228 + (Klass::_lh_header_size_shift / BitsPerByte)));
aoqi@1 1229 __ andi(t1, t1, Klass::_lh_header_size_mask);
aoqi@1 1230 __ sub(arr_size, arr_size, t1); // body length
aoqi@1 1231 __ add(t1, t1, obj); // body start
aoqi@6880 1232
aoqi@1 1233 __ initialize_body(t1, arr_size, 0, t2);
aoqi@1 1234 __ verify_oop(obj);
aoqi@1 1235 __ jr(RA);
aoqi@1 1236 __ delayed()->nop();
aoqi@1 1237 __ bind(slow_path);
aoqi@1 1238 }
aoqi@6880 1239
aoqi@6880 1240
aoqi@1 1241 __ enter();
aoqi@1 1242 OopMap* map = save_live_registers(sasm, 0);
aoqi@1 1243 int call_offset;
aoqi@1 1244 if (id == new_type_array_id) {
aoqi@6880 1245 call_offset = __ call_RT(obj, noreg,
aoqi@1 1246 CAST_FROM_FN_PTR(address, new_type_array), klass, length);
aoqi@1 1247 } else {
aoqi@6880 1248 call_offset = __ call_RT(obj, noreg,
aoqi@1 1249 CAST_FROM_FN_PTR(address, new_object_array), klass, length);
aoqi@1 1250 }
aoqi@6880 1251
aoqi@1 1252 oop_maps = new OopMapSet();
aoqi@1 1253 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1254 restore_live_registers_except_V0(sasm);
aoqi@1 1255 __ verify_oop(obj);
aoqi@6880 1256 __ leave();
aoqi@1 1257 __ jr(RA);
aoqi@1 1258 __ delayed()->nop();
aoqi@1 1259 }
aoqi@1 1260 break;
aoqi@1 1261
aoqi@1 1262 case new_multi_array_id:
aoqi@6880 1263 {
aoqi@6880 1264 StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
aoqi@6880 1265 // Refer to c1_LIRGenerator_mips.cpp: do_NewMultiArray
aoqi@6880 1266 // V0: klass
aoqi@6880 1267 // T2: rank
aoqi@6880 1268 // T0: address of 1st dimension
aoqi@6880 1269 //__ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array), A1, A2, A3);
aoqi@6880 1270 //OopMap* map = save_live_registers(sasm, 4);
aoqi@6880 1271 OopMap* map = save_live_registers(sasm, 0);
aoqi@6880 1272 int call_offset = __ call_RT(V0, noreg, CAST_FROM_FN_PTR(address, new_multi_array),
aoqi@6880 1273 V0, T2, T0);
aoqi@6880 1274 oop_maps = new OopMapSet();
aoqi@6880 1275 oop_maps->add_gc_map(call_offset, map);
aoqi@6880 1276 //FIXME
aoqi@6880 1277 restore_live_registers_except_V0(sasm);
aoqi@6880 1278 // V0: new multi array
aoqi@6880 1279 __ verify_oop(V0);
aoqi@1 1280 }
aoqi@1 1281 break;
aoqi@1 1282
aoqi@6880 1283
aoqi@1 1284 case register_finalizer_id:
aoqi@1 1285 {
aoqi@6880 1286 __ set_info("register_finalizer", dont_gc_arguments);
aoqi@1 1287
aoqi@6880 1288 // The object to register arrives in A0 (we haven't pushed a
aoqi@6880 1289 // frame yet); move it into V0 for the checks below.
aoqi@1 1290 // See LIRGenerator::do_RegisterFinalizer and call_runtime.
aoqi@6880 1291 __ move(V0, A0);
aoqi@6880 1292 __ verify_oop(V0);
aoqi@6880 1293 // load the klass and check the has-finalizer flag
aoqi@6880 1294 Label register_finalizer;
aoqi@1 1295 #ifndef _LP64
aoqi@6880 1296 Register t = T5;
aoqi@1 1297 #else
aoqi@6880 1298 Register t = A5;
aoqi@1 1299 #endif
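      // In outline (a sketch of the intended behaviour, not the literal VM code):
      //   if ((klass->access_flags() & JVM_ACC_HAS_FINALIZER) != 0)
      //     SharedRuntime::register_finalizer(thread, obj);   // via call_RT below
      //   else
      //     return;                                           // nothing to do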
aoqi@6880 1300 //__ ld_ptr(t, Address(V0, oopDesc::klass_offset_in_bytes()));
aoqi@6880 1301 __ load_klass(t, V0);
aoqi@6880 1302 __ lw(t, Address(t, Klass::access_flags_offset()));
aoqi@6880 1303 __ move(AT, JVM_ACC_HAS_FINALIZER);
aoqi@6880 1304 __ andr(AT, AT, t);
aoqi@1 1305
aoqi@6880 1306 __ bne(AT, R0, register_finalizer);
aoqi@6880 1307 __ delayed()->nop();
aoqi@6880 1308 __ jr(RA);
aoqi@6880 1309 __ delayed()->nop();
aoqi@6880 1310 __ bind(register_finalizer);
aoqi@6880 1311 __ enter();
aoqi@6880 1312 OopMap* map = save_live_registers(sasm, 0 /*num_rt_args */);
aoqi@6880 1313
aoqi@6880 1314 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880 1315 SharedRuntime::register_finalizer), V0);
aoqi@6880 1316 oop_maps = new OopMapSet();
aoqi@1 1317 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1318
aoqi@6880 1319 // Now restore all the live registers
aoqi@6880 1320 restore_live_registers(sasm);
aoqi@1 1321
aoqi@6880 1322 __ leave();
aoqi@6880 1323 __ jr(RA);
aoqi@6880 1324 __ delayed()->nop();
aoqi@1 1325 }
aoqi@1 1326 break;
aoqi@1 1327
aoqi@6880 1328 // case range_check_failed_id:
aoqi@6880 1329 case throw_range_check_failed_id:
aoqi@8865 1330 {
aoqi@8865 1331 StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
aoqi@6880 1332 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@1 1333 throw_range_check_exception), true);
aoqi@1 1334 }
aoqi@1 1335 break;
aoqi@1 1336
aoqi@1 1337 case throw_index_exception_id:
aoqi@6880 1338 {
aoqi@6880 1339 // A1 is used as the index register, since it becomes the first argument; see call_RT.
aoqi@6880 1340 StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
aoqi@6880 1341 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@6880 1342 throw_index_exception), true);
aoqi@1 1343 }
aoqi@1 1344 break;
aoqi@1 1345
aoqi@6880 1346 case throw_div0_exception_id:
aoqi@1 1347 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
aoqi@6880 1348 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@6880 1349 throw_div0_exception), false);
aoqi@1 1350 }
aoqi@1 1351 break;
aoqi@1 1352
aoqi@6880 1353 case throw_null_pointer_exception_id:
aoqi@8865 1354 {
aoqi@8865 1355 StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
aoqi@6880 1356 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@6880 1357 throw_null_pointer_exception), false);
aoqi@1 1358 }
aoqi@1 1359 break;
aoqi@1 1360
aoqi@8865 1361 case handle_exception_nofpu_id:
aoqi@6880 1362 save_fpu_registers = false;
aoqi@6880 1363 // fall through
aoqi@6880 1364 case handle_exception_id:
aoqi@6880 1365 {
aoqi@6880 1366 StubFrame f(sasm, "handle_exception", dont_gc_arguments);
aoqi@6880 1367 //OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
aoqi@6880 1368 oop_maps = generate_handle_exception(id, sasm);
aoqi@6880 1369 }
aoqi@6880 1370 break;
aoqi@8865 1371 case handle_exception_from_callee_id:
aoqi@8865 1372 {
aoqi@8865 1373 StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
aoqi@8865 1374 oop_maps = generate_handle_exception(id, sasm);
aoqi@8865 1375 }
aoqi@8865 1376 break;
aoqi@6880 1377 case unwind_exception_id:
aoqi@6880 1378 {
aoqi@6880 1379 __ set_info("unwind_exception", dont_gc_arguments);
aoqi@6880 1380 generate_unwind_exception(sasm);
aoqi@6880 1381 }
aoqi@6880 1382 break;
aoqi@1 1383
aoqi@1 1384
aoqi@6880 1385 case throw_array_store_exception_id:
aoqi@8865 1386 {
aoqi@8865 1387 StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
aoqi@6880 1388 // tos + 0: link
aoqi@6880 1389 // + 1: return address
aoqi@6880 1390 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@6880 1391 throw_array_store_exception), false);
aoqi@6880 1392 }
aoqi@6880 1393 break;
aoqi@1 1394
aoqi@6880 1395 case throw_class_cast_exception_id:
aoqi@8865 1396 {
aoqi@8865 1397 StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
aoqi@6880 1398 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address,
aoqi@8865 1399 throw_class_cast_exception), true);
aoqi@6880 1400 }
aoqi@6880 1401 break;
aoqi@1 1402
aoqi@6880 1403 case throw_incompatible_class_change_error_id:
aoqi@6880 1404 {
aoqi@8865 1405 StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
aoqi@8865 1406 oop_maps = generate_exception_throw(sasm,
aoqi@8865 1407 CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
aoqi@6880 1408 }
aoqi@6880 1409 break;
aoqi@1 1410
aoqi@6880 1411 case slow_subtype_check_id:
aoqi@6880 1412 {
aoqi@6880 1413 // Note: this stub is not actually used at present.
aoqi@6880 1414 // A0: klass_RInfo (sub klass)
aoqi@6880 1415 // A1: k->encoding() (super klass)
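      // Result convention (as implemented below): V0 = 1 if A0 is a subtype of A1,
      // V0 = 0 on a miss; T0 and T1 are saved across the check and restored before return.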
aoqi@6880 1416 __ set_info("slow_subtype_check", dont_gc_arguments);
aoqi@6880 1417 __ st_ptr(T0, SP, (-1) * wordSize);
aoqi@6880 1418 __ st_ptr(T1, SP, (-2) * wordSize);
aoqi@6880 1419 __ addiu(SP, SP, (-2) * wordSize);
aoqi@1 1420
fujie@9132 1421 Label miss;
fujie@9132 1422 __ check_klass_subtype_slow_path(A0, A1, T0, T1, NULL, &miss);
aoqi@1 1423
aoqi@6880 1424 __ addiu(V0, R0, 1);
aoqi@6880 1425 __ addiu(SP, SP, 2 * wordSize);
aoqi@6880 1426 __ ld_ptr(T0, SP, (-1) * wordSize);
aoqi@6880 1427 __ ld_ptr(T1, SP, (-2) * wordSize);
aoqi@6880 1428 __ jr(RA);
aoqi@6880 1429 __ delayed()->nop();
aoqi@1 1430
aoqi@1 1431
aoqi@6880 1432 __ bind(miss);
aoqi@6880 1433 __ move(V0, R0);
aoqi@6880 1434 __ addiu(SP, SP, 2 * wordSize);
aoqi@6880 1435 __ ld_ptr(T0, SP, (-1) * wordSize);
aoqi@6880 1436 __ ld_ptr(T1, SP, (-2) * wordSize);
aoqi@6880 1437 __ jr(RA);
aoqi@6880 1438 __ delayed()->nop();
aoqi@6880 1439 }
aoqi@6880 1440 break;
aoqi@1 1441
aoqi@1 1442 case monitorenter_nofpu_id:
aoqi@1 1443 save_fpu_registers = false;// fall through
aoqi@1 1444
aoqi@6880 1445 case monitorenter_id:
aoqi@1 1446 {
aoqi@6880 1447 StubFrame f(sasm, "monitorenter", dont_gc_arguments);
aoqi@6880 1448 OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
aoqi@1 1449
aoqi@6880 1450 f.load_argument(1, V0); // V0: object
aoqi@1 1451 #ifndef _LP64
aoqi@6880 1452 f.load_argument(0, T6); // T6: lock address
aoqi@6880 1453 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880 1454 monitorenter), V0, T6);
aoqi@1 1455 #else
aoqi@6880 1456 f.load_argument(0, A6); // A6: lock address
aoqi@6880 1457 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880 1458 monitorenter), V0, A6);
aoqi@1 1459 #endif
aoqi@1 1460
aoqi@6880 1461 oop_maps = new OopMapSet();
aoqi@6880 1462 oop_maps->add_gc_map(call_offset, map);
aoqi@6880 1463 restore_live_registers(sasm, save_fpu_registers);
aoqi@6880 1464 }
aoqi@6880 1465 break;
aoqi@1 1466
aoqi@6880 1467 case monitorexit_nofpu_id:
aoqi@6880 1468 save_fpu_registers = false;
aoqi@6880 1469 // fall through
aoqi@6880 1470 case monitorexit_id:
aoqi@6880 1471 {
aoqi@1 1472 StubFrame f(sasm, "monitorexit", dont_gc_arguments);
aoqi@1 1473 OopMap* map = save_live_registers(sasm, 0, save_fpu_registers);
aoqi@6880 1474
aoqi@1 1475 #ifndef _LP64
aoqi@1 1476 f.load_argument(0, T6); // T6: lock address
aoqi@1 1477 #else
aoqi@1 1478 f.load_argument(0, A6); // A6: lock address
aoqi@1 1479 #endif
aoqi@1 1480 // Note: this is really a leaf routine, but it must set up the last Java sp,
aoqi@1 1481 // => use call_RT for now (speed could be improved by
aoqi@1 1482 // doing the last-Java-sp setup manually).
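      // Setting the last Java frame lets the VM walk the stack correctly if the
      // runtime call blocks or throws; call_RT performs that bookkeeping for us.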
aoqi@1 1483 #ifndef _LP64
aoqi@6880 1484 int call_offset = __ call_RT(noreg, noreg,
aoqi@6880 1485 CAST_FROM_FN_PTR(address, monitorexit), T6);
aoqi@1 1486 #else
aoqi@6880 1487 int call_offset = __ call_RT(noreg, noreg,
aoqi@6880 1488 CAST_FROM_FN_PTR(address, monitorexit), A6);
aoqi@1 1489 #endif
aoqi@1 1490 oop_maps = new OopMapSet();
aoqi@1 1491 oop_maps->add_gc_map(call_offset, map);
aoqi@1 1492 restore_live_registers(sasm, save_fpu_registers);
aoqi@6880 1493
aoqi@1 1494 }
aoqi@1 1495 break;
aoqi@6880 1496 // case init_check_patching_id:
aoqi@6880 1497 case access_field_patching_id:
aoqi@6880 1498 {
aoqi@1 1499 StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
aoqi@1 1500 // we should set up register map
aoqi@1 1501 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
aoqi@1 1502
aoqi@1 1503 }
aoqi@1 1504 break;
aoqi@1 1505
aoqi@6880 1506 case load_klass_patching_id:
aoqi@6880 1507 {
aoqi@6880 1508 StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
aoqi@6880 1509 // we should set up register map
aoqi@6880 1510 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address,
aoqi@6880 1511 move_klass_patching));
aoqi@6880 1512 }
aoqi@6880 1513 break;
aoqi@6880 1514 /* case jvmti_exception_throw_id:
aoqi@6880 1515 {
aoqi@6880 1516 // V0: exception oop
aoqi@6880 1517 // V1: exception pc
aoqi@6880 1518 StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
aoqi@6880 1519 // Preserve all registers across this potentially blocking call
aoqi@6880 1520 const int num_rt_args = 2; // thread, exception oop
aoqi@6880 1521 //OopMap* map = save_live_registers(sasm, num_rt_args);
aoqi@6880 1522 OopMap* map = save_live_registers(sasm, 0);
aoqi@6880 1523 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address,
aoqi@6880 1524 Runtime1::post_jvmti_exception_throw), V0);
aoqi@6880 1525 oop_maps = new OopMapSet();
aoqi@6880 1526 oop_maps->add_gc_map(call_offset, map);
aoqi@6880 1527 restore_live_registers(sasm);
aoqi@6880 1528 }*/
aoqi@8865 1529 case load_mirror_patching_id:
aoqi@8865 1530 {
aoqi@8865 1531 StubFrame f(sasm, "load_mirror_patching" , dont_gc_arguments);
aoqi@8865 1532 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
aoqi@8865 1533 }
aoqi@6880 1534 break;
aoqi@8865 1535
aoqi@8865 1536 case load_appendix_patching_id:
aoqi@8865 1537 {
aoqi@8865 1538 StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
aoqi@8865 1539 // we should set up register map
aoqi@8865 1540 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
aoqi@8865 1541 }
aoqi@8865 1542 break;
aoqi@8865 1543
aoqi@6880 1544 case dtrace_object_alloc_id:
aoqi@6880 1545 {
aoqi@6880 1546 // V0: object
aoqi@6880 1547 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
aoqi@6880 1548 // we can't gc here so skip the oopmap but make sure that all
aoqi@6880 1549 // the live registers get saved.
aoqi@6880 1550 save_live_registers(sasm, 0);
aoqi@1 1551
aoqi@6880 1552 __ push_reg(V0);
aoqi@8865 1553 __ move(A0, V0);
aoqi@6880 1554 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc),
aoqi@6880 1555 relocInfo::runtime_call_type);
fujie@9134 1556 __ delayed()->nop();
aoqi@6880 1557 __ super_pop(V0);
aoqi@1 1558
aoqi@6880 1559 restore_live_registers(sasm);
aoqi@6880 1560 }
aoqi@6880 1561 break;
aoqi@8865 1562
aoqi@6880 1563 case fpu2long_stub_id:
aoqi@6880 1564 {
aoqi@8865 1565 // FIXME: I have no idea how to port this.
aoqi@8865 1566 //tty->print_cr("fpu2long_stub_id unimplemented yet!");
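      // Background (an assumption based on the x86 port, not verified for MIPS): on x86
      // this stub handles float/double -> long conversions whose value is NaN or out of
      // range; here the conversion is presumably handled inline, so the stub stays empty.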
aoqi@6880 1567 }
aoqi@8865 1568 break;
aoqi@8865 1569
aoqi@8865 1570 case deoptimize_id:
aoqi@8865 1571 {
aoqi@8865 1572 StubFrame f(sasm, "deoptimize", dont_gc_arguments);
aoqi@8865 1573 const int num_rt_args = 1; // thread
aoqi@8865 1574 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
aoqi@8865 1575 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
aoqi@8865 1576 oop_maps = new OopMapSet();
aoqi@8865 1577 oop_maps->add_gc_map(call_offset, oop_map);
aoqi@8865 1578 restore_live_registers(sasm);
aoqi@8865 1579 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
aoqi@8865 1580 assert(deopt_blob != NULL, "deoptimization blob must have been created");
aoqi@8865 1581 __ leave();
aoqi@8865 1582 __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
zhaixiang@9135 1583 __ delayed()->nop();
aoqi@8865 1584 }
aoqi@8865 1585 break;
aoqi@8865 1586
aoqi@8865 1587 case predicate_failed_trap_id:
aoqi@8865 1588 {
aoqi@8865 1589 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
aoqi@8865 1590
aoqi@8865 1591 OopMap* map = save_live_registers(sasm, 1);
aoqi@8865 1592
aoqi@8865 1593 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
aoqi@8865 1594 oop_maps = new OopMapSet();
aoqi@8865 1595 oop_maps->add_gc_map(call_offset, map);
aoqi@8865 1596 restore_live_registers(sasm);
aoqi@8865 1597 __ leave();
aoqi@8865 1598 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
aoqi@8865 1599 assert(deopt_blob != NULL, "deoptimization blob must have been created");
aoqi@8865 1600
aoqi@8865 1601 __ jmp(deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
zhaixiang@9135 1602 __ delayed()->nop();
aoqi@8865 1603 }
aoqi@8865 1604 break;
aoqi@8865 1605
aoqi@6880 1606 default:
aoqi@8865 1607 {
aoqi@8865 1608 StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
aoqi@6880 1609 __ move(A1, (int)id);
aoqi@6880 1610 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), A1);
aoqi@6880 1611 __ should_not_reach_here();
aoqi@6880 1612 }
aoqi@6880 1613 break;
aoqi@6880 1614 }
aoqi@6880 1615 return oop_maps;
aoqi@1 1616 }
aoqi@1 1617
aoqi@1 1618 #undef __
aoqi@1 1619
aoqi@1 1620 const char *Runtime1::pd_name_for_address(address entry) {
aoqi@1 1621 return "<unknown function>";
aoqi@1 1622 }
