Wed, 08 Apr 2009 10:56:49 -0700
6655638: dynamic languages need method handles
Summary: initial implementation, with known omissions (x86/64, sparc, compiler optim., c-oops, C++ interp.)
Reviewed-by: kvn, twisti, never
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_c1_Runtime1_sparc.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | // Implementation of StubAssembler |
duke@435 | 29 | |
duke@435 | 30 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry_point, int number_of_arguments) { |
duke@435 | 31 | // On SPARC, changing the number of arguments doesn't change anything |
duke@435 | 32 | // about the frame size, so we always lie and claim that we are |
duke@435 | 33 | // passing only 1 argument. |
duke@435 | 34 | set_num_rt_args(1); |
duke@435 | 35 | |
duke@435 | 36 | assert_not_delayed(); |
duke@435 | 37 | // bang stack before going to runtime |
duke@435 | 38 | set(-os::vm_page_size() + STACK_BIAS, G3_scratch); |
duke@435 | 39 | st(G0, SP, G3_scratch); |
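        |     | // (The store above writes a zero one page below SP; STACK_BIAS adjusts |
        |     | // for the biased stack pointer on 64-bit SPARC. A stack overflow thus |
        |     | // faults here, at a known point, rather than inside the runtime call.) |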
duke@435 | 40 | |
duke@435 | 41 | // debugging support |
duke@435 | 42 | assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); |
duke@435 | 43 | |
duke@435 | 44 | set_last_Java_frame(SP, noreg); |
duke@435 | 45 | if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early |
duke@435 | 46 | save_thread(L7_thread_cache); |
duke@435 | 47 | // do the call |
duke@435 | 48 | call(entry_point, relocInfo::runtime_call_type); |
duke@435 | 49 | if (!VerifyThread) { |
duke@435 | 50 | delayed()->mov(G2_thread, O0); // pass thread as first argument |
duke@435 | 51 | } else { |
duke@435 | 52 | delayed()->nop(); // (thread already passed) |
duke@435 | 53 | } |
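        |     | // (On SPARC the instruction after a call executes in the delay slot, so |
        |     | // the thread argument is materialized there, unless VerifyThread already |
        |     | // passed it early above.) |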
duke@435 | 54 | int call_offset = offset(); // offset of return address |
duke@435 | 55 | restore_thread(L7_thread_cache); |
duke@435 | 56 | reset_last_Java_frame(); |
duke@435 | 57 | |
duke@435 | 58 | // check for pending exceptions |
duke@435 | 59 | { Label L; |
duke@435 | 60 | Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset())); |
duke@435 | 61 | ld_ptr(exception_addr, Gtemp); |
duke@435 | 62 | br_null(Gtemp, false, pt, L); |
duke@435 | 63 | delayed()->nop(); |
duke@435 | 64 | Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); |
duke@435 | 65 | st_ptr(G0, vm_result_addr); |
duke@435 | 66 | Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); |
duke@435 | 67 | st_ptr(G0, vm_result_addr_2); |
duke@435 | 68 | |
duke@435 | 69 | if (frame_size() == no_frame_size) { |
duke@435 | 70 | // we use O7 linkage so that forward_exception_entry has the issuing PC |
duke@435 | 71 | call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); |
duke@435 | 72 | delayed()->restore(); |
duke@435 | 73 | } else if (_stub_id == Runtime1::forward_exception_id) { |
duke@435 | 74 | should_not_reach_here(); |
duke@435 | 75 | } else { |
duke@435 | 76 | Address exc(G4, Runtime1::entry_for(Runtime1::forward_exception_id)); |
duke@435 | 77 | jump_to(exc, 0); |
duke@435 | 78 | delayed()->nop(); |
duke@435 | 79 | } |
duke@435 | 80 | bind(L); |
duke@435 | 81 | } |
duke@435 | 82 | |
duke@435 | 83 | // get oop result if there is one and reset the value in the thread |
duke@435 | 84 | if (oop_result1->is_valid()) { |
duke@435 | 85 | get_vm_result (oop_result1); |
duke@435 | 86 | } else { |
duke@435 | 87 | // be a little paranoid and clear the result |
duke@435 | 88 | Address vm_result_addr(G2_thread, 0, in_bytes(JavaThread::vm_result_offset())); |
duke@435 | 89 | st_ptr(G0, vm_result_addr); |
duke@435 | 90 | } |
duke@435 | 91 | |
duke@435 | 92 | if (oop_result2->is_valid()) { |
duke@435 | 93 | get_vm_result_2(oop_result2); |
duke@435 | 94 | } else { |
duke@435 | 95 | // be a little paranoid and clear the result |
duke@435 | 96 | Address vm_result_addr_2(G2_thread, 0, in_bytes(JavaThread::vm_result_2_offset())); |
duke@435 | 97 | st_ptr(G0, vm_result_addr_2); |
duke@435 | 98 | } |
duke@435 | 99 | |
duke@435 | 100 | return call_offset; |
duke@435 | 101 | } |
duke@435 | 102 | |
duke@435 | 103 | |
duke@435 | 104 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) { |
duke@435 | 105 | // O0 is reserved for the thread |
duke@435 | 106 | mov(arg1, O1); |
duke@435 | 107 | return call_RT(oop_result1, oop_result2, entry, 1); |
duke@435 | 108 | } |
duke@435 | 109 | |
duke@435 | 110 | |
duke@435 | 111 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) { |
duke@435 | 112 | // O0 is reserved for the thread |
duke@435 | 113 | mov(arg1, O1); |
duke@435 | 114 | mov(arg2, O2); assert(arg2 != O1, "smashed argument"); |
duke@435 | 115 | return call_RT(oop_result1, oop_result2, entry, 2); |
duke@435 | 116 | } |
duke@435 | 117 | |
duke@435 | 118 | |
duke@435 | 119 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) { |
duke@435 | 120 | // O0 is reserved for the thread |
duke@435 | 121 | mov(arg1, O1); |
duke@435 | 122 | mov(arg2, O2); assert(arg2 != O1, "smashed argument"); |
duke@435 | 123 | mov(arg3, O3); assert(arg3 != O1 && arg3 != O2, "smashed argument"); |
duke@435 | 124 | return call_RT(oop_result1, oop_result2, entry, 3); |
duke@435 | 125 | } |
duke@435 | 126 | |
duke@435 | 127 | |
duke@435 | 128 | // Implementation of Runtime1 |
duke@435 | 129 | |
duke@435 | 130 | #define __ sasm-> |
duke@435 | 131 | |
duke@435 | 132 | static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs]; |
duke@435 | 133 | static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs]; |
duke@435 | 134 | static int reg_save_size_in_words; |
duke@435 | 135 | static int frame_size_in_bytes = -1; |
duke@435 | 136 | |
duke@435 | 137 | static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) { |
duke@435 | 138 | assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words), |
duke@435 | 139 | " mismatch in calculation"); |
duke@435 | 140 | sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); |
duke@435 | 141 | int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); |
duke@435 | 142 | OopMap* oop_map = new OopMap(frame_size_in_slots, 0); |
duke@435 | 143 | |
duke@435 | 144 | int i; |
duke@435 | 145 | for (i = 0; i < FrameMap::nof_cpu_regs; i++) { |
duke@435 | 146 | Register r = as_Register(i); |
duke@435 | 147 | if (r == G1 || r == G3 || r == G4 || r == G5) { |
duke@435 | 148 | int sp_offset = cpu_reg_save_offsets[i]; |
duke@435 | 149 | oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), |
duke@435 | 150 | r->as_VMReg()); |
duke@435 | 151 | } |
duke@435 | 152 | } |
duke@435 | 153 | |
duke@435 | 154 | if (save_fpu_registers) { |
duke@435 | 155 | for (i = 0; i < FrameMap::nof_fpu_regs; i++) { |
duke@435 | 156 | FloatRegister r = as_FloatRegister(i); |
duke@435 | 157 | int sp_offset = fpu_reg_save_offsets[i]; |
duke@435 | 158 | oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset), |
duke@435 | 159 | r->as_VMReg()); |
duke@435 | 160 | } |
duke@435 | 161 | } |
duke@435 | 162 | return oop_map; |
duke@435 | 163 | } |
duke@435 | 164 | |
duke@435 | 165 | static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) { |
duke@435 | 166 | assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words), |
duke@435 | 167 | " mismatch in calculation"); |
duke@435 | 168 | __ save_frame_c1(frame_size_in_bytes); |
duke@435 | 169 | sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); |
duke@435 | 170 | |
duke@435 | 171 | // Record volatile registers as callee-save values in an OopMap so their save locations will be |
duke@435 | 172 | // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for |
duke@435 | 173 | // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers |
duke@435 | 174 | // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame |
duke@435 | 175 | // (as the stub's I's) when the runtime routine called by the stub creates its frame. |
duke@435 | 176 | // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint)) |
duke@435 | 177 | |
duke@435 | 178 | int i; |
duke@435 | 179 | for (i = 0; i < FrameMap::nof_cpu_regs; i++) { |
duke@435 | 180 | Register r = as_Register(i); |
duke@435 | 181 | if (r == G1 || r == G3 || r == G4 || r == G5) { |
duke@435 | 182 | int sp_offset = cpu_reg_save_offsets[i]; |
duke@435 | 183 | __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS); |
duke@435 | 184 | } |
duke@435 | 185 | } |
duke@435 | 186 | |
duke@435 | 187 | if (save_fpu_registers) { |
duke@435 | 188 | for (i = 0; i < FrameMap::nof_fpu_regs; i++) { |
duke@435 | 189 | FloatRegister r = as_FloatRegister(i); |
duke@435 | 190 | int sp_offset = fpu_reg_save_offsets[i]; |
duke@435 | 191 | __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS); |
duke@435 | 192 | } |
duke@435 | 193 | } |
duke@435 | 194 | |
duke@435 | 195 | return generate_oop_map(sasm, save_fpu_registers); |
duke@435 | 196 | } |
duke@435 | 197 | |
duke@435 | 198 | static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { |
duke@435 | 199 | for (int i = 0; i < FrameMap::nof_cpu_regs; i++) { |
duke@435 | 200 | Register r = as_Register(i); |
duke@435 | 201 | if (r == G1 || r == G3 || r == G4 || r == G5) { |
duke@435 | 202 | __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r); |
duke@435 | 203 | } |
duke@435 | 204 | } |
duke@435 | 205 | |
duke@435 | 206 | if (restore_fpu_registers) { |
duke@435 | 207 | for (int i = 0; i < FrameMap::nof_fpu_regs; i++) { |
duke@435 | 208 | FloatRegister r = as_FloatRegister(i); |
duke@435 | 209 | __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r); |
duke@435 | 210 | } |
duke@435 | 211 | } |
duke@435 | 212 | } |
duke@435 | 213 | |
duke@435 | 214 | |
duke@435 | 215 | void Runtime1::initialize_pd() { |
duke@435 | 216 | // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines |
duke@435 | 217 | // |
duke@435 | 218 | // A stub routine will have a frame that is at least large enough to hold |
duke@435 | 219 | // a register window save area (obviously) and the volatile g registers |
duke@435 | 220 | // and floating registers. A user of save_live_registers can have a frame |
duke@435 | 221 | // that has more scratch area in it (although typically they will use L-regs). |
duke@435 | 222 | // In that case the frame will look like this (stack growing down): |
duke@435 | 223 | // |
duke@435 | 224 | //   FP -> |               | |
duke@435 | 225 | //         |  scratch mem  | |
duke@435 | 226 | //         |  "        "   | |
duke@435 | 227 | //         ----------------- |
duke@435 | 228 | //         |  float regs   | |
duke@435 | 229 | //         |  "        "   | |
duke@435 | 230 | //         ----------------- |
duke@435 | 231 | //         |  G regs       | |
duke@435 | 232 | //         |  "        "   | |
duke@435 | 233 | //         ----------------- |
duke@435 | 234 | //         |  abi reg.     | |
duke@435 | 235 | //         |  window save  | |
duke@435 | 236 | //         |  area         | |
duke@435 | 237 | //   SP -> ----------------- |
duke@435 | 238 | // |
duke@435 | 239 | int i; |
duke@435 | 240 | int sp_offset = round_to(frame::register_save_words, 2); // start doubleword aligned |
duke@435 | 241 | |
duke@435 | 242 | // only G int registers are saved explicitly; others are found in register windows |
duke@435 | 243 | for (i = 0; i < FrameMap::nof_cpu_regs; i++) { |
duke@435 | 244 | Register r = as_Register(i); |
duke@435 | 245 | if (r == G1 || r == G3 || r == G4 || r == G5) { |
duke@435 | 246 | cpu_reg_save_offsets[i] = sp_offset; |
duke@435 | 247 | sp_offset++; |
duke@435 | 248 | } |
duke@435 | 249 | } |
duke@435 | 250 | |
duke@435 | 251 | // all float registers are saved explicitly |
duke@435 | 252 | assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here"); |
duke@435 | 253 | for (i = 0; i < FrameMap::nof_fpu_regs; i++) { |
duke@435 | 254 | fpu_reg_save_offsets[i] = sp_offset; |
duke@435 | 255 | sp_offset++; |
duke@435 | 256 | } |
duke@435 | 257 | reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset; |
duke@435 | 258 | // This should match assembler::total_frame_size_in_bytes, which |
duke@435 | 259 | // isn't callable from this context. It's checked by an assert when |
duke@435 | 260 | // it's used, though. |
duke@435 | 261 | frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8); |
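        |     | // For illustration (assuming frame::register_save_words == 16, the usual |
        |     | // SPARC register window save area): G1/G3/G4/G5 land at word offsets |
        |     | // 16..19 and the 32 float registers at 20..51, so sp_offset ends at 52 |
        |     | // and frame_size_in_bytes is align_size_up(52 * wordSize, 8). |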
duke@435 | 262 | } |
duke@435 | 263 | |
duke@435 | 264 | |
duke@435 | 265 | OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { |
duke@435 | 266 | // make a frame and preserve the caller's caller-save registers |
duke@435 | 267 | OopMap* oop_map = save_live_registers(sasm); |
duke@435 | 268 | int call_offset; |
duke@435 | 269 | if (!has_argument) { |
duke@435 | 270 | call_offset = __ call_RT(noreg, noreg, target); |
duke@435 | 271 | } else { |
duke@435 | 272 | call_offset = __ call_RT(noreg, noreg, target, G4); |
duke@435 | 273 | } |
duke@435 | 274 | OopMapSet* oop_maps = new OopMapSet(); |
duke@435 | 275 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 276 | |
duke@435 | 277 | __ should_not_reach_here(); |
duke@435 | 278 | return oop_maps; |
duke@435 | 279 | } |
duke@435 | 280 | |
duke@435 | 281 | |
duke@435 | 282 | OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target, |
duke@435 | 283 | Register arg1, Register arg2, Register arg3) { |
duke@435 | 284 | // make a frame and preserve the caller's caller-save registers |
duke@435 | 285 | OopMap* oop_map = save_live_registers(sasm); |
duke@435 | 286 | |
duke@435 | 287 | int call_offset; |
duke@435 | 288 | if (arg1 == noreg) { |
duke@435 | 289 | call_offset = __ call_RT(result, noreg, target); |
duke@435 | 290 | } else if (arg2 == noreg) { |
duke@435 | 291 | call_offset = __ call_RT(result, noreg, target, arg1); |
duke@435 | 292 | } else if (arg3 == noreg) { |
duke@435 | 293 | call_offset = __ call_RT(result, noreg, target, arg1, arg2); |
duke@435 | 294 | } else { |
duke@435 | 295 | call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3); |
duke@435 | 296 | } |
duke@435 | 297 | OopMapSet* oop_maps = NULL; |
duke@435 | 298 | |
duke@435 | 299 | oop_maps = new OopMapSet(); |
duke@435 | 300 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 301 | restore_live_registers(sasm); |
duke@435 | 302 | |
duke@435 | 303 | __ ret(); |
duke@435 | 304 | __ delayed()->restore(); |
duke@435 | 305 | |
duke@435 | 306 | return oop_maps; |
duke@435 | 307 | } |
duke@435 | 308 | |
duke@435 | 309 | |
duke@435 | 310 | OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { |
duke@435 | 311 | // make a frame and preserve the caller's caller-save registers |
duke@435 | 312 | OopMap* oop_map = save_live_registers(sasm); |
duke@435 | 313 | |
duke@435 | 314 | // call the runtime patching routine; it returns non-zero if the nmethod got deoptimized |
duke@435 | 315 | int call_offset = __ call_RT(noreg, noreg, target); |
duke@435 | 316 | OopMapSet* oop_maps = new OopMapSet(); |
duke@435 | 317 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 318 | |
duke@435 | 319 | // re-execute the patched instruction or, if the nmethod was deoptimized, return to the |
duke@435 | 320 | // deoptimization handler entry that will cause re-execution of the current bytecode |
duke@435 | 321 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); |
duke@435 | 322 | assert(deopt_blob != NULL, "deoptimization blob must have been created"); |
duke@435 | 323 | |
duke@435 | 324 | Label no_deopt; |
duke@435 | 325 | __ tst(O0); |
duke@435 | 326 | __ brx(Assembler::equal, false, Assembler::pt, no_deopt); |
duke@435 | 327 | __ delayed()->nop(); |
duke@435 | 328 | |
duke@435 | 329 | // Return to the deoptimization handler entry for unpacking and re-execution; |
duke@435 | 330 | // if we simply returned, then we'd deopt as if any call we patched had just |
duke@435 | 331 | // returned. |
duke@435 | 332 | |
duke@435 | 333 | restore_live_registers(sasm); |
duke@435 | 334 | __ restore(); |
duke@435 | 335 | __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type); |
duke@435 | 336 | __ delayed()->nop(); |
duke@435 | 337 | |
duke@435 | 338 | __ bind(no_deopt); |
duke@435 | 339 | restore_live_registers(sasm); |
duke@435 | 340 | __ ret(); |
duke@435 | 341 | __ delayed()->restore(); |
duke@435 | 342 | |
duke@435 | 343 | return oop_maps; |
duke@435 | 344 | } |
duke@435 | 345 | |
duke@435 | 346 | OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { |
duke@435 | 347 | |
duke@435 | 348 | OopMapSet* oop_maps = NULL; |
duke@435 | 349 | // for better readability |
duke@435 | 350 | const bool must_gc_arguments = true; |
duke@435 | 351 | const bool dont_gc_arguments = false; |
duke@435 | 352 | |
duke@435 | 353 | // stub code & info for the different stubs |
duke@435 | 354 | switch (id) { |
duke@435 | 355 | case forward_exception_id: |
duke@435 | 356 | { |
duke@435 | 357 | // We're handling an exception in the context of a compiled |
duke@435 | 358 | // frame. The registers have been saved in the standard |
duke@435 | 359 | // places. Perform an exception lookup in the caller and |
duke@435 | 360 | // dispatch to the handler if found. Otherwise unwind and |
duke@435 | 361 | // dispatch to the caller's exception handler. |
duke@435 | 362 | |
duke@435 | 363 | oop_maps = new OopMapSet(); |
duke@435 | 364 | OopMap* oop_map = generate_oop_map(sasm, true); |
duke@435 | 365 | |
duke@435 | 366 | // transfer the pending exception to the exception_oop |
duke@435 | 367 | __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception); |
duke@435 | 368 | __ ld_ptr(Oexception, 0, G0); // touch the exception oop (faults here if it is NULL) |
duke@435 | 369 | __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset())); |
duke@435 | 370 | __ add(I7, frame::pc_return_offset, Oissuing_pc); |
duke@435 | 371 | |
duke@435 | 372 | generate_handle_exception(sasm, oop_maps, oop_map); |
duke@435 | 373 | __ should_not_reach_here(); |
duke@435 | 374 | } |
duke@435 | 375 | break; |
duke@435 | 376 | |
duke@435 | 377 | case new_instance_id: |
duke@435 | 378 | case fast_new_instance_id: |
duke@435 | 379 | case fast_new_instance_init_check_id: |
duke@435 | 380 | { |
duke@435 | 381 | Register G5_klass = G5; // Incoming |
duke@435 | 382 | Register O0_obj = O0; // Outgoing |
duke@435 | 383 | |
duke@435 | 384 | if (id == new_instance_id) { |
duke@435 | 385 | __ set_info("new_instance", dont_gc_arguments); |
duke@435 | 386 | } else if (id == fast_new_instance_id) { |
duke@435 | 387 | __ set_info("fast new_instance", dont_gc_arguments); |
duke@435 | 388 | } else { |
duke@435 | 389 | assert(id == fast_new_instance_init_check_id, "bad StubID"); |
duke@435 | 390 | __ set_info("fast new_instance init check", dont_gc_arguments); |
duke@435 | 391 | } |
duke@435 | 392 | |
duke@435 | 393 | if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && |
duke@435 | 394 | UseTLAB && FastTLABRefill) { |
duke@435 | 395 | Label slow_path; |
duke@435 | 396 | Register G1_obj_size = G1; |
duke@435 | 397 | Register G3_t1 = G3; |
duke@435 | 398 | Register G4_t2 = G4; |
duke@435 | 399 | assert_different_registers(G5_klass, G1_obj_size, G3_t1, G4_t2); |
duke@435 | 400 | |
duke@435 | 401 | // Push a frame since we may do dtrace notification for the |
duke@435 | 402 | // allocation, which requires calling out, and we don't want |
duke@435 | 403 | // to stomp the real return address. |
duke@435 | 404 | __ save_frame(0); |
duke@435 | 405 | |
duke@435 | 406 | if (id == fast_new_instance_init_check_id) { |
duke@435 | 407 | // make sure the klass is initialized |
duke@435 | 408 | __ ld(G5_klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_t1); |
duke@435 | 409 | __ cmp(G3_t1, instanceKlass::fully_initialized); |
duke@435 | 410 | __ br(Assembler::notEqual, false, Assembler::pn, slow_path); |
duke@435 | 411 | __ delayed()->nop(); |
duke@435 | 412 | } |
duke@435 | 413 | #ifdef ASSERT |
duke@435 | 414 | // assert object can be fast path allocated |
duke@435 | 415 | { |
duke@435 | 416 | Label ok, not_ok; |
duke@435 | 417 | __ ld(G5_klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), G1_obj_size); |
duke@435 | 418 | __ cmp(G1_obj_size, 0); // make sure it's an instance (LH > 0) |
duke@435 | 419 | __ br(Assembler::lessEqual, false, Assembler::pn, not_ok); |
duke@435 | 420 | __ delayed()->nop(); |
duke@435 | 421 | __ btst(Klass::_lh_instance_slow_path_bit, G1_obj_size); |
duke@435 | 422 | __ br(Assembler::zero, false, Assembler::pn, ok); |
duke@435 | 423 | __ delayed()->nop(); |
duke@435 | 424 | __ bind(not_ok); |
duke@435 | 425 | __ stop("assert(can be fast path allocated)"); |
duke@435 | 426 | __ should_not_reach_here(); |
duke@435 | 427 | __ bind(ok); |
duke@435 | 428 | } |
duke@435 | 429 | #endif // ASSERT |
duke@435 | 430 | // If we got here then the inline TLAB allocation in compiled code failed, |
duke@435 | 431 | // so try refilling the TLAB or allocating directly from eden. |
duke@435 | 432 | Label retry_tlab, try_eden; |
duke@435 | 433 | __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G5_klass |
duke@435 | 434 | |
duke@435 | 435 | __ bind(retry_tlab); |
duke@435 | 436 | |
duke@435 | 437 | // get the instance size |
duke@435 | 438 | __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size); |
duke@435 | 439 | __ tlab_allocate(O0_obj, G1_obj_size, 0, G3_t1, slow_path); |
duke@435 | 440 | __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2); |
duke@435 | 441 | __ verify_oop(O0_obj); |
duke@435 | 442 | __ mov(O0, I0); |
duke@435 | 443 | __ ret(); |
duke@435 | 444 | __ delayed()->restore(); |
duke@435 | 445 | |
duke@435 | 446 | __ bind(try_eden); |
duke@435 | 447 | // get the instance size |
duke@435 | 448 | __ ld(G5_klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes(), G1_obj_size); |
duke@435 | 449 | __ eden_allocate(O0_obj, G1_obj_size, 0, G3_t1, G4_t2, slow_path); |
duke@435 | 450 | __ initialize_object(O0_obj, G5_klass, G1_obj_size, 0, G3_t1, G4_t2); |
duke@435 | 451 | __ verify_oop(O0_obj); |
duke@435 | 452 | __ mov(O0, I0); |
duke@435 | 453 | __ ret(); |
duke@435 | 454 | __ delayed()->restore(); |
duke@435 | 455 | |
duke@435 | 456 | __ bind(slow_path); |
duke@435 | 457 | |
duke@435 | 458 | // pop this frame so generate_stub_call can push its own |
duke@435 | 459 | __ restore(); |
duke@435 | 460 | } |
duke@435 | 461 | |
duke@435 | 462 | oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_instance), G5_klass); |
duke@435 | 463 | // I0->O0: new instance |
duke@435 | 464 | } |
duke@435 | 465 | |
duke@435 | 466 | break; |
duke@435 | 467 | |
duke@435 | 468 | #ifdef TIERED |
duke@435 | 469 | case counter_overflow_id: |
duke@435 | 470 | // G4 contains bci |
duke@435 | 471 | oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4); |
duke@435 | 472 | break; |
duke@435 | 473 | #endif // TIERED |
duke@435 | 474 | |
duke@435 | 475 | case new_type_array_id: |
duke@435 | 476 | case new_object_array_id: |
duke@435 | 477 | { |
duke@435 | 478 | Register G5_klass = G5; // Incoming |
duke@435 | 479 | Register G4_length = G4; // Incoming |
duke@435 | 480 | Register O0_obj = O0; // Outgoing |
duke@435 | 481 | |
duke@435 | 482 | Address klass_lh(G5_klass, 0, ((klassOopDesc::header_size() * HeapWordSize) |
duke@435 | 483 | + Klass::layout_helper_offset_in_bytes())); |
duke@435 | 484 | assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
duke@435 | 485 | assert(Klass::_lh_header_size_mask == 0xFF, "bytewise"); |
duke@435 | 486 | // Use this offset to pick out an individual byte of the layout_helper: |
duke@435 | 487 | const int klass_lh_header_size_offset = ((BytesPerInt - 1) // 3 - 2 = 1 selects byte 1 of {0,1,2,3} |
duke@435 | 488 | - Klass::_lh_header_size_shift / BitsPerByte); |
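        |     | // (SPARC is big-endian, so byte (BytesPerInt - 1) - shift/BitsPerByte of |
        |     | // the jint layout_helper holds the header-size field; the asserts above |
        |     | // guarantee the field is byte-aligned and byte-sized, so a single ldub |
        |     | // can read it.) |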
duke@435 | 489 | |
duke@435 | 490 | if (id == new_type_array_id) { |
duke@435 | 491 | __ set_info("new_type_array", dont_gc_arguments); |
duke@435 | 492 | } else { |
duke@435 | 493 | __ set_info("new_object_array", dont_gc_arguments); |
duke@435 | 494 | } |
duke@435 | 495 | |
duke@435 | 496 | #ifdef ASSERT |
duke@435 | 497 | // assert object type is really an array of the proper kind |
duke@435 | 498 | { |
duke@435 | 499 | Label ok; |
duke@435 | 500 | Register G3_t1 = G3; |
duke@435 | 501 | __ ld(klass_lh, G3_t1); |
duke@435 | 502 | __ sra(G3_t1, Klass::_lh_array_tag_shift, G3_t1); |
duke@435 | 503 | int tag = ((id == new_type_array_id) |
duke@435 | 504 | ? Klass::_lh_array_tag_type_value |
duke@435 | 505 | : Klass::_lh_array_tag_obj_value); |
duke@435 | 506 | __ cmp(G3_t1, tag); |
duke@435 | 507 | __ brx(Assembler::equal, false, Assembler::pt, ok); |
duke@435 | 508 | __ delayed()->nop(); |
duke@435 | 509 | __ stop("assert(is an array klass)"); |
duke@435 | 510 | __ should_not_reach_here(); |
duke@435 | 511 | __ bind(ok); |
duke@435 | 512 | } |
duke@435 | 513 | #endif // ASSERT |
duke@435 | 514 | |
duke@435 | 515 | if (UseTLAB && FastTLABRefill) { |
duke@435 | 516 | Label slow_path; |
duke@435 | 517 | Register G1_arr_size = G1; |
duke@435 | 518 | Register G3_t1 = G3; |
duke@435 | 519 | Register O1_t2 = O1; |
duke@435 | 520 | assert_different_registers(G5_klass, G4_length, G1_arr_size, G3_t1, O1_t2); |
duke@435 | 521 | |
duke@435 | 522 | // check that array length is small enough for fast path |
duke@435 | 523 | __ set(C1_MacroAssembler::max_array_allocation_length, G3_t1); |
duke@435 | 524 | __ cmp(G4_length, G3_t1); |
duke@435 | 525 | __ br(Assembler::greaterUnsigned, false, Assembler::pn, slow_path); |
duke@435 | 526 | __ delayed()->nop(); |
duke@435 | 527 | |
duke@435 | 528 | // If we got here then the inline TLAB allocation in compiled code failed, |
duke@435 | 529 | // so try refilling the TLAB or allocating directly from eden. |
duke@435 | 530 | Label retry_tlab, try_eden; |
duke@435 | 531 | __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves G4_length and G5_klass |
duke@435 | 532 | |
duke@435 | 533 | __ bind(retry_tlab); |
duke@435 | 534 | |
duke@435 | 535 | // get the allocation size: (length << (layout_helper & 0x1F)) + header_size |
duke@435 | 536 | __ ld(klass_lh, G3_t1); |
duke@435 | 537 | __ sll(G4_length, G3_t1, G1_arr_size); |
duke@435 | 538 | __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1); |
duke@435 | 539 | __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1); |
duke@435 | 540 | __ add(G1_arr_size, G3_t1, G1_arr_size); |
duke@435 | 541 | __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); // align up |
duke@435 | 542 | __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size); |
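        |     | // (sll takes its shift count from the low 5 bits of G3_t1, which is |
        |     | // exactly the "layout_helper & 0x1F" element-size shift, so the |
        |     | // layout_helper needs no explicit masking before the shift.) |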
duke@435 | 543 | |
duke@435 | 544 | __ tlab_allocate(O0_obj, G1_arr_size, 0, G3_t1, slow_path); // preserves G1_arr_size |
duke@435 | 545 | |
duke@435 | 546 | __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2); |
duke@435 | 547 | __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset); |
duke@435 | 548 | __ sub(G1_arr_size, G3_t1, O1_t2); // body length |
duke@435 | 549 | __ add(O0_obj, G3_t1, G3_t1); // body start |
duke@435 | 550 | __ initialize_body(G3_t1, O1_t2); |
duke@435 | 551 | __ verify_oop(O0_obj); |
duke@435 | 552 | __ retl(); |
duke@435 | 553 | __ delayed()->nop(); |
duke@435 | 554 | |
duke@435 | 555 | __ bind(try_eden); |
duke@435 | 556 | // get the allocation size: (length << (layout_helper & 0x1F)) + header_size |
duke@435 | 557 | __ ld(klass_lh, G3_t1); |
duke@435 | 558 | __ sll(G4_length, G3_t1, G1_arr_size); |
duke@435 | 559 | __ srl(G3_t1, Klass::_lh_header_size_shift, G3_t1); |
duke@435 | 560 | __ and3(G3_t1, Klass::_lh_header_size_mask, G3_t1); |
duke@435 | 561 | __ add(G1_arr_size, G3_t1, G1_arr_size); |
duke@435 | 562 | __ add(G1_arr_size, MinObjAlignmentInBytesMask, G1_arr_size); |
duke@435 | 563 | __ and3(G1_arr_size, ~MinObjAlignmentInBytesMask, G1_arr_size); |
duke@435 | 564 | |
duke@435 | 565 | __ eden_allocate(O0_obj, G1_arr_size, 0, G3_t1, O1_t2, slow_path); // preserves G1_arr_size |
duke@435 | 566 | |
duke@435 | 567 | __ initialize_header(O0_obj, G5_klass, G4_length, G3_t1, O1_t2); |
duke@435 | 568 | __ ldub(klass_lh, G3_t1, klass_lh_header_size_offset); |
duke@435 | 569 | __ sub(G1_arr_size, G3_t1, O1_t2); // body length |
duke@435 | 570 | __ add(O0_obj, G3_t1, G3_t1); // body start |
duke@435 | 571 | __ initialize_body(G3_t1, O1_t2); |
duke@435 | 572 | __ verify_oop(O0_obj); |
duke@435 | 573 | __ retl(); |
duke@435 | 574 | __ delayed()->nop(); |
duke@435 | 575 | |
duke@435 | 576 | __ bind(slow_path); |
duke@435 | 577 | } |
duke@435 | 578 | |
duke@435 | 579 | if (id == new_type_array_id) { |
duke@435 | 580 | oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_type_array), G5_klass, G4_length); |
duke@435 | 581 | } else { |
duke@435 | 582 | oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_object_array), G5_klass, G4_length); |
duke@435 | 583 | } |
duke@435 | 584 | // I0 -> O0: new array |
duke@435 | 585 | } |
duke@435 | 586 | break; |
duke@435 | 587 | |
duke@435 | 588 | case new_multi_array_id: |
duke@435 | 589 | { // O0: klass |
duke@435 | 590 | // O1: rank |
duke@435 | 591 | // O2: address of 1st dimension |
duke@435 | 592 | __ set_info("new_multi_array", dont_gc_arguments); |
duke@435 | 593 | oop_maps = generate_stub_call(sasm, I0, CAST_FROM_FN_PTR(address, new_multi_array), I0, I1, I2); |
duke@435 | 594 | // I0 -> O0: new multi array |
duke@435 | 595 | } |
duke@435 | 596 | break; |
duke@435 | 597 | |
duke@435 | 598 | case register_finalizer_id: |
duke@435 | 599 | { |
duke@435 | 600 | __ set_info("register_finalizer", dont_gc_arguments); |
duke@435 | 601 | |
duke@435 | 602 | // load the klass and check the has-finalizer flag |
duke@435 | 603 | Label register_finalizer; |
duke@435 | 604 | Register t = O1; |
duke@435 | 605 | __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t); |
duke@435 | 606 | __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t); |
duke@435 | 607 | __ set(JVM_ACC_HAS_FINALIZER, G3); |
duke@435 | 608 | __ andcc(G3, t, G0); |
duke@435 | 609 | __ br(Assembler::notZero, false, Assembler::pt, register_finalizer); |
duke@435 | 610 | __ delayed()->nop(); |
duke@435 | 611 | |
duke@435 | 612 | // do a leaf return |
duke@435 | 613 | __ retl(); |
duke@435 | 614 | __ delayed()->nop(); |
duke@435 | 615 | |
duke@435 | 616 | __ bind(register_finalizer); |
duke@435 | 617 | OopMap* oop_map = save_live_registers(sasm); |
duke@435 | 618 | int call_offset = __ call_RT(noreg, noreg, |
duke@435 | 619 | CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), I0); |
duke@435 | 620 | oop_maps = new OopMapSet(); |
duke@435 | 621 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 622 | |
duke@435 | 623 | // Now restore all the live registers |
duke@435 | 624 | restore_live_registers(sasm); |
duke@435 | 625 | |
duke@435 | 626 | __ ret(); |
duke@435 | 627 | __ delayed()->restore(); |
duke@435 | 628 | } |
duke@435 | 629 | break; |
duke@435 | 630 | |
duke@435 | 631 | case throw_range_check_failed_id: |
duke@435 | 632 | { __ set_info("range_check_failed", dont_gc_arguments); // arguments will be discarded |
duke@435 | 633 | // G4: index |
duke@435 | 634 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); |
duke@435 | 635 | } |
duke@435 | 636 | break; |
duke@435 | 637 | |
duke@435 | 638 | case throw_index_exception_id: |
duke@435 | 639 | { __ set_info("index_range_check_failed", dont_gc_arguments); // arguments will be discarded |
duke@435 | 640 | // G4: index |
duke@435 | 641 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); |
duke@435 | 642 | } |
duke@435 | 643 | break; |
duke@435 | 644 | |
duke@435 | 645 | case throw_div0_exception_id: |
duke@435 | 646 | { __ set_info("throw_div0_exception", dont_gc_arguments); |
duke@435 | 647 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); |
duke@435 | 648 | } |
duke@435 | 649 | break; |
duke@435 | 650 | |
duke@435 | 651 | case throw_null_pointer_exception_id: |
duke@435 | 652 | { __ set_info("throw_null_pointer_exception", dont_gc_arguments); |
duke@435 | 653 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); |
duke@435 | 654 | } |
duke@435 | 655 | break; |
duke@435 | 656 | |
duke@435 | 657 | case handle_exception_id: |
duke@435 | 658 | { |
duke@435 | 659 | __ set_info("handle_exception", dont_gc_arguments); |
duke@435 | 660 | // make a frame and preserve the caller's caller-save registers |
duke@435 | 661 | |
duke@435 | 662 | oop_maps = new OopMapSet(); |
duke@435 | 663 | OopMap* oop_map = save_live_registers(sasm); |
duke@435 | 664 | __ mov(Oexception->after_save(), Oexception); |
duke@435 | 665 | __ mov(Oissuing_pc->after_save(), Oissuing_pc); |
duke@435 | 666 | generate_handle_exception(sasm, oop_maps, oop_map); |
duke@435 | 667 | } |
duke@435 | 668 | break; |
duke@435 | 669 | |
duke@435 | 670 | case unwind_exception_id: |
duke@435 | 671 | { |
duke@435 | 672 | // O0: exception |
duke@435 | 673 | // I7: address of call to this method |
duke@435 | 674 | |
duke@435 | 675 | __ set_info("unwind_exception", dont_gc_arguments); |
duke@435 | 676 | __ mov(Oexception, Oexception->after_save()); |
duke@435 | 677 | __ add(I7, frame::pc_return_offset, Oissuing_pc->after_save()); |
duke@435 | 678 | |
duke@435 | 679 | __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), |
duke@435 | 680 | Oissuing_pc->after_save()); |
duke@435 | 681 | __ verify_not_null_oop(Oexception->after_save()); |
duke@435 | 682 | __ jmp(O0, 0); |
duke@435 | 683 | __ delayed()->restore(); |
duke@435 | 684 | } |
duke@435 | 685 | break; |
duke@435 | 686 | |
duke@435 | 687 | case throw_array_store_exception_id: |
duke@435 | 688 | { |
duke@435 | 689 | __ set_info("throw_array_store_exception", dont_gc_arguments); |
duke@435 | 690 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false); |
duke@435 | 691 | } |
duke@435 | 692 | break; |
duke@435 | 693 | |
duke@435 | 694 | case throw_class_cast_exception_id: |
duke@435 | 695 | { |
duke@435 | 696 | // G4: object |
duke@435 | 697 | __ set_info("throw_class_cast_exception", dont_gc_arguments); |
duke@435 | 698 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); |
duke@435 | 699 | } |
duke@435 | 700 | break; |
duke@435 | 701 | |
duke@435 | 702 | case throw_incompatible_class_change_error_id: |
duke@435 | 703 | { |
duke@435 | 704 | __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments); |
duke@435 | 705 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); |
duke@435 | 706 | } |
duke@435 | 707 | break; |
duke@435 | 708 | |
duke@435 | 709 | case slow_subtype_check_id: |
duke@435 | 710 | { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super ); |
duke@435 | 711 | // Arguments : |
duke@435 | 712 | // |
duke@435 | 713 | // ret : G3 |
duke@435 | 714 | // sub : G3, argument, destroyed |
duke@435 | 715 | // super: G1, argument, not changed |
duke@435 | 716 | // raddr: O7, blown by call |
jrose@1079 | 717 | Label miss; |
duke@435 | 718 | |
duke@435 | 719 | __ save_frame(0); // Blow no registers! |
duke@435 | 720 | |
jrose@1079 | 721 | __ check_klass_subtype_slow_path(G3, G1, L0, L1, L2, L4, NULL, &miss); |
duke@435 | 722 | |
duke@435 | 723 | __ mov(1, G3); |
jrose@1079 | 724 | __ ret(); // Result in G3 is 'true' |
duke@435 | 725 | __ delayed()->restore(); // free copy or add can go here |
duke@435 | 726 | |
duke@435 | 727 | __ bind(miss); |
duke@435 | 728 | __ mov(0, G3); |
jrose@1079 | 729 | __ ret(); // Result in G3 is 'false' |
duke@435 | 730 | __ delayed()->restore(); // free copy or add can go here |
duke@435 | 731 | } |
        |     | break; |
duke@435 | 732 | |
duke@435 | 733 | case monitorenter_nofpu_id: |
duke@435 | 734 | case monitorenter_id: |
duke@435 | 735 | { // G4: object |
duke@435 | 736 | // G5: lock address |
duke@435 | 737 | __ set_info("monitorenter", dont_gc_arguments); |
duke@435 | 738 | |
duke@435 | 739 | int save_fpu_registers = (id == monitorenter_id); |
duke@435 | 740 | // make a frame and preserve the caller's caller-save registers |
duke@435 | 741 | OopMap* oop_map = save_live_registers(sasm, save_fpu_registers); |
duke@435 | 742 | |
duke@435 | 743 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), G4, G5); |
duke@435 | 744 | |
duke@435 | 745 | oop_maps = new OopMapSet(); |
duke@435 | 746 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 747 | restore_live_registers(sasm, save_fpu_registers); |
duke@435 | 748 | |
duke@435 | 749 | __ ret(); |
duke@435 | 750 | __ delayed()->restore(); |
duke@435 | 751 | } |
duke@435 | 752 | break; |
duke@435 | 753 | |
duke@435 | 754 | case monitorexit_nofpu_id: |
duke@435 | 755 | case monitorexit_id: |
duke@435 | 756 | { // G4: lock address |
duke@435 | 757 | // note: really a leaf routine but must set up last java sp |
duke@435 | 758 | // => use call_RT for now (speed can be improved by |
duke@435 | 759 | // doing the last java sp setup manually) |
duke@435 | 760 | __ set_info("monitorexit", dont_gc_arguments); |
duke@435 | 761 | |
duke@435 | 762 | int save_fpu_registers = (id == monitorexit_id); |
duke@435 | 763 | // make a frame and preserve the caller's caller-save registers |
duke@435 | 764 | OopMap* oop_map = save_live_registers(sasm, save_fpu_registers); |
duke@435 | 765 | |
duke@435 | 766 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), G4); |
duke@435 | 767 | |
duke@435 | 768 | oop_maps = new OopMapSet(); |
duke@435 | 769 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 770 | restore_live_registers(sasm, save_fpu_registers); |
duke@435 | 771 | |
duke@435 | 772 | __ ret(); |
duke@435 | 773 | __ delayed()->restore(); |
duke@435 | 774 | |
duke@435 | 775 | } |
duke@435 | 776 | break; |
duke@435 | 777 | |
duke@435 | 778 | case access_field_patching_id: |
duke@435 | 779 | { __ set_info("access_field_patching", dont_gc_arguments); |
duke@435 | 780 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); |
duke@435 | 781 | } |
duke@435 | 782 | break; |
duke@435 | 783 | |
duke@435 | 784 | case load_klass_patching_id: |
duke@435 | 785 | { __ set_info("load_klass_patching", dont_gc_arguments); |
duke@435 | 786 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); |
duke@435 | 787 | } |
duke@435 | 788 | break; |
duke@435 | 789 | |
duke@435 | 790 | case jvmti_exception_throw_id: |
duke@435 | 791 | { // Oexception : exception |
duke@435 | 792 | __ set_info("jvmti_exception_throw", dont_gc_arguments); |
duke@435 | 793 | oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), I0); |
duke@435 | 794 | } |
duke@435 | 795 | break; |
duke@435 | 796 | |
duke@435 | 797 | case dtrace_object_alloc_id: |
duke@435 | 798 | { // O0: object |
duke@435 | 799 | __ set_info("dtrace_object_alloc", dont_gc_arguments); |
duke@435 | 800 | // We can't GC here, so skip the oopmap, but make sure that all |
duke@435 | 801 | // the live registers get saved. |
duke@435 | 802 | save_live_registers(sasm); |
duke@435 | 803 | |
duke@435 | 804 | __ save_thread(L7_thread_cache); |
duke@435 | 805 | __ call(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), |
duke@435 | 806 | relocInfo::runtime_call_type); |
duke@435 | 807 | __ delayed()->mov(I0, O0); |
duke@435 | 808 | __ restore_thread(L7_thread_cache); |
duke@435 | 809 | |
duke@435 | 810 | restore_live_registers(sasm); |
duke@435 | 811 | __ ret(); |
duke@435 | 812 | __ delayed()->restore(); |
duke@435 | 813 | } |
duke@435 | 814 | break; |
duke@435 | 815 | |
ysr@777 | 816 | #ifndef SERIALGC |
ysr@777 | 817 | case g1_pre_barrier_slow_id: |
ysr@777 | 818 | { // G4: previous value of memory |
ysr@777 | 819 | BarrierSet* bs = Universe::heap()->barrier_set(); |
ysr@777 | 820 | if (bs->kind() != BarrierSet::G1SATBCTLogging) { |
ysr@777 | 821 | __ save_frame(0); |
ysr@777 | 822 | __ set((int)id, O1); |
ysr@777 | 823 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0); |
ysr@777 | 824 | __ should_not_reach_here(); |
ysr@777 | 825 | break; |
ysr@777 | 826 | } |
ysr@777 | 827 | |
ysr@777 | 828 | __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments); |
ysr@777 | 829 | |
ysr@777 | 830 | Register pre_val = G4; |
ysr@777 | 831 | Register tmp = G1_scratch; |
ysr@777 | 832 | Register tmp2 = G3_scratch; |
ysr@777 | 833 | |
ysr@777 | 834 | Label refill, restart; |
ysr@777 | 835 | bool with_frame = false; // I don't know if we can do with-frame. |
ysr@777 | 836 | int satb_q_index_byte_offset = |
ysr@777 | 837 | in_bytes(JavaThread::satb_mark_queue_offset() + |
ysr@777 | 838 | PtrQueue::byte_offset_of_index()); |
ysr@777 | 839 | int satb_q_buf_byte_offset = |
ysr@777 | 840 | in_bytes(JavaThread::satb_mark_queue_offset() + |
ysr@777 | 841 | PtrQueue::byte_offset_of_buf()); |
ysr@777 | 842 | __ bind(restart); |
ysr@777 | 843 | __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp); |
ysr@777 | 844 | |
ysr@777 | 845 | __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, |
ysr@777 | 846 | Assembler::pn, tmp, refill); |
ysr@777 | 847 | |
ysr@777 | 848 | // If the branch is taken, no harm in executing this in the delay slot. |
ysr@777 | 849 | __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2); |
ysr@777 | 850 | __ sub(tmp, oopSize, tmp); |
ysr@777 | 851 | |
ysr@777 | 852 | __ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := pre_val |
ysr@777 | 853 | // Use return-from-leaf |
ysr@777 | 854 | __ retl(); |
ysr@777 | 855 | __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset); |
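        |     | // (PtrQueue convention, as assumed here: the index counts down in bytes |
        |     | // from the end of the buffer, so a zero index means the buffer is full |
        |     | // and must be handed to the runtime; that is the refill path below.) |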
ysr@777 | 856 | |
ysr@777 | 857 | __ bind(refill); |
ysr@777 | 858 | __ save_frame(0); |
ysr@777 | 859 | |
ysr@777 | 860 | __ mov(pre_val, L0); |
ysr@777 | 861 | __ mov(tmp, L1); |
ysr@777 | 862 | __ mov(tmp2, L2); |
ysr@777 | 863 | |
ysr@777 | 864 | __ call_VM_leaf(L7_thread_cache, |
ysr@777 | 865 | CAST_FROM_FN_PTR(address, |
ysr@777 | 866 | SATBMarkQueueSet::handle_zero_index_for_thread), |
ysr@777 | 867 | G2_thread); |
ysr@777 | 868 | |
ysr@777 | 869 | __ mov(L0, pre_val); |
ysr@777 | 870 | __ mov(L1, tmp); |
ysr@777 | 871 | __ mov(L2, tmp2); |
ysr@777 | 872 | |
ysr@777 | 873 | __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); |
ysr@777 | 874 | __ delayed()->restore(); |
ysr@777 | 875 | } |
ysr@777 | 876 | break; |
ysr@777 | 877 | |
ysr@777 | 878 | case g1_post_barrier_slow_id: |
ysr@777 | 879 | { |
ysr@777 | 880 | BarrierSet* bs = Universe::heap()->barrier_set(); |
ysr@777 | 881 | if (bs->kind() != BarrierSet::G1SATBCTLogging) { |
ysr@777 | 882 | __ save_frame(0); |
ysr@777 | 883 | __ set((int)id, O1); |
ysr@777 | 884 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0); |
ysr@777 | 885 | __ should_not_reach_here(); |
ysr@777 | 886 | break; |
ysr@777 | 887 | } |
ysr@777 | 888 | |
ysr@777 | 889 | __ set_info("g1_post_barrier_slow_id", dont_gc_arguments); |
ysr@777 | 890 | |
ysr@777 | 891 | Register addr = G4; |
ysr@777 | 892 | Register cardtable = G5; |
ysr@777 | 893 | Register tmp = G1_scratch; |
ysr@777 | 894 | Register tmp2 = G3_scratch; |
ysr@777 | 895 | jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base; |
ysr@777 | 896 | |
ysr@777 | 897 | Label not_already_dirty, restart, refill; |
ysr@777 | 898 | |
ysr@777 | 899 | #ifdef _LP64 |
ysr@777 | 900 | __ srlx(addr, CardTableModRefBS::card_shift, addr); |
ysr@777 | 901 | #else |
ysr@777 | 902 | __ srl(addr, CardTableModRefBS::card_shift, addr); |
ysr@777 | 903 | #endif |
ysr@777 | 904 | |
ysr@777 | 905 | Address rs(cardtable, (address)byte_map_base); |
ysr@777 | 906 | __ load_address(rs); // cardtable := <card table base> |
ysr@777 | 907 | __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable] |
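        |     | // (Card-table addressing: addr was shifted right by card_shift above, |
        |     | // and byte_map_base is pre-biased so that shifted address + base is the |
        |     | // address of the card byte covering the original heap address.) |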
ysr@777 | 908 | |
ysr@777 | 909 | __ br_on_reg_cond(Assembler::rc_nz, /*annul*/false, Assembler::pt, |
ysr@777 | 910 | tmp, not_already_dirty); |
ysr@777 | 911 | // Get addr + cardtable (the card address) into a reg by itself; useful |
ysr@777 | 912 | // in the take-the-branch case, harmless if not. |
ysr@777 | 913 | __ delayed()->add(addr, cardtable, tmp2); |
ysr@777 | 914 | |
ysr@777 | 915 | // We didn't take the branch, so we're already dirty: return. |
ysr@777 | 916 | // Use return-from-leaf |
ysr@777 | 917 | __ retl(); |
ysr@777 | 918 | __ delayed()->nop(); |
ysr@777 | 919 | |
ysr@777 | 920 | // Not dirty. |
ysr@777 | 921 | __ bind(not_already_dirty); |
ysr@777 | 922 | // First, dirty it. |
ysr@777 | 923 | __ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty). |
ysr@777 | 924 | |
ysr@777 | 925 | Register tmp3 = cardtable; |
ysr@777 | 926 | Register tmp4 = tmp; |
ysr@777 | 927 | |
ysr@777 | 928 | // these registers are now dead |
ysr@777 | 929 | addr = cardtable = tmp = noreg; |
ysr@777 | 930 | |
ysr@777 | 931 | int dirty_card_q_index_byte_offset = |
ysr@777 | 932 | in_bytes(JavaThread::dirty_card_queue_offset() + |
ysr@777 | 933 | PtrQueue::byte_offset_of_index()); |
ysr@777 | 934 | int dirty_card_q_buf_byte_offset = |
ysr@777 | 935 | in_bytes(JavaThread::dirty_card_queue_offset() + |
ysr@777 | 936 | PtrQueue::byte_offset_of_buf()); |
ysr@777 | 937 | __ bind(restart); |
ysr@777 | 938 | __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3); |
ysr@777 | 939 | |
ysr@777 | 940 | __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, |
ysr@777 | 941 | tmp3, refill); |
ysr@777 | 942 | // If the branch is taken, no harm in executing this in the delay slot. |
ysr@777 | 943 | __ delayed()->ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4); |
ysr@777 | 944 | __ sub(tmp3, oopSize, tmp3); |
ysr@777 | 945 | |
ysr@777 | 946 | __ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card> |
ysr@777 | 947 | // Use return-from-leaf |
ysr@777 | 948 | __ retl(); |
ysr@777 | 949 | __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset); |
ysr@777 | 950 | |
ysr@777 | 951 | __ bind(refill); |
ysr@777 | 952 | __ save_frame(0); |
ysr@777 | 953 | |
ysr@777 | 954 | __ mov(tmp2, L0); |
ysr@777 | 955 | __ mov(tmp3, L1); |
ysr@777 | 956 | __ mov(tmp4, L2); |
ysr@777 | 957 | |
ysr@777 | 958 | __ call_VM_leaf(L7_thread_cache, |
ysr@777 | 959 | CAST_FROM_FN_PTR(address, |
ysr@777 | 960 | DirtyCardQueueSet::handle_zero_index_for_thread), |
ysr@777 | 961 | G2_thread); |
ysr@777 | 962 | |
ysr@777 | 963 | __ mov(L0, tmp2); |
ysr@777 | 964 | __ mov(L1, tmp3); |
ysr@777 | 965 | __ mov(L2, tmp4); |
ysr@777 | 966 | |
ysr@777 | 967 | __ br(Assembler::always, /*annul*/false, Assembler::pt, restart); |
ysr@777 | 968 | __ delayed()->restore(); |
ysr@777 | 969 | } |
ysr@777 | 970 | break; |
ysr@777 | 971 | #endif // !SERIALGC |
ysr@777 | 972 | |
duke@435 | 973 | default: |
duke@435 | 974 | { __ set_info("unimplemented entry", dont_gc_arguments); |
duke@435 | 975 | __ save_frame(0); |
duke@435 | 976 | __ set((int)id, O1); |
duke@435 | 977 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), O1); |
duke@435 | 978 | __ should_not_reach_here(); |
duke@435 | 979 | } |
duke@435 | 980 | break; |
duke@435 | 981 | } |
duke@435 | 982 | return oop_maps; |
duke@435 | 983 | } |
duke@435 | 984 | |
duke@435 | 985 | |
duke@435 | 986 | void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) { |
duke@435 | 987 | Label no_deopt; |
duke@435 | 988 | Label no_handler; |
duke@435 | 989 | |
duke@435 | 990 | __ verify_not_null_oop(Oexception); |
duke@435 | 991 | |
duke@435 | 992 | // save the exception and issuing pc in the thread |
duke@435 | 993 | __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); |
duke@435 | 994 | __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset())); |
duke@435 | 995 | |
duke@435 | 996 | // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map) |
duke@435 | 997 | __ mov(I7, L0); |
duke@435 | 998 | __ mov(Oissuing_pc, I7); |
duke@435 | 999 | __ sub(I7, frame::pc_return_offset, I7); |
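        |     | // (call_RT records its return address for the oop map lookup, so |
        |     | // substituting the throwing pc for I7 makes the lookup resolve against |
        |     | // the site that raised the exception; L0 keeps the real return address |
        |     | // for the no-handler path below.) |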
duke@435 | 1000 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); |
duke@435 | 1001 | |
duke@435 | 1002 | // Note: if nmethod has been deoptimized then regardless of |
duke@435 | 1003 | // whether it had a handler or not we will deoptimize |
duke@435 | 1004 | // by entering the deopt blob with a pending exception. |
duke@435 | 1005 | |
duke@435 | 1006 | __ tst(O0); |
duke@435 | 1007 | __ br(Assembler::zero, false, Assembler::pn, no_handler); |
duke@435 | 1008 | __ delayed()->nop(); |
duke@435 | 1009 | |
duke@435 | 1010 | // restore the registers that were saved at the beginning and jump to the exception handler. |
duke@435 | 1011 | restore_live_registers(sasm); |
duke@435 | 1012 | |
duke@435 | 1013 | __ jmp(O0, 0); |
duke@435 | 1014 | __ delayed()->restore(); |
duke@435 | 1015 | |
duke@435 | 1016 | __ bind(no_handler); |
duke@435 | 1017 | __ mov(L0, I7); // restore return address |
duke@435 | 1018 | |
duke@435 | 1019 | // restore exception oop |
duke@435 | 1020 | __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save()); |
duke@435 | 1021 | __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset())); |
duke@435 | 1022 | |
duke@435 | 1023 | __ restore(); |
duke@435 | 1024 | |
duke@435 | 1025 | Address exc(G4, Runtime1::entry_for(Runtime1::unwind_exception_id)); |
duke@435 | 1026 | __ jump_to(exc, 0); |
duke@435 | 1027 | __ delayed()->nop(); |
duke@435 | 1028 | |
duke@435 | 1029 | |
duke@435 | 1030 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 1031 | } |
duke@435 | 1032 | |
duke@435 | 1033 | |
duke@435 | 1034 | #undef __ |
duke@435 | 1035 | |
duke@435 | 1036 | #define __ masm-> |