duke@435 | 1 | /* |
trims@1907 | 2 | * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_c1_Runtime1_x86.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | |
duke@435 | 29 | // Implementation of StubAssembler |
duke@435 | 30 | |
duke@435 | 31 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) { |
duke@435 | 32 | // setup registers |
never@739 | 33 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions) |
duke@435 | 34 | assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different"); |
duke@435 | 35 | assert(oop_result1 != thread && oop_result2 != thread, "registers must be different"); |
duke@435 | 36 | assert(args_size >= 0, "illegal args_size"); |
duke@435 | 37 | |
never@739 | 38 | #ifdef _LP64 |
never@739 | 39 | mov(c_rarg0, thread); |
never@739 | 40 | set_num_rt_args(0); // Nothing on stack |
never@739 | 41 | #else |
duke@435 | 42 | set_num_rt_args(1 + args_size); |
duke@435 | 43 | |
duke@435 | 44 | // push java thread (becomes first argument of C function) |
duke@435 | 45 | get_thread(thread); |
never@739 | 46 | push(thread); |
never@739 | 47 | #endif // _LP64 |
duke@435 | 48 | |
duke@435 | 49 | set_last_Java_frame(thread, noreg, rbp, NULL); |
never@739 | 50 | |
duke@435 | 51 | // do the call |
duke@435 | 52 | call(RuntimeAddress(entry)); |
duke@435 | 53 | int call_offset = offset(); |
duke@435 | 54 | // verify callee-saved register |
duke@435 | 55 | #ifdef ASSERT |
duke@435 | 56 | guarantee(thread != rax, "change this code"); |
never@739 | 57 | push(rax); |
duke@435 | 58 | { Label L; |
duke@435 | 59 | get_thread(rax); |
never@739 | 60 | cmpptr(thread, rax); |
duke@435 | 61 | jcc(Assembler::equal, L); |
duke@435 | 62 | int3(); |
duke@435 | 63 | stop("StubAssembler::call_RT: rdi not callee saved?"); |
duke@435 | 64 | bind(L); |
duke@435 | 65 | } |
never@739 | 66 | pop(rax); |
duke@435 | 67 | #endif |
duke@435 | 68 | reset_last_Java_frame(thread, true, false); |
duke@435 | 69 | |
duke@435 | 70 | // discard thread and arguments |
never@739 | 71 | NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); |
duke@435 | 72 | |
duke@435 | 73 | // check for pending exceptions |
duke@435 | 74 | { Label L; |
never@739 | 75 | cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); |
duke@435 | 76 | jcc(Assembler::equal, L); |
duke@435 | 77 | // exception pending => remove activation and forward to exception handler |
never@739 | 78 | movptr(rax, Address(thread, Thread::pending_exception_offset())); |
duke@435 | 79 | // make sure that the vm_results are cleared |
duke@435 | 80 | if (oop_result1->is_valid()) { |
xlu@947 | 81 | movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); |
duke@435 | 82 | } |
duke@435 | 83 | if (oop_result2->is_valid()) { |
xlu@947 | 84 | movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); |
duke@435 | 85 | } |
duke@435 | 86 | if (frame_size() == no_frame_size) { |
duke@435 | 87 | leave(); |
duke@435 | 88 | jump(RuntimeAddress(StubRoutines::forward_exception_entry())); |
duke@435 | 89 | } else if (_stub_id == Runtime1::forward_exception_id) { |
duke@435 | 90 | should_not_reach_here(); |
duke@435 | 91 | } else { |
duke@435 | 92 | jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); |
duke@435 | 93 | } |
duke@435 | 94 | bind(L); |
duke@435 | 95 | } |
duke@435 | 96 | // get oop results if there are any and reset the values in the thread |
duke@435 | 97 | if (oop_result1->is_valid()) { |
never@739 | 98 | movptr(oop_result1, Address(thread, JavaThread::vm_result_offset())); |
xlu@947 | 99 | movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); |
duke@435 | 100 | verify_oop(oop_result1); |
duke@435 | 101 | } |
duke@435 | 102 | if (oop_result2->is_valid()) { |
never@739 | 103 | movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset())); |
xlu@947 | 104 | movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); |
duke@435 | 105 | verify_oop(oop_result2); |
duke@435 | 106 | } |
duke@435 | 107 | return call_offset; |
duke@435 | 108 | } |
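// Usage sketch (illustrative only; see the actual stubs further down): a caller
// saves the live registers, invokes call_RT with the C entry point, and records
// an oop map at the returned offset, roughly
//
//   OopMap* map = save_live_registers(sasm, 2);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
//
// call_offset is the code offset right after the call instruction (the return pc),
// which is what the oop map is keyed on.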
duke@435 | 109 | |
duke@435 | 110 | |
duke@435 | 111 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) { |
never@739 | 112 | #ifdef _LP64 |
never@739 | 113 | mov(c_rarg1, arg1); |
never@739 | 114 | #else |
never@739 | 115 | push(arg1); |
never@739 | 116 | #endif // _LP64 |
duke@435 | 117 | return call_RT(oop_result1, oop_result2, entry, 1); |
duke@435 | 118 | } |
duke@435 | 119 | |
duke@435 | 120 | |
duke@435 | 121 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) { |
never@739 | 122 | #ifdef _LP64 |
never@739 | 123 | if (c_rarg1 == arg2) { |
never@739 | 124 | if (c_rarg2 == arg1) { |
never@739 | 125 | xchgq(arg1, arg2); |
never@739 | 126 | } else { |
never@739 | 127 | mov(c_rarg2, arg2); |
never@739 | 128 | mov(c_rarg1, arg1); |
never@739 | 129 | } |
never@739 | 130 | } else { |
never@739 | 131 | mov(c_rarg1, arg1); |
never@739 | 132 | mov(c_rarg2, arg2); |
never@739 | 133 | } |
never@739 | 134 | #else |
never@739 | 135 | push(arg2); |
never@739 | 136 | push(arg1); |
never@739 | 137 | #endif // _LP64 |
duke@435 | 138 | return call_RT(oop_result1, oop_result2, entry, 2); |
duke@435 | 139 | } |
duke@435 | 140 | |
duke@435 | 141 | |
duke@435 | 142 | int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) { |
never@739 | 143 | #ifdef _LP64 |
never@739 | 144 | // if there is any conflict use the stack |
never@739 | 145 | if (arg1 == c_rarg2 || arg1 == c_rarg3 || |
never@739 | 146 | arg2 == c_rarg1 || arg2 == c_rarg3 || |
never@739 | 147 | arg3 == c_rarg1 || arg3 == c_rarg2) { |
never@739 | 148 | push(arg3); |
never@739 | 149 | push(arg2); |
never@739 | 150 | push(arg1); |
never@739 | 151 | pop(c_rarg1); |
never@739 | 152 | pop(c_rarg2); |
never@739 | 153 | pop(c_rarg3); |
never@739 | 154 | } else { |
never@739 | 155 | mov(c_rarg1, arg1); |
never@739 | 156 | mov(c_rarg2, arg2); |
never@739 | 157 | mov(c_rarg3, arg3); |
never@739 | 158 | } |
never@739 | 159 | #else |
never@739 | 160 | push(arg3); |
never@739 | 161 | push(arg2); |
never@739 | 162 | push(arg1); |
never@739 | 163 | #endif // _LP64 |
duke@435 | 164 | return call_RT(oop_result1, oop_result2, entry, 3); |
duke@435 | 165 | } |
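// Worked example of the conflict handling above (hypothetical assignment): if a
// caller passes arg1 in c_rarg2 and arg2 in c_rarg1, the straightforward
//
//   mov(c_rarg1, arg1);   // would overwrite arg2 while it still lives in c_rarg1
//   mov(c_rarg2, arg2);   // and then copy the clobbered value
//
// would lose a value. The two-argument variant therefore swaps or reorders the
// moves, and the three-argument variant simply bounces all values through the
// stack whenever any conflict is detected.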
duke@435 | 166 | |
duke@435 | 167 | |
duke@435 | 168 | // Implementation of StubFrame |
duke@435 | 169 | |
duke@435 | 170 | class StubFrame: public StackObj { |
duke@435 | 171 | private: |
duke@435 | 172 | StubAssembler* _sasm; |
duke@435 | 173 | |
duke@435 | 174 | public: |
duke@435 | 175 | StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); |
duke@435 | 176 | void load_argument(int offset_in_words, Register reg); |
duke@435 | 177 | |
duke@435 | 178 | ~StubFrame(); |
duke@435 | 179 | }; |
duke@435 | 180 | |
duke@435 | 181 | |
duke@435 | 182 | #define __ _sasm-> |
duke@435 | 183 | |
duke@435 | 184 | StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { |
duke@435 | 185 | _sasm = sasm; |
duke@435 | 186 | __ set_info(name, must_gc_arguments); |
duke@435 | 187 | __ enter(); |
duke@435 | 188 | } |
duke@435 | 189 | |
duke@435 | 190 | // load parameters that were stored with LIR_Assembler::store_parameter |
duke@435 | 191 | // Note: offsets for store_parameter and load_argument must match |
duke@435 | 192 | void StubFrame::load_argument(int offset_in_words, Register reg) { |
duke@435 | 193 | // rbp, + 0: link |
duke@435 | 194 | // + 1: return address |
duke@435 | 195 | // + 2: argument with offset 0 |
duke@435 | 196 | // + 3: argument with offset 1 |
duke@435 | 197 | // + 4: ... |
duke@435 | 198 | |
never@739 | 199 | __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord)); |
duke@435 | 200 | } |
duke@435 | 201 | |
duke@435 | 202 | |
duke@435 | 203 | StubFrame::~StubFrame() { |
duke@435 | 204 | __ leave(); |
duke@435 | 205 | __ ret(0); |
duke@435 | 206 | } |
duke@435 | 207 | |
duke@435 | 208 | #undef __ |
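// Usage sketch (illustrative only): a stub body is normally bracketed by a
// StubFrame and pulls its spilled parameters back out of the caller frame,
// roughly
//
//   StubFrame f(sasm, "some_stub", dont_gc_arguments);   // emits enter()
//   f.load_argument(0, rbx);   // argument with offset 0 lives at rbp + 2 words
//   ...                        // stub body
//                              // destructor emits leave() and ret(0)
//
// "some_stub" is just a placeholder name; dont_gc_arguments is defined in
// generate_code_for below.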
duke@435 | 209 | |
duke@435 | 210 | |
duke@435 | 211 | // Implementation of Runtime1 |
duke@435 | 212 | |
duke@435 | 213 | #define __ sasm-> |
duke@435 | 214 | |
never@739 | 215 | const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; |
never@739 | 216 | const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; |
duke@435 | 217 | |
duke@435 | 218 | // Stack layout for saving/restoring all the registers needed during a runtime |
duke@435 | 219 | // call (this includes deoptimization) |
duke@435 | 220 | // Note that users of this frame may well have arguments to some runtime call |
duke@435 | 221 | // while these values are on the stack. These positions neglect those arguments |
duke@435 | 222 | // but the code in save_live_registers will take the argument count into |
duke@435 | 223 | // account. |
duke@435 | 224 | // |
never@739 | 225 | #ifdef _LP64 |
never@739 | 226 | #define SLOT2(x) x, |
never@739 | 227 | #define SLOT_PER_WORD 2 |
never@739 | 228 | #else |
never@739 | 229 | #define SLOT2(x) |
never@739 | 230 | #define SLOT_PER_WORD 1 |
never@739 | 231 | #endif // _LP64 |
never@739 | 232 | |
duke@435 | 233 | enum reg_save_layout { |
never@739 | 234 | // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that |
never@739 | 235 | // happen and will assert if the stack size we create is misaligned |
never@739 | 236 | #ifdef _LP64 |
never@739 | 237 | align_dummy_0, align_dummy_1, |
never@739 | 238 | #endif // _LP64 |
never@739 | 239 | dummy1, SLOT2(dummy1H) // 0, 4 |
never@739 | 240 | dummy2, SLOT2(dummy2H) // 8, 12 |
duke@435 | 241 | // Two temps to be used as needed by users of save/restore callee registers |
never@739 | 242 | temp_2_off, SLOT2(temp_2H_off) // 16, 20 |
never@739 | 243 | temp_1_off, SLOT2(temp_1H_off) // 24, 28 |
never@739 | 244 | xmm_regs_as_doubles_off, // 32 |
never@739 | 245 | float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 |
never@739 | 246 | fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 |
never@739 | 247 | // fpu_state_end_off is exclusive |
never@739 | 248 | fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 |
never@739 | 249 | marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 |
never@739 | 250 | extra_space_offset, // 360 |
never@739 | 251 | #ifdef _LP64 |
never@739 | 252 | r15_off = extra_space_offset, r15H_off, // 360, 364 |
never@739 | 253 | r14_off, r14H_off, // 368, 372 |
never@739 | 254 | r13_off, r13H_off, // 376, 380 |
never@739 | 255 | r12_off, r12H_off, // 384, 388 |
never@739 | 256 | r11_off, r11H_off, // 392, 396 |
never@739 | 257 | r10_off, r10H_off, // 400, 404 |
never@739 | 258 | r9_off, r9H_off, // 408, 412 |
never@739 | 259 | r8_off, r8H_off, // 416, 420 |
never@739 | 260 | rdi_off, rdiH_off, // 424, 428 |
never@739 | 261 | #else |
duke@435 | 262 | rdi_off = extra_space_offset, |
never@739 | 263 | #endif // _LP64 |
never@739 | 264 | rsi_off, SLOT2(rsiH_off) // 432, 436 |
never@739 | 265 | rbp_off, SLOT2(rbpH_off) // 440, 444 |
never@739 | 266 | rsp_off, SLOT2(rspH_off) // 448, 452 |
never@739 | 267 | rbx_off, SLOT2(rbxH_off) // 456, 460 |
never@739 | 268 | rdx_off, SLOT2(rdxH_off) // 464, 468 |
never@739 | 269 | rcx_off, SLOT2(rcxH_off) // 472, 476 |
never@739 | 270 | rax_off, SLOT2(raxH_off) // 480, 484 |
never@739 | 271 | saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 |
never@739 | 272 | return_off, SLOT2(returnH_off) // 496, 500 |
never@739 | 273 | reg_save_frame_size, // As noted: neglects any parameters to runtime // 504 |
never@739 | 274 | |
never@739 | 275 | #ifdef _WIN64 |
never@739 | 276 | c_rarg0_off = rcx_off, |
never@739 | 277 | #else |
never@739 | 278 | c_rarg0_off = rdi_off, |
never@739 | 279 | #endif // WIN64 |
duke@435 | 280 | |
duke@435 | 281 | // equates |
duke@435 | 282 | |
duke@435 | 283 | // illegal instruction handler |
duke@435 | 284 | continue_dest_off = temp_1_off, |
duke@435 | 285 | |
duke@435 | 286 | // deoptimization equates |
duke@435 | 287 | fp0_off = float_regs_as_doubles_off, // slot for java float/double return value |
duke@435 | 288 | xmm0_off = xmm_regs_as_doubles_off, // slot for java float/double return value |
duke@435 | 289 | deopt_type = temp_2_off, // slot for type of deopt in progress |
duke@435 | 290 | ret_type = temp_1_off // slot for return type |
duke@435 | 291 | }; |
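// Illustrative mapping (assuming VMRegImpl::stack_slot_size == 4, matching the
// byte offsets in the comments above): each enum value is a 32-bit stack-slot
// index relative to rsp once the save area is set up, so the saved rax, for
// example, is addressed as
//
//   Address(rsp, rax_off * VMRegImpl::stack_slot_size)
//
// and on 64 bit the *H_off entries name the high halves of the same slots.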
duke@435 | 292 | |
duke@435 | 293 | |
duke@435 | 294 | |
duke@435 | 295 | // Save off registers which might be killed by calls into the runtime. |
duke@435 | 296 | // Tries to be smart about FP registers. In particular we separate |
duke@435 | 297 | // saving and describing the FPU registers for deoptimization since we |
duke@435 | 298 | // have to save the FPU registers twice if we describe them and on P4 |
duke@435 | 299 | // saving FPU registers which don't contain anything appears |
duke@435 | 300 | // expensive. The deopt blob is the only thing which needs to |
duke@435 | 301 | // describe FPU registers. In all other cases it should be sufficient |
duke@435 | 302 | // to simply save their current value. |
duke@435 | 303 | |
duke@435 | 304 | static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, |
duke@435 | 305 | bool save_fpu_registers = true) { |
never@739 | 306 | |
never@739 | 307 | // In 64bit all the args are in regs so there are no additional stack slots |
never@739 | 308 | LP64_ONLY(num_rt_args = 0); |
never@739 | 309 | LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");) |
never@739 | 310 | int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread |
never@739 | 311 | sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); |
duke@435 | 312 | |
duke@435 | 313 | // record saved value locations in an OopMap |
duke@435 | 314 | // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread |
never@739 | 315 | OopMap* map = new OopMap(frame_size_in_slots, 0); |
duke@435 | 316 | map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); |
duke@435 | 317 | map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); |
duke@435 | 318 | map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); |
duke@435 | 319 | map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); |
duke@435 | 320 | map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); |
duke@435 | 321 | map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); |
never@739 | 322 | #ifdef _LP64 |
never@739 | 323 | map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); |
never@739 | 324 | map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); |
never@739 | 325 | map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); |
never@739 | 326 | map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); |
never@739 | 327 | map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); |
never@739 | 328 | map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); |
never@739 | 329 | map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); |
never@739 | 330 | map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); |
never@739 | 331 | |
never@739 | 332 | // This is stupid but needed. |
never@739 | 333 | map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); |
never@739 | 334 | map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); |
never@739 | 335 | map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); |
never@739 | 336 | map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); |
never@739 | 337 | map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); |
never@739 | 338 | map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); |
never@739 | 339 | |
never@739 | 340 | map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); |
never@739 | 341 | map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); |
never@739 | 342 | map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); |
never@739 | 343 | map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); |
never@739 | 344 | map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); |
never@739 | 345 | map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); |
never@739 | 346 | map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); |
never@739 | 347 | map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); |
never@739 | 348 | #endif // _LP64 |
duke@435 | 349 | |
duke@435 | 350 | if (save_fpu_registers) { |
duke@435 | 351 | if (UseSSE < 2) { |
duke@435 | 352 | int fpu_off = float_regs_as_doubles_off; |
duke@435 | 353 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { |
duke@435 | 354 | VMReg fpu_name_0 = FrameMap::fpu_regname(n); |
duke@435 | 355 | map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); |
duke@435 | 356 | // %%% This is really a waste but we'll keep things as they were for now |
duke@435 | 357 | if (true) { |
duke@435 | 358 | map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); |
duke@435 | 359 | } |
duke@435 | 360 | fpu_off += 2; |
duke@435 | 361 | } |
duke@435 | 362 | assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots"); |
duke@435 | 363 | } |
duke@435 | 364 | |
duke@435 | 365 | if (UseSSE >= 2) { |
duke@435 | 366 | int xmm_off = xmm_regs_as_doubles_off; |
duke@435 | 367 | for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { |
duke@435 | 368 | VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); |
duke@435 | 369 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); |
duke@435 | 370 | // %%% This is really a waste but we'll keep things as they were for now |
duke@435 | 371 | if (true) { |
duke@435 | 372 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next()); |
duke@435 | 373 | } |
duke@435 | 374 | xmm_off += 2; |
duke@435 | 375 | } |
duke@435 | 376 | assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); |
duke@435 | 377 | |
duke@435 | 378 | } else if (UseSSE == 1) { |
duke@435 | 379 | int xmm_off = xmm_regs_as_doubles_off; |
duke@435 | 380 | for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { |
duke@435 | 381 | VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); |
duke@435 | 382 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); |
duke@435 | 383 | xmm_off += 2; |
duke@435 | 384 | } |
duke@435 | 385 | assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers"); |
duke@435 | 386 | } |
duke@435 | 387 | } |
duke@435 | 388 | |
duke@435 | 389 | return map; |
duke@435 | 390 | } |
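// Note on the num_rt_args bias (sketch of the reasoning): the slot indices in the
// oop map are relative to sp after the runtime call. On 32 bit the thread and any
// outgoing arguments pushed by call_RT sit between sp and the save area, so every
// saved-register slot is shifted by num_rt_args, e.g.
//
//   map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
//
// On 64 bit the arguments travel in registers, so num_rt_args is forced to 0 above.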
duke@435 | 391 | |
duke@435 | 392 | static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, |
duke@435 | 393 | bool save_fpu_registers = true) { |
duke@435 | 394 | __ block_comment("save_live_registers"); |
duke@435 | 395 | |
never@739 | 396 | // 64bit passes the args in regs to the c++ runtime |
never@739 | 397 | int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread |
duke@435 | 398 | // frame_size = round_to(frame_size, 4); |
never@739 | 399 | sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); |
duke@435 | 400 | |
never@739 | 401 | __ pusha(); // integer registers |
duke@435 | 402 | |
duke@435 | 403 | // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); |
duke@435 | 404 | // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); |
duke@435 | 405 | |
never@739 | 406 | __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); |
duke@435 | 407 | |
duke@435 | 408 | #ifdef ASSERT |
never@739 | 409 | __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); |
duke@435 | 410 | #endif |
duke@435 | 411 | |
duke@435 | 412 | if (save_fpu_registers) { |
duke@435 | 413 | if (UseSSE < 2) { |
duke@435 | 414 | // save FPU stack |
never@739 | 415 | __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
duke@435 | 416 | __ fwait(); |
duke@435 | 417 | |
duke@435 | 418 | #ifdef ASSERT |
duke@435 | 419 | Label ok; |
never@739 | 420 | __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); |
duke@435 | 421 | __ jccb(Assembler::equal, ok); |
duke@435 | 422 | __ stop("corrupted control word detected"); |
duke@435 | 423 | __ bind(ok); |
duke@435 | 424 | #endif |
duke@435 | 425 | |
duke@435 | 426 | // Reset the control word to guard against exceptions being unmasked |
duke@435 | 427 | // since fstp_d can cause FPU stack underflow exceptions. Write it |
duke@435 | 428 | // into the on stack copy and then reload that to make sure that the |
duke@435 | 429 | // current and future values are correct. |
never@739 | 430 | __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); |
never@739 | 431 | __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
duke@435 | 432 | |
duke@435 | 433 | // Save the FPU registers in de-opt-able form |
never@739 | 434 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); |
never@739 | 435 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); |
never@739 | 436 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); |
never@739 | 437 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); |
never@739 | 438 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); |
never@739 | 439 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); |
never@739 | 440 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); |
never@739 | 441 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); |
duke@435 | 442 | } |
duke@435 | 443 | |
duke@435 | 444 | if (UseSSE >= 2) { |
duke@435 | 445 | // save XMM registers |
duke@435 | 446 | // XMM registers can contain float or double values, but this is not known here, |
duke@435 | 447 | // so always save them as doubles. |
duke@435 | 448 | // note that float values are _not_ converted automatically, so for float values |
duke@435 | 449 | // the second word contains only garbage data. |
never@739 | 450 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); |
never@739 | 451 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); |
never@739 | 452 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); |
never@739 | 453 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); |
never@739 | 454 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); |
never@739 | 455 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); |
never@739 | 456 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); |
never@739 | 457 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); |
never@739 | 458 | #ifdef _LP64 |
never@739 | 459 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8); |
never@739 | 460 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9); |
never@739 | 461 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10); |
never@739 | 462 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11); |
never@739 | 463 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12); |
never@739 | 464 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13); |
never@739 | 465 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14); |
never@739 | 466 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15); |
never@739 | 467 | #endif // _LP64 |
duke@435 | 468 | } else if (UseSSE == 1) { |
duke@435 | 469 | // save XMM registers as float because double is not supported without SSE2 |
never@739 | 470 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0); |
never@739 | 471 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1); |
never@739 | 472 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2); |
never@739 | 473 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3); |
never@739 | 474 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4); |
never@739 | 475 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5); |
never@739 | 476 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6); |
never@739 | 477 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7); |
duke@435 | 478 | } |
duke@435 | 479 | } |
duke@435 | 480 | |
duke@435 | 481 | // FPU stack must be empty now |
duke@435 | 482 | __ verify_FPU(0, "save_live_registers"); |
duke@435 | 483 | |
duke@435 | 484 | return generate_oop_map(sasm, num_rt_args, save_fpu_registers); |
duke@435 | 485 | } |
duke@435 | 486 | |
duke@435 | 487 | |
duke@435 | 488 | static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) { |
duke@435 | 489 | if (restore_fpu_registers) { |
duke@435 | 490 | if (UseSSE >= 2) { |
duke@435 | 491 | // restore XMM registers |
never@739 | 492 | __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); |
never@739 | 493 | __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); |
never@739 | 494 | __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); |
never@739 | 495 | __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); |
never@739 | 496 | __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); |
never@739 | 497 | __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); |
never@739 | 498 | __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); |
never@739 | 499 | __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); |
never@739 | 500 | #ifdef _LP64 |
never@739 | 501 | __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64)); |
never@739 | 502 | __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72)); |
never@739 | 503 | __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80)); |
never@739 | 504 | __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88)); |
never@739 | 505 | __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96)); |
never@739 | 506 | __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104)); |
never@739 | 507 | __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112)); |
never@739 | 508 | __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120)); |
never@739 | 509 | #endif // _LP64 |
duke@435 | 510 | } else if (UseSSE == 1) { |
duke@435 | 511 | // restore XMM registers |
never@739 | 512 | __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0)); |
never@739 | 513 | __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8)); |
never@739 | 514 | __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16)); |
never@739 | 515 | __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24)); |
never@739 | 516 | __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32)); |
never@739 | 517 | __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40)); |
never@739 | 518 | __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48)); |
never@739 | 519 | __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56)); |
duke@435 | 520 | } |
duke@435 | 521 | |
duke@435 | 522 | if (UseSSE < 2) { |
never@739 | 523 | __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
duke@435 | 524 | } else { |
duke@435 | 525 | // check that FPU stack is really empty |
duke@435 | 526 | __ verify_FPU(0, "restore_live_registers"); |
duke@435 | 527 | } |
duke@435 | 528 | |
duke@435 | 529 | } else { |
duke@435 | 530 | // check that FPU stack is really empty |
duke@435 | 531 | __ verify_FPU(0, "restore_live_registers"); |
duke@435 | 532 | } |
duke@435 | 533 | |
duke@435 | 534 | #ifdef ASSERT |
duke@435 | 535 | { |
duke@435 | 536 | Label ok; |
never@739 | 537 | __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); |
duke@435 | 538 | __ jcc(Assembler::equal, ok); |
duke@435 | 539 | __ stop("bad offsets in frame"); |
duke@435 | 540 | __ bind(ok); |
duke@435 | 541 | } |
never@739 | 542 | #endif // ASSERT |
duke@435 | 543 | |
never@739 | 544 | __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); |
duke@435 | 545 | } |
duke@435 | 546 | |
duke@435 | 547 | |
duke@435 | 548 | static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { |
duke@435 | 549 | __ block_comment("restore_live_registers"); |
duke@435 | 550 | |
duke@435 | 551 | restore_fpu(sasm, restore_fpu_registers); |
never@739 | 552 | __ popa(); |
duke@435 | 553 | } |
duke@435 | 554 | |
duke@435 | 555 | |
duke@435 | 556 | static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) { |
duke@435 | 557 | __ block_comment("restore_live_registers_except_rax"); |
duke@435 | 558 | |
duke@435 | 559 | restore_fpu(sasm, restore_fpu_registers); |
duke@435 | 560 | |
never@739 | 561 | #ifdef _LP64 |
never@739 | 562 | __ movptr(r15, Address(rsp, 0)); |
never@739 | 563 | __ movptr(r14, Address(rsp, wordSize)); |
never@739 | 564 | __ movptr(r13, Address(rsp, 2 * wordSize)); |
never@739 | 565 | __ movptr(r12, Address(rsp, 3 * wordSize)); |
never@739 | 566 | __ movptr(r11, Address(rsp, 4 * wordSize)); |
never@739 | 567 | __ movptr(r10, Address(rsp, 5 * wordSize)); |
never@739 | 568 | __ movptr(r9, Address(rsp, 6 * wordSize)); |
never@739 | 569 | __ movptr(r8, Address(rsp, 7 * wordSize)); |
never@739 | 570 | __ movptr(rdi, Address(rsp, 8 * wordSize)); |
never@739 | 571 | __ movptr(rsi, Address(rsp, 9 * wordSize)); |
never@739 | 572 | __ movptr(rbp, Address(rsp, 10 * wordSize)); |
never@739 | 573 | // skip rsp |
never@739 | 574 | __ movptr(rbx, Address(rsp, 12 * wordSize)); |
never@739 | 575 | __ movptr(rdx, Address(rsp, 13 * wordSize)); |
never@739 | 576 | __ movptr(rcx, Address(rsp, 14 * wordSize)); |
never@739 | 577 | |
never@739 | 578 | __ addptr(rsp, 16 * wordSize); |
never@739 | 579 | #else |
never@739 | 580 | |
never@739 | 581 | __ pop(rdi); |
never@739 | 582 | __ pop(rsi); |
never@739 | 583 | __ pop(rbp); |
never@739 | 584 | __ pop(rbx); // skip this value |
never@739 | 585 | __ pop(rbx); |
never@739 | 586 | __ pop(rdx); |
never@739 | 587 | __ pop(rcx); |
never@739 | 588 | __ addptr(rsp, BytesPerWord); |
never@739 | 589 | #endif // _LP64 |
duke@435 | 590 | } |
duke@435 | 591 | |
duke@435 | 592 | |
duke@435 | 593 | void Runtime1::initialize_pd() { |
duke@435 | 594 | // nothing to do |
duke@435 | 595 | } |
duke@435 | 596 | |
duke@435 | 597 | |
duke@435 | 598 | // target: the entry point of the method that creates and posts the exception oop |
duke@435 | 599 | // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved) |
duke@435 | 600 | |
duke@435 | 601 | OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { |
duke@435 | 602 | // preserve all registers |
duke@435 | 603 | int num_rt_args = has_argument ? 2 : 1; |
duke@435 | 604 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); |
duke@435 | 605 | |
duke@435 | 606 | // now all registers are saved and can be used freely |
duke@435 | 607 | // verify that no old value is used accidentally |
duke@435 | 608 | __ invalidate_registers(true, true, true, true, true, true); |
duke@435 | 609 | |
duke@435 | 610 | // registers used by this stub |
duke@435 | 611 | const Register temp_reg = rbx; |
duke@435 | 612 | |
duke@435 | 613 | // load argument for exception that is passed as an argument into the stub |
duke@435 | 614 | if (has_argument) { |
never@739 | 615 | #ifdef _LP64 |
never@739 | 616 | __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); |
never@739 | 617 | #else |
never@739 | 618 | __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); |
never@739 | 619 | __ push(temp_reg); |
never@739 | 620 | #endif // _LP64 |
duke@435 | 621 | } |
duke@435 | 622 | int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); |
duke@435 | 623 | |
duke@435 | 624 | OopMapSet* oop_maps = new OopMapSet(); |
duke@435 | 625 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 626 | |
duke@435 | 627 | __ stop("should not reach here"); |
duke@435 | 628 | |
duke@435 | 629 | return oop_maps; |
duke@435 | 630 | } |
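// Sketch of the resulting C call (entry name hypothetical): with has_argument ==
// true the value spilled by the compiled caller at rbp + 2 words becomes the
// second C argument (c_rarg1 on 64 bit, a stack word on 32 bit), so the target is
// entered roughly as
//
//   void some_throw_entry(JavaThread* thread, <argument>);
//
// with has_argument == false only the implicit thread argument is passed.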
duke@435 | 631 | |
duke@435 | 632 | |
duke@435 | 633 | void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) { |
duke@435 | 634 | // incoming parameters |
duke@435 | 635 | const Register exception_oop = rax; |
duke@435 | 636 | const Register exception_pc = rdx; |
duke@435 | 637 | // other registers used in this stub |
duke@435 | 638 | const Register real_return_addr = rbx; |
never@739 | 639 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
duke@435 | 640 | |
duke@435 | 641 | __ block_comment("generate_handle_exception"); |
duke@435 | 642 | |
duke@435 | 643 | #ifdef TIERED |
duke@435 | 644 | // C2 can leave the fpu stack dirty |
duke@435 | 645 | if (UseSSE < 2 ) { |
duke@435 | 646 | __ empty_FPU_stack(); |
duke@435 | 647 | } |
duke@435 | 648 | #endif // TIERED |
duke@435 | 649 | |
duke@435 | 650 | // verify that only rax, and rdx are valid at this time |
duke@435 | 651 | __ invalidate_registers(false, true, true, false, true, true); |
duke@435 | 652 | // verify that rax, contains a valid exception |
duke@435 | 653 | __ verify_not_null_oop(exception_oop); |
duke@435 | 654 | |
duke@435 | 655 | // load address of JavaThread object for thread-local data |
never@739 | 656 | NOT_LP64(__ get_thread(thread);) |
duke@435 | 657 | |
duke@435 | 658 | #ifdef ASSERT |
duke@435 | 659 | // check that fields in JavaThread for exception oop and issuing pc are |
duke@435 | 660 | // empty before writing to them |
duke@435 | 661 | Label oop_empty; |
never@739 | 662 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD); |
duke@435 | 663 | __ jcc(Assembler::equal, oop_empty); |
duke@435 | 664 | __ stop("exception oop already set"); |
duke@435 | 665 | __ bind(oop_empty); |
duke@435 | 666 | |
duke@435 | 667 | Label pc_empty; |
never@739 | 668 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); |
duke@435 | 669 | __ jcc(Assembler::equal, pc_empty); |
duke@435 | 670 | __ stop("exception pc already set"); |
duke@435 | 671 | __ bind(pc_empty); |
duke@435 | 672 | #endif |
duke@435 | 673 | |
duke@435 | 674 | // save exception oop and issuing pc into JavaThread |
duke@435 | 675 | // (exception handler will load it from here) |
never@739 | 676 | __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); |
never@739 | 677 | __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); |
duke@435 | 678 | |
duke@435 | 679 | // save real return address (pc that called this stub) |
never@739 | 680 | __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord)); |
never@739 | 681 | __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr); |
duke@435 | 682 | |
duke@435 | 683 | // patch throwing pc into return address (has bci & oop map) |
never@739 | 684 | __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); |
duke@435 | 685 | |
duke@435 | 686 | // compute the exception handler. |
duke@435 | 687 | // the exception oop and the throwing pc are read from the fields in JavaThread |
duke@435 | 688 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); |
duke@435 | 689 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 690 | |
twisti@1730 | 691 | // rax,: handler address |
duke@435 | 692 | // will be the deopt blob if nmethod was deoptimized while we looked up |
duke@435 | 693 | // handler regardless of whether handler existed in the nmethod. |
duke@435 | 694 | |
duke@435 | 695 | // only rax, is valid at this time, all other registers have been destroyed by the runtime call |
duke@435 | 696 | __ invalidate_registers(false, true, true, true, true, true); |
duke@435 | 697 | |
twisti@1730 | 698 | #ifdef ASSERT |
duke@435 | 699 | // Do we have an exception handler in the nmethod? |
duke@435 | 700 | Label done; |
never@739 | 701 | __ testptr(rax, rax); |
twisti@1730 | 702 | __ jcc(Assembler::notZero, done); |
twisti@1730 | 703 | __ stop("no handler found"); |
twisti@1730 | 704 | __ bind(done); |
twisti@1730 | 705 | #endif |
duke@435 | 706 | |
duke@435 | 707 | // exception handler found |
duke@435 | 708 | // patch the return address -> the stub will directly return to the exception handler |
never@739 | 709 | __ movptr(Address(rbp, 1*BytesPerWord), rax); |
duke@435 | 710 | |
duke@435 | 711 | // restore registers |
duke@435 | 712 | restore_live_registers(sasm, save_fpu_registers); |
duke@435 | 713 | |
duke@435 | 714 | // return to exception handler |
duke@435 | 715 | __ leave(); |
duke@435 | 716 | __ ret(0); |
duke@435 | 717 | |
duke@435 | 718 | } |
duke@435 | 719 | |
duke@435 | 720 | |
duke@435 | 721 | void Runtime1::generate_unwind_exception(StubAssembler *sasm) { |
duke@435 | 722 | // incoming parameters |
duke@435 | 723 | const Register exception_oop = rax; |
twisti@1730 | 724 | // callee-saved copy of exception_oop during runtime call |
twisti@1730 | 725 | const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14); |
duke@435 | 726 | // other registers used in this stub |
duke@435 | 727 | const Register exception_pc = rdx; |
duke@435 | 728 | const Register handler_addr = rbx; |
never@739 | 729 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
duke@435 | 730 | |
duke@435 | 731 | // verify that only rax, is valid at this time |
duke@435 | 732 | __ invalidate_registers(false, true, true, true, true, true); |
duke@435 | 733 | |
duke@435 | 734 | #ifdef ASSERT |
duke@435 | 735 | // check that fields in JavaThread for exception oop and issuing pc are empty |
never@739 | 736 | NOT_LP64(__ get_thread(thread);) |
duke@435 | 737 | Label oop_empty; |
never@739 | 738 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); |
duke@435 | 739 | __ jcc(Assembler::equal, oop_empty); |
duke@435 | 740 | __ stop("exception oop must be empty"); |
duke@435 | 741 | __ bind(oop_empty); |
duke@435 | 742 | |
duke@435 | 743 | Label pc_empty; |
never@739 | 744 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); |
duke@435 | 745 | __ jcc(Assembler::equal, pc_empty); |
duke@435 | 746 | __ stop("exception pc must be empty"); |
duke@435 | 747 | __ bind(pc_empty); |
duke@435 | 748 | #endif |
duke@435 | 749 | |
duke@435 | 750 | // clear the FPU stack in case any FPU results are left behind |
duke@435 | 751 | __ empty_FPU_stack(); |
duke@435 | 752 | |
twisti@1730 | 753 | // save exception_oop in callee-saved register to preserve it during runtime calls |
twisti@1730 | 754 | __ verify_not_null_oop(exception_oop); |
twisti@1730 | 755 | __ movptr(exception_oop_callee_saved, exception_oop); |
twisti@1730 | 756 | |
twisti@1730 | 757 | NOT_LP64(__ get_thread(thread);) |
twisti@1730 | 758 | // Get return address (is on top of stack after leave). |
never@739 | 759 | __ movptr(exception_pc, Address(rsp, 0)); |
duke@435 | 760 | |
twisti@1730 | 761 | // search the exception handler address of the caller (using the return address) |
twisti@1730 | 762 | __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); |
twisti@1730 | 763 | // rax: exception handler address of the caller |
duke@435 | 764 | |
twisti@1730 | 765 | // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call. |
twisti@1730 | 766 | __ invalidate_registers(false, true, true, true, false, true); |
duke@435 | 767 | |
duke@435 | 768 | // move result of call into correct register |
never@739 | 769 | __ movptr(handler_addr, rax); |
duke@435 | 770 | |
twisti@1730 | 771 | // Restore exception oop to RAX (required convention of exception handler). |
twisti@1730 | 772 | __ movptr(exception_oop, exception_oop_callee_saved); |
duke@435 | 773 | |
twisti@1730 | 774 | // verify that there is really a valid exception in rax |
twisti@1730 | 775 | __ verify_not_null_oop(exception_oop); |
duke@435 | 776 | |
duke@435 | 777 | // get throwing pc (= return address). |
duke@435 | 778 | // rdx has been destroyed by the call, so it must be set again |
duke@435 | 779 | // the pop is also necessary to simulate the effect of a ret(0) |
never@739 | 780 | __ pop(exception_pc); |
duke@435 | 781 | |
twisti@1730 | 782 | // Restore SP from BP if the exception PC is a MethodHandle call site. |
twisti@1730 | 783 | NOT_LP64(__ get_thread(thread);) |
twisti@1803 | 784 | __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); |
twisti@1919 | 785 | __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save); |
duke@435 | 786 | |
duke@435 | 787 | // continue at exception handler (return address removed) |
duke@435 | 788 | // note: do *not* remove arguments when unwinding the |
duke@435 | 789 | // activation since the caller assumes having |
duke@435 | 790 | // all arguments on the stack when entering the |
duke@435 | 791 | // runtime to determine the exception handler |
duke@435 | 792 | // (GC happens at call site with arguments!) |
twisti@1730 | 793 | // rax: exception oop |
duke@435 | 794 | // rdx: throwing pc |
twisti@1730 | 795 | // rbx: exception handler |
duke@435 | 796 | __ jmp(handler_addr); |
duke@435 | 797 | } |
duke@435 | 798 | |
duke@435 | 799 | |
duke@435 | 800 | OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { |
duke@435 | 801 | // use the maximum number of runtime-arguments here because it is difficult to |
duke@435 | 802 | // distinguish each RT-Call. |
duke@435 | 803 | // Note: This number affects also the RT-Call in generate_handle_exception because |
duke@435 | 804 | // the oop-map is shared for all calls. |
duke@435 | 805 | const int num_rt_args = 2; // thread + dummy |
duke@435 | 806 | |
duke@435 | 807 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); |
duke@435 | 808 | assert(deopt_blob != NULL, "deoptimization blob must have been created"); |
duke@435 | 809 | |
duke@435 | 810 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); |
duke@435 | 811 | |
never@739 | 812 | #ifdef _LP64 |
never@739 | 813 | const Register thread = r15_thread; |
never@739 | 814 | // No need to worry about dummy |
never@739 | 815 | __ mov(c_rarg0, thread); |
never@739 | 816 | #else |
never@739 | 817 | __ push(rax); // push dummy |
duke@435 | 818 | |
duke@435 | 819 | const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) |
duke@435 | 820 | // push java thread (becomes first argument of C function) |
duke@435 | 821 | __ get_thread(thread); |
never@739 | 822 | __ push(thread); |
never@739 | 823 | #endif // _LP64 |
duke@435 | 824 | __ set_last_Java_frame(thread, noreg, rbp, NULL); |
duke@435 | 825 | // do the call |
duke@435 | 826 | __ call(RuntimeAddress(target)); |
duke@435 | 827 | OopMapSet* oop_maps = new OopMapSet(); |
duke@435 | 828 | oop_maps->add_gc_map(__ offset(), oop_map); |
duke@435 | 829 | // verify callee-saved register |
duke@435 | 830 | #ifdef ASSERT |
duke@435 | 831 | guarantee(thread != rax, "change this code"); |
never@739 | 832 | __ push(rax); |
duke@435 | 833 | { Label L; |
duke@435 | 834 | __ get_thread(rax); |
never@739 | 835 | __ cmpptr(thread, rax); |
duke@435 | 836 | __ jcc(Assembler::equal, L); |
never@739 | 837 | __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?"); |
duke@435 | 838 | __ bind(L); |
duke@435 | 839 | } |
never@739 | 840 | __ pop(rax); |
duke@435 | 841 | #endif |
duke@435 | 842 | __ reset_last_Java_frame(thread, true, false); |
never@739 | 843 | #ifndef _LP64 |
never@739 | 844 | __ pop(rcx); // discard thread arg |
never@739 | 845 | __ pop(rcx); // discard dummy |
never@739 | 846 | #endif // _LP64 |
duke@435 | 847 | |
duke@435 | 848 | // check for pending exceptions |
duke@435 | 849 | { Label L; |
never@739 | 850 | __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); |
duke@435 | 851 | __ jcc(Assembler::equal, L); |
duke@435 | 852 | // exception pending => remove activation and forward to exception handler |
duke@435 | 853 | |
never@739 | 854 | __ testptr(rax, rax); // have we deoptimized? |
duke@435 | 855 | __ jump_cc(Assembler::equal, |
duke@435 | 856 | RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); |
duke@435 | 857 | |
duke@435 | 858 | // the deopt blob expects exceptions in the special fields of |
duke@435 | 859 | // JavaThread, so copy and clear pending exception. |
duke@435 | 860 | |
duke@435 | 861 | // load and clear pending exception |
never@739 | 862 | __ movptr(rax, Address(thread, Thread::pending_exception_offset())); |
xlu@947 | 863 | __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); |
duke@435 | 864 | |
duke@435 | 865 | // check that there is really a valid exception |
duke@435 | 866 | __ verify_not_null_oop(rax); |
duke@435 | 867 | |
duke@435 | 868 | // load throwing pc: this is the return address of the stub |
never@739 | 869 | __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size)); |
duke@435 | 870 | |
duke@435 | 871 | #ifdef ASSERT |
duke@435 | 872 | // check that fields in JavaThread for exception oop and issuing pc are empty |
duke@435 | 873 | Label oop_empty; |
never@739 | 874 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); |
duke@435 | 875 | __ jcc(Assembler::equal, oop_empty); |
duke@435 | 876 | __ stop("exception oop must be empty"); |
duke@435 | 877 | __ bind(oop_empty); |
duke@435 | 878 | |
duke@435 | 879 | Label pc_empty; |
never@739 | 880 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); |
duke@435 | 881 | __ jcc(Assembler::equal, pc_empty); |
duke@435 | 882 | __ stop("exception pc must be empty"); |
duke@435 | 883 | __ bind(pc_empty); |
duke@435 | 884 | #endif |
duke@435 | 885 | |
duke@435 | 886 | // store exception oop and throwing pc to JavaThread |
never@739 | 887 | __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax); |
never@739 | 888 | __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx); |
duke@435 | 889 | |
duke@435 | 890 | restore_live_registers(sasm); |
duke@435 | 891 | |
duke@435 | 892 | __ leave(); |
never@739 | 893 | __ addptr(rsp, BytesPerWord); // remove return address from stack |
duke@435 | 894 | |
duke@435 | 895 | // Forward the exception directly to the deopt blob. We can blow no |
duke@435 | 896 | // registers and must leave the throwing pc on the stack. A patch may |
duke@435 | 897 | // have values live in registers, so we use the entry point with the |
duke@435 | 898 | // exception in tls. |
duke@435 | 899 | __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls())); |
duke@435 | 900 | |
duke@435 | 901 | __ bind(L); |
duke@435 | 902 | } |
duke@435 | 903 | |
duke@435 | 904 | |
duke@435 | 905 | // Runtime will return true if the nmethod has been deoptimized during |
duke@435 | 906 | // the patching process. In that case we must do a deopt reexecute instead. |
duke@435 | 907 | |
duke@435 | 908 | Label reexecuteEntry, cont; |
duke@435 | 909 | |
never@739 | 910 | __ testptr(rax, rax); // have we deoptimized? |
duke@435 | 911 | __ jcc(Assembler::equal, cont); // no |
duke@435 | 912 | |
duke@435 | 913 | // Will reexecute. The proper return address is already on the stack; we just restore |
duke@435 | 914 | // registers, pop all of our frame but the return address, and jump to the deopt blob. |
duke@435 | 915 | restore_live_registers(sasm); |
duke@435 | 916 | __ leave(); |
duke@435 | 917 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); |
duke@435 | 918 | |
duke@435 | 919 | __ bind(cont); |
duke@435 | 920 | restore_live_registers(sasm); |
duke@435 | 921 | __ leave(); |
duke@435 | 922 | __ ret(0); |
duke@435 | 923 | |
duke@435 | 924 | return oop_maps; |
duke@435 | 925 | |
duke@435 | 926 | } |
duke@435 | 927 | |
duke@435 | 928 | |
duke@435 | 929 | OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { |
duke@435 | 930 | |
duke@435 | 931 | // for better readability |
duke@435 | 932 | const bool must_gc_arguments = true; |
duke@435 | 933 | const bool dont_gc_arguments = false; |
duke@435 | 934 | |
duke@435 | 935 | // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu |
duke@435 | 936 | bool save_fpu_registers = true; |
duke@435 | 937 | |
duke@435 | 938 | // stub code & info for the different stubs |
duke@435 | 939 | OopMapSet* oop_maps = NULL; |
duke@435 | 940 | switch (id) { |
duke@435 | 941 | case forward_exception_id: |
duke@435 | 942 | { |
duke@435 | 943 | // we're handling an exception in the context of a compiled |
duke@435 | 944 | // frame. The registers have been saved in the standard |
duke@435 | 945 | // places. Perform an exception lookup in the caller and |
duke@435 | 946 | // dispatch to the handler if found. Otherwise unwind and |
duke@435 | 947 | // dispatch to the callers exception handler. |
duke@435 | 948 | |
never@739 | 949 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
duke@435 | 950 | const Register exception_oop = rax; |
duke@435 | 951 | const Register exception_pc = rdx; |
duke@435 | 952 | |
duke@435 | 953 | // load pending exception oop into rax, |
never@739 | 954 | __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); |
duke@435 | 955 | // clear pending exception |
xlu@947 | 956 | __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); |
duke@435 | 957 | |
duke@435 | 958 | // load issuing PC (the return address for this stub) into rdx |
never@739 | 959 | __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); |
duke@435 | 960 | |
duke@435 | 961 | // make sure that the vm_results are cleared (may be unnecessary) |
xlu@947 | 962 | __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); |
xlu@947 | 963 | __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); |
duke@435 | 964 | |
duke@435 | 965 | // verify that there is really a valid exception in rax, |
duke@435 | 966 | __ verify_not_null_oop(exception_oop); |
duke@435 | 967 | |
duke@435 | 968 | |
duke@435 | 969 | oop_maps = new OopMapSet(); |
duke@435 | 970 | OopMap* oop_map = generate_oop_map(sasm, 1); |
duke@435 | 971 | generate_handle_exception(sasm, oop_maps, oop_map); |
duke@435 | 972 | __ stop("should not reach here"); |
duke@435 | 973 | } |
duke@435 | 974 | break; |
duke@435 | 975 | |
duke@435 | 976 | case new_instance_id: |
duke@435 | 977 | case fast_new_instance_id: |
duke@435 | 978 | case fast_new_instance_init_check_id: |
duke@435 | 979 | { |
duke@435 | 980 | Register klass = rdx; // Incoming |
duke@435 | 981 | Register obj = rax; // Result |
duke@435 | 982 | |
duke@435 | 983 | if (id == new_instance_id) { |
duke@435 | 984 | __ set_info("new_instance", dont_gc_arguments); |
duke@435 | 985 | } else if (id == fast_new_instance_id) { |
duke@435 | 986 | __ set_info("fast new_instance", dont_gc_arguments); |
duke@435 | 987 | } else { |
duke@435 | 988 | assert(id == fast_new_instance_init_check_id, "bad StubID"); |
duke@435 | 989 | __ set_info("fast new_instance init check", dont_gc_arguments); |
duke@435 | 990 | } |
duke@435 | 991 | |
duke@435 | 992 | if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && |
duke@435 | 993 | UseTLAB && FastTLABRefill) { |
duke@435 | 994 | Label slow_path; |
duke@435 | 995 | Register obj_size = rcx; |
duke@435 | 996 | Register t1 = rbx; |
duke@435 | 997 | Register t2 = rsi; |
duke@435 | 998 | assert_different_registers(klass, obj, obj_size, t1, t2); |
duke@435 | 999 | |
never@739 | 1000 | __ push(rdi); |
never@739 | 1001 | __ push(rbx); |
duke@435 | 1002 | |
duke@435 | 1003 | if (id == fast_new_instance_init_check_id) { |
duke@435 | 1004 | // make sure the klass is initialized |
duke@435 | 1005 | __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized); |
duke@435 | 1006 | __ jcc(Assembler::notEqual, slow_path); |
duke@435 | 1007 | } |
duke@435 | 1008 | |
duke@435 | 1009 | #ifdef ASSERT |
duke@435 | 1010 | // assert object can be fast path allocated |
duke@435 | 1011 | { |
duke@435 | 1012 | Label ok, not_ok; |
duke@435 | 1013 | __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); |
duke@435 | 1014 | __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) |
duke@435 | 1015 | __ jcc(Assembler::lessEqual, not_ok); |
duke@435 | 1016 | __ testl(obj_size, Klass::_lh_instance_slow_path_bit); |
duke@435 | 1017 | __ jcc(Assembler::zero, ok); |
duke@435 | 1018 | __ bind(not_ok); |
duke@435 | 1019 | __ stop("assert(can be fast path allocated)"); |
duke@435 | 1020 | __ should_not_reach_here(); |
duke@435 | 1021 | __ bind(ok); |
duke@435 | 1022 | } |
duke@435 | 1023 | #endif // ASSERT |
duke@435 | 1024 | |
duke@435 | 1025 | // if we got here then the TLAB allocation failed, so try |
duke@435 | 1026 | // refilling the TLAB or allocating directly from eden. |
duke@435 | 1027 | Label retry_tlab, try_eden; |
duke@435 | 1028 | __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass) |
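// tlab_refill exits three ways: to retry_tlab once a fresh TLAB is available,
// to try_eden when the allocation should go straight to eden, and to
// slow_path when neither works.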
duke@435 | 1029 | |
duke@435 | 1030 | __ bind(retry_tlab); |
duke@435 | 1031 | |
never@739 | 1032 | // get the instance size (size is positive so movl is fine for 64-bit) |
duke@435 | 1033 | __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); |
duke@435 | 1034 | __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path); |
duke@435 | 1035 | __ initialize_object(obj, klass, obj_size, 0, t1, t2); |
duke@435 | 1036 | __ verify_oop(obj); |
never@739 | 1037 | __ pop(rbx); |
never@739 | 1038 | __ pop(rdi); |
duke@435 | 1039 | __ ret(0); |
duke@435 | 1040 | |
duke@435 | 1041 | __ bind(try_eden); |
never@739 | 1042 | // get the instance size (size is positive so movl is fine for 64-bit) |
duke@435 | 1043 | __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); |
duke@435 | 1044 | __ eden_allocate(obj, obj_size, 0, t1, slow_path); |
duke@435 | 1045 | __ initialize_object(obj, klass, obj_size, 0, t1, t2); |
duke@435 | 1046 | __ verify_oop(obj); |
never@739 | 1047 | __ pop(rbx); |
never@739 | 1048 | __ pop(rdi); |
duke@435 | 1049 | __ ret(0); |
duke@435 | 1050 | |
duke@435 | 1051 | __ bind(slow_path); |
never@739 | 1052 | __ pop(rbx); |
never@739 | 1053 | __ pop(rdi); |
duke@435 | 1054 | } |
duke@435 | 1055 | |
duke@435 | 1056 | __ enter(); |
duke@435 | 1057 | OopMap* map = save_live_registers(sasm, 2); |
duke@435 | 1058 | int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); |
duke@435 | 1059 | oop_maps = new OopMapSet(); |
duke@435 | 1060 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1061 | restore_live_registers_except_rax(sasm); |
duke@435 | 1062 | __ verify_oop(obj); |
duke@435 | 1063 | __ leave(); |
duke@435 | 1064 | __ ret(0); |
duke@435 | 1065 | |
duke@435 | 1066 | // rax: new instance |
duke@435 | 1067 | } |
duke@435 | 1068 | |
duke@435 | 1069 | break; |
duke@435 | 1070 | |
duke@435 | 1071 | #ifdef TIERED |
duke@435 | 1072 | case counter_overflow_id: |
duke@435 | 1073 | { |
duke@435 | 1074 | Register bci = rax; |
duke@435 | 1075 | __ enter(); |
duke@435 | 1076 | OopMap* map = save_live_registers(sasm, 2); |
duke@435 | 1077 | // Retrieve bci |
duke@435 | 1078 | __ movl(bci, Address(rbp, 2*BytesPerWord)); |
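// Address(rbp, 2*BytesPerWord) is the slot just above the return address,
// i.e. the bci pushed on the stack by the compiled caller before calling
// this stub.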
duke@435 | 1079 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci); |
duke@435 | 1080 | oop_maps = new OopMapSet(); |
duke@435 | 1081 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1082 | restore_live_registers(sasm); |
duke@435 | 1083 | __ leave(); |
duke@435 | 1084 | __ ret(0); |
duke@435 | 1085 | } |
duke@435 | 1086 | break; |
duke@435 | 1087 | #endif // TIERED |
duke@435 | 1088 | |
duke@435 | 1089 | case new_type_array_id: |
duke@435 | 1090 | case new_object_array_id: |
duke@435 | 1091 | { |
duke@435 | 1092 | Register length = rbx; // Incoming |
duke@435 | 1093 | Register klass = rdx; // Incoming |
duke@435 | 1094 | Register obj = rax; // Result |
duke@435 | 1095 | |
duke@435 | 1096 | if (id == new_type_array_id) { |
duke@435 | 1097 | __ set_info("new_type_array", dont_gc_arguments); |
duke@435 | 1098 | } else { |
duke@435 | 1099 | __ set_info("new_object_array", dont_gc_arguments); |
duke@435 | 1100 | } |
duke@435 | 1101 | |
duke@435 | 1102 | #ifdef ASSERT |
duke@435 | 1103 | // assert object type is really an array of the proper kind |
duke@435 | 1104 | { |
duke@435 | 1105 | Label ok; |
duke@435 | 1106 | Register t0 = obj; |
duke@435 | 1107 | __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc))); |
duke@435 | 1108 | __ sarl(t0, Klass::_lh_array_tag_shift); |
duke@435 | 1109 | int tag = ((id == new_type_array_id) |
duke@435 | 1110 | ? Klass::_lh_array_tag_type_value |
duke@435 | 1111 | : Klass::_lh_array_tag_obj_value); |
duke@435 | 1112 | __ cmpl(t0, tag); |
duke@435 | 1113 | __ jcc(Assembler::equal, ok); |
duke@435 | 1114 | __ stop("assert(is an array klass)"); |
duke@435 | 1115 | __ should_not_reach_here(); |
duke@435 | 1116 | __ bind(ok); |
duke@435 | 1117 | } |
duke@435 | 1118 | #endif // ASSERT |
duke@435 | 1119 | |
duke@435 | 1120 | if (UseTLAB && FastTLABRefill) { |
duke@435 | 1121 | Register arr_size = rsi; |
duke@435 | 1122 | Register t1 = rcx; // must be rcx for use as shift count |
duke@435 | 1123 | Register t2 = rdi; |
duke@435 | 1124 | Label slow_path; |
duke@435 | 1125 | assert_different_registers(length, klass, obj, arr_size, t1, t2); |
duke@435 | 1126 | |
duke@435 | 1127 | // check that array length is small enough for fast path. |
duke@435 | 1128 | __ cmpl(length, C1_MacroAssembler::max_array_allocation_length); |
duke@435 | 1129 | __ jcc(Assembler::above, slow_path); |
duke@435 | 1130 | |
duke@435 | 1131 | // if we got here then the TLAB allocation failed, so try |
duke@435 | 1132 | // refilling the TLAB or allocating directly from eden. |
duke@435 | 1133 | Label retry_tlab, try_eden; |
duke@435 | 1134 | __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx and rdx |
duke@435 | 1135 | |
duke@435 | 1136 | __ bind(retry_tlab); |
duke@435 | 1137 | |
duke@435 | 1138 | // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) |
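// (the array layout helper keeps log2(element size) in its low bits and the
// header size in bytes at _lh_header_size_shift, which is what the shift and
// mask sequence below extracts)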
never@739 | 1139 | // since size is positive, movl does the right thing on 64-bit |
duke@435 | 1140 | __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); |
never@739 | 1141 | // since size is positive, movl does the right thing on 64-bit |
duke@435 | 1142 | __ movl(arr_size, length); |
duke@435 | 1143 | assert(t1 == rcx, "fixed register usage"); |
never@739 | 1144 | __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
never@739 | 1145 | __ shrptr(t1, Klass::_lh_header_size_shift); |
never@739 | 1146 | __ andptr(t1, Klass::_lh_header_size_mask); |
never@739 | 1147 | __ addptr(arr_size, t1); |
never@739 | 1148 | __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up |
never@739 | 1149 | __ andptr(arr_size, ~MinObjAlignmentInBytesMask); |
duke@435 | 1150 | |
duke@435 | 1151 | __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size |
duke@435 | 1152 | |
duke@435 | 1153 | __ initialize_header(obj, klass, length, t1, t2); |
duke@435 | 1154 | __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); |
duke@435 | 1155 | assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
duke@435 | 1156 | assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); |
never@739 | 1157 | __ andptr(t1, Klass::_lh_header_size_mask); |
never@739 | 1158 | __ subptr(arr_size, t1); // body length |
never@739 | 1159 | __ addptr(t1, obj); // body start |
duke@435 | 1160 | __ initialize_body(t1, arr_size, 0, t2); |
duke@435 | 1161 | __ verify_oop(obj); |
duke@435 | 1162 | __ ret(0); |
duke@435 | 1163 | |
duke@435 | 1164 | __ bind(try_eden); |
duke@435 | 1165 | // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) |
never@739 | 1166 | // since size is positive, movl does the right thing on 64-bit |
duke@435 | 1167 | __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes())); |
never@739 | 1168 | // since size is positive, movl does the right thing on 64-bit |
duke@435 | 1169 | __ movl(arr_size, length); |
duke@435 | 1170 | assert(t1 == rcx, "fixed register usage"); |
never@739 | 1171 | __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
never@739 | 1172 | __ shrptr(t1, Klass::_lh_header_size_shift); |
never@739 | 1173 | __ andptr(t1, Klass::_lh_header_size_mask); |
never@739 | 1174 | __ addptr(arr_size, t1); |
never@739 | 1175 | __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up |
never@739 | 1176 | __ andptr(arr_size, ~MinObjAlignmentInBytesMask); |
duke@435 | 1177 | |
duke@435 | 1178 | __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size |
duke@435 | 1179 | |
duke@435 | 1180 | __ initialize_header(obj, klass, length, t1, t2); |
duke@435 | 1181 | __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte))); |
duke@435 | 1182 | assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise"); |
duke@435 | 1183 | assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise"); |
never@739 | 1184 | __ andptr(t1, Klass::_lh_header_size_mask); |
never@739 | 1185 | __ subptr(arr_size, t1); // body length |
never@739 | 1186 | __ addptr(t1, obj); // body start |
duke@435 | 1187 | __ initialize_body(t1, arr_size, 0, t2); |
duke@435 | 1188 | __ verify_oop(obj); |
duke@435 | 1189 | __ ret(0); |
duke@435 | 1190 | |
duke@435 | 1191 | __ bind(slow_path); |
duke@435 | 1192 | } |
duke@435 | 1193 | |
duke@435 | 1194 | __ enter(); |
duke@435 | 1195 | OopMap* map = save_live_registers(sasm, 3); |
duke@435 | 1196 | int call_offset; |
duke@435 | 1197 | if (id == new_type_array_id) { |
duke@435 | 1198 | call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); |
duke@435 | 1199 | } else { |
duke@435 | 1200 | call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); |
duke@435 | 1201 | } |
duke@435 | 1202 | |
duke@435 | 1203 | oop_maps = new OopMapSet(); |
duke@435 | 1204 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1205 | restore_live_registers_except_rax(sasm); |
duke@435 | 1206 | |
duke@435 | 1207 | __ verify_oop(obj); |
duke@435 | 1208 | __ leave(); |
duke@435 | 1209 | __ ret(0); |
duke@435 | 1210 | |
duke@435 | 1211 | // rax: new array |
duke@435 | 1212 | } |
duke@435 | 1213 | break; |
duke@435 | 1214 | |
duke@435 | 1215 | case new_multi_array_id: |
duke@435 | 1216 | { StubFrame f(sasm, "new_multi_array", dont_gc_arguments); |
duke@435 | 1217 | // rax: klass |
duke@435 | 1218 | // rbx: rank |
duke@435 | 1219 | // rcx: address of 1st dimension |
duke@435 | 1220 | OopMap* map = save_live_registers(sasm, 4); |
duke@435 | 1221 | int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx); |
duke@435 | 1222 | |
duke@435 | 1223 | oop_maps = new OopMapSet(); |
duke@435 | 1224 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1225 | restore_live_registers_except_rax(sasm); |
duke@435 | 1226 | |
duke@435 | 1227 | // rax: new multi array |
duke@435 | 1228 | __ verify_oop(rax); |
duke@435 | 1229 | } |
duke@435 | 1230 | break; |
duke@435 | 1231 | |
duke@435 | 1232 | case register_finalizer_id: |
duke@435 | 1233 | { |
duke@435 | 1234 | __ set_info("register_finalizer", dont_gc_arguments); |
duke@435 | 1235 | |
never@739 | 1236 | // This is called via call_runtime so the arguments |
never@739 | 1237 | // will be placed in C ABI locations |
never@739 | 1238 | |
never@739 | 1239 | #ifdef _LP64 |
never@739 | 1240 | __ verify_oop(c_rarg0); |
never@739 | 1241 | __ mov(rax, c_rarg0); |
never@739 | 1242 | #else |
duke@435 | 1243 | // The object is passed on the stack and we haven't pushed a |
duke@435 | 1244 | // frame yet, so it is one word away from the top of the stack. |
never@739 | 1245 | __ movptr(rax, Address(rsp, 1 * BytesPerWord)); |
duke@435 | 1246 | __ verify_oop(rax); |
never@739 | 1247 | #endif // _LP64 |
duke@435 | 1248 | |
duke@435 | 1249 | // load the klass and check the has-finalizer flag |
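// JVM_ACC_HAS_FINALIZER is set on klasses whose class declares a non-trivial
// finalize(); if it is clear we can return immediately without calling into
// the runtime.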
duke@435 | 1250 | Label register_finalizer; |
duke@435 | 1251 | Register t = rsi; |
never@739 | 1252 | __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes())); |
duke@435 | 1253 | __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); |
duke@435 | 1254 | __ testl(t, JVM_ACC_HAS_FINALIZER); |
duke@435 | 1255 | __ jcc(Assembler::notZero, register_finalizer); |
duke@435 | 1256 | __ ret(0); |
duke@435 | 1257 | |
duke@435 | 1258 | __ bind(register_finalizer); |
duke@435 | 1259 | __ enter(); |
duke@435 | 1260 | OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); |
duke@435 | 1261 | int call_offset = __ call_RT(noreg, noreg, |
duke@435 | 1262 | CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax); |
duke@435 | 1263 | oop_maps = new OopMapSet(); |
duke@435 | 1264 | oop_maps->add_gc_map(call_offset, oop_map); |
duke@435 | 1265 | |
duke@435 | 1266 | // Now restore all the live registers |
duke@435 | 1267 | restore_live_registers(sasm); |
duke@435 | 1268 | |
duke@435 | 1269 | __ leave(); |
duke@435 | 1270 | __ ret(0); |
duke@435 | 1271 | } |
duke@435 | 1272 | break; |
duke@435 | 1273 | |
duke@435 | 1274 | case throw_range_check_failed_id: |
duke@435 | 1275 | { StubFrame f(sasm, "range_check_failed", dont_gc_arguments); |
duke@435 | 1276 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); |
duke@435 | 1277 | } |
duke@435 | 1278 | break; |
duke@435 | 1279 | |
duke@435 | 1280 | case throw_index_exception_id: |
duke@435 | 1281 | { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments); |
duke@435 | 1282 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); |
duke@435 | 1283 | } |
duke@435 | 1284 | break; |
duke@435 | 1285 | |
duke@435 | 1286 | case throw_div0_exception_id: |
duke@435 | 1287 | { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments); |
duke@435 | 1288 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); |
duke@435 | 1289 | } |
duke@435 | 1290 | break; |
duke@435 | 1291 | |
duke@435 | 1292 | case throw_null_pointer_exception_id: |
duke@435 | 1293 | { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments); |
duke@435 | 1294 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); |
duke@435 | 1295 | } |
duke@435 | 1296 | break; |
duke@435 | 1297 | |
duke@435 | 1298 | case handle_exception_nofpu_id: |
duke@435 | 1299 | save_fpu_registers = false; |
duke@435 | 1300 | // fall through |
duke@435 | 1301 | case handle_exception_id: |
duke@435 | 1302 | { StubFrame f(sasm, "handle_exception", dont_gc_arguments); |
duke@435 | 1303 | oop_maps = new OopMapSet(); |
duke@435 | 1304 | OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers); |
duke@435 | 1305 | generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers); |
duke@435 | 1306 | } |
duke@435 | 1307 | break; |
duke@435 | 1308 | |
duke@435 | 1309 | case unwind_exception_id: |
duke@435 | 1310 | { __ set_info("unwind_exception", dont_gc_arguments); |
duke@435 | 1311 | // note: no stubframe since we are about to leave the current |
duke@435 | 1312 | // activation and we are calling a leaf VM function only. |
duke@435 | 1313 | generate_unwind_exception(sasm); |
duke@435 | 1314 | } |
duke@435 | 1315 | break; |
duke@435 | 1316 | |
duke@435 | 1317 | case throw_array_store_exception_id: |
duke@435 | 1318 | { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments); |
duke@435 | 1319 | // tos + 0: link |
duke@435 | 1320 | // + 1: return address |
duke@435 | 1321 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false); |
duke@435 | 1322 | } |
duke@435 | 1323 | break; |
duke@435 | 1324 | |
duke@435 | 1325 | case throw_class_cast_exception_id: |
duke@435 | 1326 | { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments); |
duke@435 | 1327 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); |
duke@435 | 1328 | } |
duke@435 | 1329 | break; |
duke@435 | 1330 | |
duke@435 | 1331 | case throw_incompatible_class_change_error_id: |
duke@435 | 1332 | { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments); |
duke@435 | 1333 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); |
duke@435 | 1334 | } |
duke@435 | 1335 | break; |
duke@435 | 1336 | |
duke@435 | 1337 | case slow_subtype_check_id: |
duke@435 | 1338 | { |
jrose@1079 | 1339 | // Typical calling sequence: |
jrose@1079 | 1340 | // __ push(klass_RInfo); // object klass or other subclass |
jrose@1079 | 1341 | // __ push(sup_k_RInfo); // array element klass or other superclass |
jrose@1079 | 1342 | // __ call(slow_subtype_check); |
jrose@1079 | 1343 | // Note that the subclass is pushed first, and is therefore deepest. |
jrose@1079 | 1344 | // Previous versions of this code reversed the names 'sub' and 'super'. |
jrose@1079 | 1345 | // This was operationally harmless but made the code unreadable. |
duke@435 | 1346 | enum layout { |
never@739 | 1347 | rax_off, SLOT2(raxH_off) |
never@739 | 1348 | rcx_off, SLOT2(rcxH_off) |
never@739 | 1349 | rsi_off, SLOT2(rsiH_off) |
never@739 | 1350 | rdi_off, SLOT2(rdiH_off) |
never@739 | 1351 | // saved_rbp_off, SLOT2(saved_rbpH_off) |
never@739 | 1352 | return_off, SLOT2(returnH_off) |
jrose@1079 | 1353 | sup_k_off, SLOT2(sup_kH_off) |
jrose@1079 | 1354 | klass_off, SLOT2(superH_off) |
jrose@1079 | 1355 | framesize, |
jrose@1079 | 1356 | result_off = klass_off // deepest argument is also the return value |
duke@435 | 1357 | }; |
duke@435 | 1358 | |
duke@435 | 1359 | __ set_info("slow_subtype_check", dont_gc_arguments); |
never@739 | 1360 | __ push(rdi); |
never@739 | 1361 | __ push(rsi); |
never@739 | 1362 | __ push(rcx); |
never@739 | 1363 | __ push(rax); |
duke@435 | 1364 | |
never@739 | 1365 | // This is called by pushing the arguments on the stack, not with the C ABI |
jrose@1079 | 1366 | __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass |
jrose@1079 | 1367 | __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass |
duke@435 | 1368 | |
duke@435 | 1369 | Label miss; |
jrose@1079 | 1370 | __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); |
jrose@1079 | 1371 | |
jrose@1079 | 1372 | // fallthrough on success: |
jrose@1079 | 1373 | __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result |
never@739 | 1374 | __ pop(rax); |
never@739 | 1375 | __ pop(rcx); |
never@739 | 1376 | __ pop(rsi); |
never@739 | 1377 | __ pop(rdi); |
duke@435 | 1378 | __ ret(0); |
duke@435 | 1379 | |
duke@435 | 1380 | __ bind(miss); |
jrose@1079 | 1381 | __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result |
never@739 | 1382 | __ pop(rax); |
never@739 | 1383 | __ pop(rcx); |
never@739 | 1384 | __ pop(rsi); |
never@739 | 1385 | __ pop(rdi); |
duke@435 | 1386 | __ ret(0); |
duke@435 | 1387 | } |
duke@435 | 1388 | break; |
duke@435 | 1389 | |
duke@435 | 1390 | case monitorenter_nofpu_id: |
duke@435 | 1391 | save_fpu_registers = false; |
duke@435 | 1392 | // fall through |
duke@435 | 1393 | case monitorenter_id: |
duke@435 | 1394 | { |
duke@435 | 1395 | StubFrame f(sasm, "monitorenter", dont_gc_arguments); |
duke@435 | 1396 | OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); |
duke@435 | 1397 | |
never@739 | 1398 | // Called with store_parameter, not the C ABI |
never@739 | 1399 | |
duke@435 | 1400 | f.load_argument(1, rax); // rax: object |
duke@435 | 1401 | f.load_argument(0, rbx); // rbx: lock address |
duke@435 | 1402 | |
duke@435 | 1403 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); |
duke@435 | 1404 | |
duke@435 | 1405 | oop_maps = new OopMapSet(); |
duke@435 | 1406 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1407 | restore_live_registers(sasm, save_fpu_registers); |
duke@435 | 1408 | } |
duke@435 | 1409 | break; |
duke@435 | 1410 | |
duke@435 | 1411 | case monitorexit_nofpu_id: |
duke@435 | 1412 | save_fpu_registers = false; |
duke@435 | 1413 | // fall through |
duke@435 | 1414 | case monitorexit_id: |
duke@435 | 1415 | { |
duke@435 | 1416 | StubFrame f(sasm, "monitorexit", dont_gc_arguments); |
duke@435 | 1417 | OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); |
duke@435 | 1418 | |
never@739 | 1419 | // Called with store_parameter, not the C ABI |
never@739 | 1420 | |
duke@435 | 1421 | f.load_argument(0, rax); // rax: lock address |
duke@435 | 1422 | |
duke@435 | 1423 | // note: really a leaf routine but must set up last java sp |
duke@435 | 1424 | // => use call_RT for now (speed can be improved by |
duke@435 | 1425 | // doing last java sp setup manually) |
duke@435 | 1426 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax); |
duke@435 | 1427 | |
duke@435 | 1428 | oop_maps = new OopMapSet(); |
duke@435 | 1429 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1430 | restore_live_registers(sasm, save_fpu_registers); |
duke@435 | 1431 | |
duke@435 | 1432 | } |
duke@435 | 1433 | break; |
duke@435 | 1434 | |
duke@435 | 1435 | case access_field_patching_id: |
duke@435 | 1436 | { StubFrame f(sasm, "access_field_patching", dont_gc_arguments); |
duke@435 | 1437 | // we should set up register map |
duke@435 | 1438 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); |
duke@435 | 1439 | } |
duke@435 | 1440 | break; |
duke@435 | 1441 | |
duke@435 | 1442 | case load_klass_patching_id: |
duke@435 | 1443 | { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments); |
duke@435 | 1444 | // we should set up register map |
duke@435 | 1445 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); |
duke@435 | 1446 | } |
duke@435 | 1447 | break; |
duke@435 | 1448 | |
duke@435 | 1449 | case jvmti_exception_throw_id: |
duke@435 | 1450 | { // rax: exception oop |
duke@435 | 1451 | StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments); |
duke@435 | 1452 | // Preserve all registers across this potentially blocking call |
duke@435 | 1453 | const int num_rt_args = 2; // thread, exception oop |
duke@435 | 1454 | OopMap* map = save_live_registers(sasm, num_rt_args); |
duke@435 | 1455 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax); |
duke@435 | 1456 | oop_maps = new OopMapSet(); |
duke@435 | 1457 | oop_maps->add_gc_map(call_offset, map); |
duke@435 | 1458 | restore_live_registers(sasm); |
duke@435 | 1459 | } |
duke@435 | 1460 | break; |
duke@435 | 1461 | |
duke@435 | 1462 | case dtrace_object_alloc_id: |
duke@435 | 1463 | { // rax: object |
duke@435 | 1464 | StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments); |
duke@435 | 1465 | // we can't gc here so skip the oopmap but make sure that all |
duke@435 | 1466 | // the live registers get saved. |
duke@435 | 1467 | save_live_registers(sasm, 1); |
duke@435 | 1468 | |
never@739 | 1469 | __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); |
duke@435 | 1470 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc))); |
never@739 | 1471 | NOT_LP64(__ pop(rax)); |
duke@435 | 1472 | |
duke@435 | 1473 | restore_live_registers(sasm); |
duke@435 | 1474 | } |
duke@435 | 1475 | break; |
duke@435 | 1476 | |
duke@435 | 1477 | case fpu2long_stub_id: |
duke@435 | 1478 | { |
duke@435 | 1479 | // rax and rdx are destroyed, but that is fine since the result is returned in them |
duke@435 | 1480 | // preserve rsi and rcx |
never@739 | 1481 | __ push(rsi); |
never@739 | 1482 | __ push(rcx); |
never@739 | 1483 | LP64_ONLY(__ push(rdx);) |
duke@435 | 1484 | |
duke@435 | 1485 | // check for NaN |
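// Java d2l semantics: NaN converts to 0; values outside the jlong range
// (including the infinities) are clamped to min_jlong/max_jlong further down.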
duke@435 | 1486 | Label return0, do_return, return_min_jlong, do_convert; |
duke@435 | 1487 | |
never@739 | 1488 | Address value_high_word(rsp, wordSize + 4); |
never@739 | 1489 | Address value_low_word(rsp, wordSize); |
never@739 | 1490 | Address result_high_word(rsp, 3*wordSize + 4); |
never@739 | 1491 | Address result_low_word(rsp, 3*wordSize); |
duke@435 | 1492 | |
never@739 | 1493 | __ subptr(rsp, 32); // more than enough on 32bit |
duke@435 | 1494 | __ fst_d(value_low_word); |
duke@435 | 1495 | __ movl(rax, value_high_word); |
duke@435 | 1496 | __ andl(rax, 0x7ff00000); |
duke@435 | 1497 | __ cmpl(rax, 0x7ff00000); |
duke@435 | 1498 | __ jcc(Assembler::notEqual, do_convert); |
duke@435 | 1499 | __ movl(rax, value_high_word); |
duke@435 | 1500 | __ andl(rax, 0xfffff); |
duke@435 | 1501 | __ orl(rax, value_low_word); |
duke@435 | 1502 | __ jcc(Assembler::notZero, return0); |
duke@435 | 1503 | |
duke@435 | 1504 | __ bind(do_convert); |
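// save the FPU control word, set the rounding-control bits (0xc00) to
// truncate toward zero for the fistp conversion, then restore the original
// control word afterwards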
duke@435 | 1505 | __ fnstcw(Address(rsp, 0)); |
never@739 | 1506 | __ movzwl(rax, Address(rsp, 0)); |
duke@435 | 1507 | __ orl(rax, 0xc00); |
duke@435 | 1508 | __ movw(Address(rsp, 2), rax); |
duke@435 | 1509 | __ fldcw(Address(rsp, 2)); |
duke@435 | 1510 | __ fwait(); |
duke@435 | 1511 | __ fistp_d(result_low_word); |
duke@435 | 1512 | __ fldcw(Address(rsp, 0)); |
duke@435 | 1513 | __ fwait(); |
never@739 | 1514 | // This gets the entire long in rax on 64bit |
never@739 | 1515 | __ movptr(rax, result_low_word); |
never@739 | 1516 | // testing of high bits |
duke@435 | 1517 | __ movl(rdx, result_high_word); |
never@739 | 1518 | __ mov(rcx, rax); |
duke@435 | 1519 | // What the heck is the point of the next instruction??? |
duke@435 | 1520 | __ xorl(rcx, 0x0); |
duke@435 | 1521 | __ movl(rsi, 0x80000000); |
duke@435 | 1522 | __ xorl(rsi, rdx); |
duke@435 | 1523 | __ orl(rcx, rsi); |
duke@435 | 1524 | __ jcc(Assembler::notEqual, do_return); |
duke@435 | 1525 | __ fldz(); |
duke@435 | 1526 | __ fcomp_d(value_low_word); |
duke@435 | 1527 | __ fnstsw_ax(); |
never@739 | 1528 | #ifdef _LP64 |
never@739 | 1529 | __ testl(rax, 0x4100); // ZF & CF == 0 |
never@739 | 1530 | __ jcc(Assembler::equal, return_min_jlong); |
never@739 | 1531 | #else |
duke@435 | 1532 | __ sahf(); |
duke@435 | 1533 | __ jcc(Assembler::above, return_min_jlong); |
never@739 | 1534 | #endif // _LP64 |
duke@435 | 1535 | // return max_jlong |
never@739 | 1536 | #ifndef _LP64 |
duke@435 | 1537 | __ movl(rdx, 0x7fffffff); |
duke@435 | 1538 | __ movl(rax, 0xffffffff); |
never@739 | 1539 | #else |
never@739 | 1540 | __ mov64(rax, CONST64(0x7fffffffffffffff)); |
never@739 | 1541 | #endif // _LP64 |
duke@435 | 1542 | __ jmp(do_return); |
duke@435 | 1543 | |
duke@435 | 1544 | __ bind(return_min_jlong); |
never@739 | 1545 | #ifndef _LP64 |
duke@435 | 1546 | __ movl(rdx, 0x80000000); |
duke@435 | 1547 | __ xorl(rax, rax); |
never@739 | 1548 | #else |
never@739 | 1549 | __ mov64(rax, CONST64(0x8000000000000000)); |
never@739 | 1550 | #endif // _LP64 |
duke@435 | 1551 | __ jmp(do_return); |
duke@435 | 1552 | |
duke@435 | 1553 | __ bind(return0); |
duke@435 | 1554 | __ fpop(); |
never@739 | 1555 | #ifndef _LP64 |
never@739 | 1556 | __ xorptr(rdx,rdx); |
never@739 | 1557 | __ xorptr(rax,rax); |
never@739 | 1558 | #else |
never@739 | 1559 | __ xorptr(rax, rax); |
never@739 | 1560 | #endif // _LP64 |
duke@435 | 1561 | |
duke@435 | 1562 | __ bind(do_return); |
never@739 | 1563 | __ addptr(rsp, 32); |
never@739 | 1564 | LP64_ONLY(__ pop(rdx);) |
never@739 | 1565 | __ pop(rcx); |
never@739 | 1566 | __ pop(rsi); |
duke@435 | 1567 | __ ret(0); |
duke@435 | 1568 | } |
duke@435 | 1569 | break; |
duke@435 | 1570 | |
ysr@777 | 1571 | #ifndef SERIALGC |
ysr@777 | 1572 | case g1_pre_barrier_slow_id: |
ysr@777 | 1573 | { |
ysr@777 | 1574 | StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments); |
ysr@777 | 1575 | // arg0 : previous value of memory |
ysr@777 | 1576 | |
ysr@777 | 1577 | BarrierSet* bs = Universe::heap()->barrier_set(); |
ysr@777 | 1578 | if (bs->kind() != BarrierSet::G1SATBCTLogging) { |
apetrusenko@797 | 1579 | __ movptr(rax, (int)id); |
ysr@777 | 1580 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); |
ysr@777 | 1581 | __ should_not_reach_here(); |
ysr@777 | 1582 | break; |
ysr@777 | 1583 | } |
apetrusenko@797 | 1584 | __ push(rax); |
apetrusenko@797 | 1585 | __ push(rdx); |
ysr@777 | 1586 | |
ysr@777 | 1587 | const Register pre_val = rax; |
apetrusenko@797 | 1588 | const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); |
ysr@777 | 1589 | const Register tmp = rdx; |
ysr@777 | 1590 | |
apetrusenko@797 | 1591 | NOT_LP64(__ get_thread(thread);) |
ysr@777 | 1592 | |
ysr@777 | 1593 | Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() + |
ysr@777 | 1594 | PtrQueue::byte_offset_of_active())); |
ysr@777 | 1595 | |
ysr@777 | 1596 | Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() + |
ysr@777 | 1597 | PtrQueue::byte_offset_of_index())); |
ysr@777 | 1598 | Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() + |
ysr@777 | 1599 | PtrQueue::byte_offset_of_buf())); |
ysr@777 | 1600 | |
ysr@777 | 1601 | |
ysr@777 | 1602 | Label done; |
ysr@777 | 1603 | Label runtime; |
ysr@777 | 1604 | |
ysr@777 | 1605 | // Can we store original value in the thread's buffer? |
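// The SATB queue index is a byte offset counting down from the end of the
// buffer; 0 means the buffer is full, so fall into the runtime call to have
// it processed and reset.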
ysr@777 | 1606 | |
apetrusenko@797 | 1607 | #ifdef _LP64 |
iveresov@1927 | 1608 | __ movslq(tmp, queue_index); |
apetrusenko@797 | 1609 | __ cmpq(tmp, 0); |
apetrusenko@797 | 1610 | #else |
ysr@777 | 1611 | __ cmpl(queue_index, 0); |
apetrusenko@797 | 1612 | #endif |
ysr@777 | 1613 | __ jcc(Assembler::equal, runtime); |
apetrusenko@797 | 1614 | #ifdef _LP64 |
apetrusenko@797 | 1615 | __ subq(tmp, wordSize); |
apetrusenko@797 | 1616 | __ movl(queue_index, tmp); |
apetrusenko@797 | 1617 | __ addq(tmp, buffer); |
apetrusenko@797 | 1618 | #else |
ysr@777 | 1619 | __ subl(queue_index, wordSize); |
ysr@777 | 1620 | __ movl(tmp, buffer); |
ysr@777 | 1621 | __ addl(tmp, queue_index); |
apetrusenko@797 | 1622 | #endif |
apetrusenko@797 | 1623 | |
ysr@777 | 1624 | // prev_val (rax) |
ysr@777 | 1625 | f.load_argument(0, pre_val); |
apetrusenko@797 | 1626 | __ movptr(Address(tmp, 0), pre_val); |
ysr@777 | 1627 | __ jmp(done); |
ysr@777 | 1628 | |
ysr@777 | 1629 | __ bind(runtime); |
iveresov@1927 | 1630 | __ push(rcx); |
iveresov@1927 | 1631 | #ifdef _LP64 |
iveresov@1927 | 1632 | __ push(r8); |
iveresov@1927 | 1633 | __ push(r9); |
iveresov@1927 | 1634 | __ push(r10); |
iveresov@1927 | 1635 | __ push(r11); |
iveresov@1927 | 1636 | # ifndef _WIN64 |
iveresov@1927 | 1637 | __ push(rdi); |
iveresov@1927 | 1638 | __ push(rsi); |
iveresov@1927 | 1639 | # endif |
iveresov@1927 | 1640 | #endif |
ysr@777 | 1641 | // load the pre-value |
ysr@777 | 1642 | f.load_argument(0, rcx); |
ysr@777 | 1643 | __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread); |
iveresov@1927 | 1644 | #ifdef _LP64 |
iveresov@1927 | 1645 | # ifndef _WIN64 |
iveresov@1927 | 1646 | __ pop(rsi); |
iveresov@1927 | 1647 | __ pop(rdi); |
iveresov@1927 | 1648 | # endif |
iveresov@1927 | 1649 | __ pop(r11); |
iveresov@1927 | 1650 | __ pop(r10); |
iveresov@1927 | 1651 | __ pop(r9); |
iveresov@1927 | 1652 | __ pop(r8); |
iveresov@1927 | 1653 | #endif |
apetrusenko@797 | 1654 | __ pop(rcx); |
iveresov@1927 | 1655 | __ bind(done); |
ysr@777 | 1656 | |
apetrusenko@797 | 1657 | __ pop(rdx); |
apetrusenko@797 | 1658 | __ pop(rax); |
ysr@777 | 1659 | } |
ysr@777 | 1660 | break; |
ysr@777 | 1661 | |
ysr@777 | 1662 | case g1_post_barrier_slow_id: |
ysr@777 | 1663 | { |
ysr@777 | 1664 | StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments); |
ysr@777 | 1665 | |
ysr@777 | 1666 | |
ysr@777 | 1667 | // arg0: store_address |
ysr@777 | 1668 | Address store_addr(rbp, 2*BytesPerWord); |
ysr@777 | 1669 | |
ysr@777 | 1670 | BarrierSet* bs = Universe::heap()->barrier_set(); |
ysr@777 | 1671 | CardTableModRefBS* ct = (CardTableModRefBS*)bs; |
ysr@777 | 1672 | Label done; |
ysr@777 | 1673 | Label runtime; |
ysr@777 | 1674 | |
ysr@777 | 1675 | // At this point we know new_value is non-NULL and that it crosses regions. |
ysr@777 | 1676 | // Must check to see if card is already dirty |
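// The card byte lives at byte_map_base + (store address >> card_shift); a
// value of 0 means the card is already dirty and nothing more is needed,
// otherwise we dirty it and enqueue the card address on the thread's dirty
// card queue (calling into the runtime when the queue is full).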
ysr@777 | 1677 | |
apetrusenko@797 | 1678 | const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); |
ysr@777 | 1679 | |
ysr@777 | 1680 | Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() + |
ysr@777 | 1681 | PtrQueue::byte_offset_of_index())); |
ysr@777 | 1682 | Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() + |
ysr@777 | 1683 | PtrQueue::byte_offset_of_buf())); |
ysr@777 | 1684 | |
apetrusenko@797 | 1685 | __ push(rax); |
iveresov@1927 | 1686 | __ push(rcx); |
ysr@777 | 1687 | |
apetrusenko@797 | 1688 | NOT_LP64(__ get_thread(thread);) |
apetrusenko@797 | 1689 | ExternalAddress cardtable((address)ct->byte_map_base); |
ysr@777 | 1690 | assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code"); |
ysr@777 | 1691 | |
iveresov@1927 | 1692 | const Register card_addr = rcx; |
apetrusenko@797 | 1693 | #ifdef _LP64 |
apetrusenko@797 | 1694 | const Register tmp = rscratch1; |
apetrusenko@797 | 1695 | f.load_argument(0, card_addr); |
apetrusenko@797 | 1696 | __ shrq(card_addr, CardTableModRefBS::card_shift); |
apetrusenko@797 | 1697 | __ lea(tmp, cardtable); |
apetrusenko@797 | 1698 | // get the address of the card |
apetrusenko@797 | 1699 | __ addq(card_addr, tmp); |
apetrusenko@797 | 1700 | #else |
iveresov@1927 | 1701 | const Register card_index = rcx; |
apetrusenko@797 | 1702 | f.load_argument(0, card_index); |
apetrusenko@797 | 1703 | __ shrl(card_index, CardTableModRefBS::card_shift); |
apetrusenko@797 | 1704 | |
ysr@777 | 1705 | Address index(noreg, card_index, Address::times_1); |
ysr@777 | 1706 | __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index))); |
apetrusenko@797 | 1707 | #endif |
apetrusenko@797 | 1708 | |
ysr@777 | 1709 | __ cmpb(Address(card_addr, 0), 0); |
ysr@777 | 1710 | __ jcc(Assembler::equal, done); |
ysr@777 | 1711 | |
ysr@777 | 1712 | // We are storing a region-crossing, non-NULL oop and the card is still clean: |
ysr@777 | 1713 | // dirty the card and log it. |
ysr@777 | 1714 | |
ysr@777 | 1715 | __ movb(Address(card_addr, 0), 0); |
ysr@777 | 1716 | |
ysr@777 | 1717 | __ cmpl(queue_index, 0); |
ysr@777 | 1718 | __ jcc(Assembler::equal, runtime); |
ysr@777 | 1719 | __ subl(queue_index, wordSize); |
ysr@777 | 1720 | |
ysr@777 | 1721 | const Register buffer_addr = rbx; |
apetrusenko@797 | 1722 | __ push(rbx); |
ysr@777 | 1723 | |
apetrusenko@797 | 1724 | __ movptr(buffer_addr, buffer); |
apetrusenko@797 | 1725 | |
apetrusenko@797 | 1726 | #ifdef _LP64 |
apetrusenko@797 | 1727 | __ movslq(rscratch1, queue_index); |
apetrusenko@797 | 1728 | __ addptr(buffer_addr, rscratch1); |
apetrusenko@797 | 1729 | #else |
apetrusenko@797 | 1730 | __ addptr(buffer_addr, queue_index); |
apetrusenko@797 | 1731 | #endif |
apetrusenko@797 | 1732 | __ movptr(Address(buffer_addr, 0), card_addr); |
apetrusenko@797 | 1733 | |
apetrusenko@797 | 1734 | __ pop(rbx); |
ysr@777 | 1735 | __ jmp(done); |
ysr@777 | 1736 | |
ysr@777 | 1737 | __ bind(runtime); |
iveresov@1927 | 1738 | __ push(rdx); |
iveresov@1927 | 1739 | #ifdef _LP64 |
iveresov@1927 | 1740 | __ push(r8); |
iveresov@1927 | 1741 | __ push(r9); |
iveresov@1927 | 1742 | __ push(r10); |
iveresov@1927 | 1743 | __ push(r11); |
iveresov@1927 | 1744 | # ifndef _WIN64 |
iveresov@1927 | 1745 | __ push(rdi); |
iveresov@1927 | 1746 | __ push(rsi); |
iveresov@1927 | 1747 | # endif |
iveresov@1927 | 1748 | #endif |
ysr@777 | 1749 | __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread); |
iveresov@1927 | 1750 | #ifdef _LP64 |
iveresov@1927 | 1751 | # ifndef _WIN64 |
iveresov@1927 | 1752 | __ pop(rsi); |
iveresov@1927 | 1753 | __ pop(rdi); |
iveresov@1927 | 1754 | # endif |
iveresov@1927 | 1755 | __ pop(r11); |
iveresov@1927 | 1756 | __ pop(r10); |
iveresov@1927 | 1757 | __ pop(r9); |
iveresov@1927 | 1758 | __ pop(r8); |
iveresov@1927 | 1759 | #endif |
iveresov@1927 | 1760 | __ pop(rdx); |
iveresov@1927 | 1761 | __ bind(done); |
ysr@777 | 1762 | |
iveresov@1927 | 1763 | __ pop(rcx); |
apetrusenko@797 | 1764 | __ pop(rax); |
ysr@777 | 1765 | |
ysr@777 | 1766 | } |
ysr@777 | 1767 | break; |
ysr@777 | 1768 | #endif // !SERIALGC |
ysr@777 | 1769 | |
duke@435 | 1770 | default: |
duke@435 | 1771 | { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments); |
never@739 | 1772 | __ movptr(rax, (int)id); |
duke@435 | 1773 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); |
duke@435 | 1774 | __ should_not_reach_here(); |
duke@435 | 1775 | } |
duke@435 | 1776 | break; |
duke@435 | 1777 | } |
duke@435 | 1778 | return oop_maps; |
duke@435 | 1779 | } |
duke@435 | 1780 | |
duke@435 | 1781 | #undef __ |