Tue, 02 Sep 2014 12:48:45 -0700
8055494: Add C2 x86 intrinsic for BigInteger::multiplyToLen() method
Summary: Add new C2 intrinsic for BigInteger::multiplyToLen() on x86 in 64-bit VM.
Reviewed-by: roland
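
For context, BigInteger::multiplyToLen takes two magnitudes stored as int words
in big-endian order (most significant word first) and writes their product into
an int array of xlen + ylen words. Below is a rough C++ model of the Java loop
the new intrinsic replaces; it is illustrative only (names and types are mine,
not part of this change), but it shows the 32x32->64-bit multiply-accumulate
pattern the stub implements with hand-scheduled assembly:

#include <stdint.h>

// z = x * y, where x, y, z are big-endian arrays of 32-bit words and z has
// room for xlen + ylen words. Mirrors the shape of BigInteger.multiplyToLen.
static void multiply_to_len(const uint32_t* x, int xlen,
                            const uint32_t* y, int ylen,
                            uint32_t* z) {
  // First pass: z = x[xlen-1] * y.
  uint64_t carry = 0;
  for (int j = ylen - 1, k = xlen + ylen - 1; j >= 0; j--, k--) {
    uint64_t product = (uint64_t)y[j] * x[xlen - 1] + carry;
    z[k] = (uint32_t)product;
    carry = product >> 32;
  }
  z[xlen - 1] = (uint32_t)carry;

  // Remaining passes: multiply-accumulate one word of x at a time.
  for (int i = xlen - 2; i >= 0; i--) {
    carry = 0;
    for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
      uint64_t product = (uint64_t)y[j] * x[i] + z[k] + carry;
      z[k] = (uint32_t)product;
      carry = product >> 32;
    }
    z[i] = (uint32_t)carry;
  }
}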
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes)); // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, align_stack);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
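  // Note: arg1/arg2 may already sit in the C argument registers (e.g. arg1
  // arriving in c_rarg2 while arg2 arrives in c_rarg1); naive moves would
  // clobber one of the values, hence the ordering checks and xchgq below.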
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
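
// Illustrative pairing (see, e.g., the monitorenter stub further down): the
// LIR assembler stores outgoing stub parameters with store_parameter(n, r)
// and the stub reads them back with matching offsets:
//   f.load_argument(1, rax); // rax: object
//   f.load_argument(0, rbx); // rbx: lock address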

StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __

// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                  // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                  // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                  // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                  // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                          // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,         // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),               // 352
  marker = fpu_state_end_off, SLOT2(markerH)        // 352, 356
  extra_space_offset,                               // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,           // 360, 364
  r14_off, r14H_off,                                // 368, 372
  r13_off, r13H_off,                                // 376, 380
  r12_off, r12H_off,                                // 384, 388
  r11_off, r11H_off,                                // 392, 396
  r10_off, r10H_off,                                // 400, 404
  r9_off, r9H_off,                                  // 408, 412
  r8_off, r8H_off,                                  // 416, 420
  rdi_off, rdiH_off,                                // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                          // 432, 436
  rbp_off, SLOT2(rbpH_off)                          // 440, 444
  rsp_off, SLOT2(rspH_off)                          // 448, 452
  rbx_off, SLOT2(rbxH_off)                          // 456, 460
  rdx_off, SLOT2(rdxH_off)                          // 464, 468
  rcx_off, SLOT2(rcxH_off)                          // 472, 476
  rax_off, SLOT2(raxH_off)                          // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)              // 488, 492
  return_off, SLOT2(returnH_off)                    // 496, 500
  reg_save_frame_size  // As noted: neglects any parameters to runtime // 504
};
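
// The offsets above are in VMRegImpl::stack_slot_size (4-byte) slots; byte
// addresses are formed as offset * VMRegImpl::stack_slot_size, e.g. the
// marker word is written below as
//   Address(rsp, marker * VMRegImpl::stack_slot_size)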

// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive. The deopt blob is the only thing which needs to
// describe FPU registers. In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions. Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}

static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64));
      __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}

static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}

static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);
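
  // Restore the integer registers by hand rather than with popa so that rax,
  // which carries the stub's result (e.g. a newly allocated oop), is left
  // untouched; its slot in the pusha area is simply skipped.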
#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}

void Runtime1::initialize_pd() {
  // nothing to do
}

// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}

OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places. Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found. Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we looked
  //      up the handler, regardless of whether a handler existed in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address; this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}

void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time; all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}

OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point with the
    // exception in tls.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}

OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
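          // Worked example (illustrative numbers): for an int[] the layout
          // helper encodes an element shift of 2 and a header size of, say,
          // 16 bytes, so length = 10 gives
          //   arr_size = round_up(16 + (10 << 2), 8) = 56 bytes.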

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C abi locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case deoptimize_id:
      {
        StubFrame f(sasm, "deoptimize", dont_gc_arguments);
        const int num_rt_args = 1;  // thread
        OopMap* oop_map = save_live_registers(sasm, num_rt_args);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);
        restore_live_registers(sasm);
        DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
        assert(deopt_blob != NULL, "deoptimization blob must have been created");
        __ leave();
        __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case load_mirror_patching_id:
      { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
      }
      break;

    case load_appendix_patching_id:
      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
      }
      break;
1512 case dtrace_object_alloc_id:
1513 { // rax: object
1514 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1515 // We can't GC here, so skip the oopmap, but make sure that all
1516 // the live registers get saved.
1517 save_live_registers(sasm, 1);
1519 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1520 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1521 NOT_LP64(__ pop(rax));
1523 restore_live_registers(sasm);
1524 }
1525 break;
1527 case fpu2long_stub_id:
1528 {
1529 // rax and rdx are destroyed, but that is fine since the result is returned in them
1530 // preserve rsi and rcx
1531 __ push(rsi);
1532 __ push(rcx);
1533 LP64_ONLY(__ push(rdx);)
1535 // check for NaN
1536 Label return0, do_return, return_min_jlong, do_convert;
1538 Address value_high_word(rsp, wordSize + 4);
1539 Address value_low_word(rsp, wordSize);
1540 Address result_high_word(rsp, 3*wordSize + 4);
1541 Address result_low_word(rsp, 3*wordSize);
1543 __ subptr(rsp, 32); // more than enough on 32-bit
1544 __ fst_d(value_low_word);
1545 __ movl(rax, value_high_word);
1546 __ andl(rax, 0x7ff00000);
1547 __ cmpl(rax, 0x7ff00000);
1548 __ jcc(Assembler::notEqual, do_convert);
1549 __ movl(rax, value_high_word);
1550 __ andl(rax, 0xfffff);
1551 __ orl(rax, value_low_word);
1552 __ jcc(Assembler::notZero, return0);
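// Editor's note: the test above is the standard IEEE-754 NaN check on the
// raw double bits (exponent all ones, mantissa non-zero), roughly
//   if ((hi & 0x7ff00000) == 0x7ff00000 && ((hi & 0xfffff) | lo) != 0)
//     return 0;   // NaN converts to 0, as Java's (long)d requires
// Infinities have a zero mantissa and fall through to do_convert.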
1554 __ bind(do_convert);
1555 __ fnstcw(Address(rsp, 0));
1556 __ movzwl(rax, Address(rsp, 0));
1557 __ orl(rax, 0xc00);
1558 __ movw(Address(rsp, 2), rax);
1559 __ fldcw(Address(rsp, 2));
1560 __ fwait();
1561 __ fistp_d(result_low_word);
1562 __ fldcw(Address(rsp, 0));
1563 __ fwait();
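// Editor's sketch of the control-word dance above: Java's (long)d truncates
// toward zero, while the x87 default rounds to nearest, hence
//   cw = fnstcw();         // save the current control word
//   fldcw(cw | 0xc00);     // RC = 11b: round toward zero (truncate)
//   fistp(result);         // convert and store the 64-bit integer
//   fldcw(cw);             // restore the caller's rounding mode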
1564 // This gets the entire long in rax on 64-bit
1565 __ movptr(rax, result_low_word);
1566 // test whether fistp stored the "integer indefinite" value min_jlong
1567 __ movl(rdx, result_high_word);
1568 __ mov(rcx, rax);
1569 // xorl with 0x0 is a no-op: rcx is unchanged and its flags are clobbered by the orl below
1570 __ xorl(rcx, 0x0);
1571 __ movl(rsi, 0x80000000);
1572 __ xorl(rsi, rdx);
1573 __ orl(rcx, rsi);
1574 __ jcc(Assembler::notEqual, do_return);
1575 __ fldz();
1576 __ fcomp_d(value_low_word);
1577 __ fnstsw_ax();
1578 #ifdef _LP64
1579 __ testl(rax, 0x4100); // ZF & CF == 0
1580 __ jcc(Assembler::equal, return_min_jlong);
1581 #else
1582 __ sahf();
1583 __ jcc(Assembler::above, return_min_jlong);
1584 #endif // _LP64
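// Editor's note: the fcomp above compares 0.0 against the original value; if
// neither C3 (equal) nor C0 (below) is set, then 0.0 > value, i.e. the
// out-of-range input was negative and min_jlong is the right answer.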
1585 // return max_jlong
1586 #ifndef _LP64
1587 __ movl(rdx, 0x7fffffff);
1588 __ movl(rax, 0xffffffff);
1589 #else
1590 __ mov64(rax, CONST64(0x7fffffffffffffff));
1591 #endif // _LP64
1592 __ jmp(do_return);
1594 __ bind(return_min_jlong);
1595 #ifndef _LP64
1596 __ movl(rdx, 0x80000000);
1597 __ xorl(rax, rax);
1598 #else
1599 __ mov64(rax, CONST64(0x8000000000000000));
1600 #endif // _LP64
1601 __ jmp(do_return);
1603 __ bind(return0);
1604 __ fpop();
1605 #ifndef _LP64
1606 __ xorptr(rdx,rdx);
1607 __ xorptr(rax,rax);
1608 #else
1609 __ xorptr(rax, rax);
1610 #endif // _LP64
1612 __ bind(do_return);
1613 __ addptr(rsp, 32);
1614 LP64_ONLY(__ pop(rdx);)
1615 __ pop(rcx);
1616 __ pop(rsi);
1617 __ ret(0);
1618 }
1619 break;
1621 #if INCLUDE_ALL_GCS
1622 case g1_pre_barrier_slow_id:
1623 {
1624 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1625 // arg0 : previous value of memory
1627 BarrierSet* bs = Universe::heap()->barrier_set();
1628 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1629 __ movptr(rax, (int)id);
1630 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1631 __ should_not_reach_here();
1632 break;
1633 }
1634 __ push(rax);
1635 __ push(rdx);
1637 const Register pre_val = rax;
1638 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1639 const Register tmp = rdx;
1641 NOT_LP64(__ get_thread(thread);)
1643 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1644 PtrQueue::byte_offset_of_active()));
1646 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1647 PtrQueue::byte_offset_of_index()));
1648 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1649 PtrQueue::byte_offset_of_buf()));
1652 Label done;
1653 Label runtime;
1655 // Can we store the original value in the thread's SATB buffer?
1657 #ifdef _LP64
1658 __ movslq(tmp, queue_index);
1659 __ cmpq(tmp, 0);
1660 #else
1661 __ cmpl(queue_index, 0);
1662 #endif
1663 __ jcc(Assembler::equal, runtime);
1664 #ifdef _LP64
1665 __ subq(tmp, wordSize);
1666 __ movl(queue_index, tmp);
1667 __ addq(tmp, buffer);
1668 #else
1669 __ subl(queue_index, wordSize);
1670 __ movl(tmp, buffer);
1671 __ addl(tmp, queue_index);
1672 #endif
1674 // prev_val (rax)
1675 f.load_argument(0, pre_val);
1676 __ movptr(Address(tmp, 0), pre_val);
1677 __ jmp(done);
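// Editor's sketch of the fast path above (assuming the PtrQueue layout used
// here: the index counts down in bytes from the end of the buffer, 0 == full):
//   if (queue->index == 0) goto runtime;   // buffer full, hand off to the VM
//   queue->index -= wordSize;
//   *(intptr_t*)(queue->buf + queue->index) = pre_val;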
1679 __ bind(runtime);
1680 __ push(rcx);
1681 #ifdef _LP64
1682 __ push(r8);
1683 __ push(r9);
1684 __ push(r10);
1685 __ push(r11);
1686 # ifndef _WIN64
1687 __ push(rdi);
1688 __ push(rsi);
1689 # endif
1690 #endif
1691 // load the pre-value
1692 f.load_argument(0, rcx);
1693 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1694 #ifdef _LP64
1695 # ifndef _WIN64
1696 __ pop(rsi);
1697 __ pop(rdi);
1698 # endif
1699 __ pop(r11);
1700 __ pop(r10);
1701 __ pop(r9);
1702 __ pop(r8);
1703 #endif
1704 __ pop(rcx);
1705 __ bind(done);
1707 __ pop(rdx);
1708 __ pop(rax);
1709 }
1710 break;
1712 case g1_post_barrier_slow_id:
1713 {
1714 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1717 // arg0: store_address
1718 Address store_addr(rbp, 2*BytesPerWord);
1720 BarrierSet* bs = Universe::heap()->barrier_set();
1721 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1722 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1724 Label done;
1725 Label runtime;
1727 // At this point we know new_value is non-NULL and that it crosses regions.
1728 // Must check whether the card is already dirty.
1730 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1732 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1733 PtrQueue::byte_offset_of_index()));
1734 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1735 PtrQueue::byte_offset_of_buf()));
1737 __ push(rax);
1738 __ push(rcx);
1740 const Register cardtable = rax;
1741 const Register card_addr = rcx;
1743 f.load_argument(0, card_addr);
1744 __ shrptr(card_addr, CardTableModRefBS::card_shift);
1745 // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
1746 // a valid address and therefore is not properly handled by the relocation code.
1747 __ movptr(cardtable, (intptr_t)ct->byte_map_base);
1748 __ addptr(card_addr, cardtable);
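// Editor's note (hedged): the two instructions above compute
//   card_addr = byte_map_base + (store_addr >> card_shift);
// byte_map_base is pre-biased for the heap's base address, which is also why
// it need not be a valid address on its own (see the comment above).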
1750 NOT_LP64(__ get_thread(thread);)
1752 __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
1753 __ jcc(Assembler::equal, done);
1755 __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
1756 __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
1757 __ jcc(Assembler::equal, done);
1759 // Storing a region-crossing non-NULL value and the card is clean:
1760 // dirty the card and log it.
1762 __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
1764 __ cmpl(queue_index, 0);
1765 __ jcc(Assembler::equal, runtime);
1766 __ subl(queue_index, wordSize);
1768 const Register buffer_addr = rbx;
1769 __ push(rbx);
1771 __ movptr(buffer_addr, buffer);
1773 #ifdef _LP64
1774 __ movslq(rscratch1, queue_index);
1775 __ addptr(buffer_addr, rscratch1);
1776 #else
1777 __ addptr(buffer_addr, queue_index);
1778 #endif
1779 __ movptr(Address(buffer_addr, 0), card_addr);
1781 __ pop(rbx);
1782 __ jmp(done);
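// Editor's sketch of the post-barrier fast path above, under the same
// queue layout assumption as the pre-barrier:
//   if (*card_addr == g1_young_card_val) goto done;  // young cards need no work
//   StoreLoad_fence();
//   if (*card_addr == dirty_card_val)    goto done;  // already dirty and logged
//   *card_addr = dirty_card_val;                     // dirty it, then enqueue
//   if (queue->index == 0) goto runtime;             // log buffer full
//   queue->index -= wordSize;
//   *(jbyte**)(queue->buf + queue->index) = card_addr;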
1784 __ bind(runtime);
1785 __ push(rdx);
1786 #ifdef _LP64
1787 __ push(r8);
1788 __ push(r9);
1789 __ push(r10);
1790 __ push(r11);
1791 # ifndef _WIN64
1792 __ push(rdi);
1793 __ push(rsi);
1794 # endif
1795 #endif
1796 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1797 #ifdef _LP64
1798 # ifndef _WIN64
1799 __ pop(rsi);
1800 __ pop(rdi);
1801 # endif
1802 __ pop(r11);
1803 __ pop(r10);
1804 __ pop(r9);
1805 __ pop(r8);
1806 #endif
1807 __ pop(rdx);
1808 __ bind(done);
1810 __ pop(rcx);
1811 __ pop(rax);
1813 }
1814 break;
1815 #endif // INCLUDE_ALL_GCS
1817 case predicate_failed_trap_id:
1818 {
1819 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1821 OopMap* map = save_live_registers(sasm, 1);
1823 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1824 oop_maps = new OopMapSet();
1825 oop_maps->add_gc_map(call_offset, map);
1826 restore_live_registers(sasm);
1827 __ leave();
1828 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1829 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1831 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1832 }
1833 break;
1835 default:
1836 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1837 __ movptr(rax, (int)id);
1838 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1839 __ should_not_reach_here();
1840 }
1841 break;
1842 }
1843 return oop_maps;
1844 }
1846 #undef __
1848 const char *Runtime1::pd_name_for_address(address entry) {
1849 return "<unknown function>";
1850 }