Thu, 03 Jan 2013 16:30:47 -0800
8005544: Use 256bit YMM registers in arraycopy stubs on x86
Summary: Use YMM registers in arraycopy and array_fill stubs.
Reviewed-by: roland, twisti

/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
  assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
  bool align_stack = false;
#ifdef _LP64
  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
#endif

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  int call_offset;
  if (!align_stack) {
    set_last_Java_frame(thread, noreg, rbp, NULL);
  } else {
    address the_pc = pc();
    call_offset = offset();
    set_last_Java_frame(thread, noreg, rbp, the_pc);
    andptr(rsp, -(StackAlignmentInBytes));  // Align stack
  }

  // do the call
  call(RuntimeAddress(entry));
  if (!align_stack) {
    call_offset = offset();
  }
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, align_stack);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (metadata_result->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    get_vm_result(oop_result1, thread);
  }
  if (metadata_result->is_valid()) {
    get_vm_result_2(metadata_result, thread);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 2);
}
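
// The 64-bit path above must land arg1/arg2 in c_rarg1/c_rarg2 without
// clobbering a value that is still needed:
//   - arg1 and arg2 already sit in each other's target registers: a pure
//     swap, done with xchgq so neither value is lost;
//   - only arg2 sits in c_rarg1: write c_rarg2 first, then c_rarg1;
//   - otherwise the copies are conflict-free and can be done in order.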


int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, metadata_result, entry, 3);
}
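
// The push/pop round-trip above resolves any permutation of arg1..arg3 into
// c_rarg1..c_rarg3, including three-way cycles that pairwise moves could not
// untangle without a scratch register.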


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}
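
// A minimal usage sketch (illustrative, mirroring the stubs below): a stub
// brackets its body in a StubFrame so enter()/leave()/ret(0) are emitted by
// the constructor and destructor:
//
//   { StubFrame f(sasm, "monitorenter", dont_gc_arguments);
//     f.load_argument(1, rax);   // object, stored with store_parameter
//     ...                        // stub body
//   }                            // ~StubFrame emits leave() and ret(0)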
#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments,
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                          // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                          // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                          // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                          // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                  // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots,  // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,          // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),                // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                                // 352, 356
  extra_space_offset,                                                                       // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                   // 360, 364
  r14_off, r14H_off,                                                                        // 368, 372
  r13_off, r13H_off,                                                                        // 376, 380
  r12_off, r12H_off,                                                                        // 384, 388
  r11_off, r11H_off,                                                                        // 392, 396
  r10_off, r10H_off,                                                                        // 400, 404
  r9_off, r9H_off,                                                                          // 408, 412
  r8_off, r8H_off,                                                                          // 416, 420
  rdi_off, rdiH_off,                                                                        // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                  // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                  // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                  // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                  // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                  // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                  // 472, 476
  rax_off, SLOT2(raxH_off)                                                                  // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                      // 488, 492
  return_off, SLOT2(returnH_off)                                                            // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                     // 504
};
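
// Reading the layout above: each enum value is one 4-byte VMReg stack slot,
// and the trailing comments show byte offsets from rsp for one sample
// configuration. Code below addresses a saved register through its slot,
// e.g.
//   Address(rsp, rax_off * VMRegImpl::stack_slot_size)
// which is also the slot generate_oop_map() records for it in the OopMap.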


// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}
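
// Note: the OopMap built above tells the GC where each saved register lives
// in this stub frame, so a stack walk during the runtime call can visit (and
// update) any oops held in those registers.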

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);
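
  // rax is deliberately not reloaded here: it carries the result of the
  // runtime call (e.g. a newly allocated oop) back to compiled code, so the
  // remaining integer registers are restored individually below instead of
  // with popa().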
#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
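
// Note: the runtime target always posts an exception, so call_RT never falls
// through to this point at runtime; the stop() above only documents that the
// code following the call is unreachable.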


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only rax is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
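
  // Explanatory note: at a method handle call site the compiled frame's SP
  // may have been modified, so a usable SP is kept in the register aliased
  // as rbp_mh_SP_save; when is_method_handle_return is set for this PC the
  // cmov above restores SP from it before continuing at the handler.
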
  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to the deopt blob. We can blow no
    // registers and must leave the throwing pc on the stack. A patch may
    // have values live in registers, so use the entry point that expects
    // the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the Method*
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset()));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
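
          // Worked example of the size computation above (with assumed
          // numbers): for an int[5], the low bits of layout_helper hold
          // log2(element size) == 2, so the body is 5 << 2 == 20 bytes;
          // the header size is extracted from the same layout_helper word,
          // added, and the total is rounded up to MinObjAlignmentInBytes by
          // the add-mask/and-not-mask pair above.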
          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, Klass::layout_helper_offset()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from the top of the stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset()));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must set up last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;
1464 case deoptimize_id:
1465 {
1466 StubFrame f(sasm, "deoptimize", dont_gc_arguments);
1467 const int num_rt_args = 1; // thread
1468 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
1469 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
1470 oop_maps = new OopMapSet();
1471 oop_maps->add_gc_map(call_offset, oop_map);
1472 restore_live_registers(sasm);
1473 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1474 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1475 __ leave();
1476 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1477 }
1478 break;
1480 case access_field_patching_id:
1481 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
1482 // we should set up register map
1483 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
1484 }
1485 break;
1487 case load_klass_patching_id:
1488 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
1489 // we should set up register map
1490 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
1491 }
1492 break;
1494 case load_mirror_patching_id:
1495 { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
1496 // we should set up register map
1497 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
1498 }
1499 break;
1501 case dtrace_object_alloc_id:
1502 { // rax: object
1503 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1504 // we cannot GC here, so skip the oop map, but make sure that all
1505 // the live registers get saved.
1506 save_live_registers(sasm, 1);
1508 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1509 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1510 NOT_LP64(__ pop(rax));
1512 restore_live_registers(sasm);
1513 }
1514 break;
1516 case fpu2long_stub_id:
1517 {
1518 // rax and rdx are destroyed, but that is fine since the result is returned in them
1519 // preserve rsi and rcx
1520 __ push(rsi);
1521 __ push(rcx);
1522 LP64_ONLY(__ push(rdx);)
1524 // check for NaN
1525 Label return0, do_return, return_min_jlong, do_convert;
1527 Address value_high_word(rsp, wordSize + 4);
1528 Address value_low_word(rsp, wordSize);
1529 Address result_high_word(rsp, 3*wordSize + 4);
1530 Address result_low_word(rsp, 3*wordSize);
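// Scratch layout relative to rsp after the subptr below: the incoming
// x87 value is spilled to value_low_word/value_high_word, and fistp_d
// writes the converted long to result_low_word/result_high_word.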
1532 __ subptr(rsp, 32); // more than enough on 32bit
1533 __ fst_d(value_low_word);
1534 __ movl(rax, value_high_word);
1535 __ andl(rax, 0x7ff00000);
1536 __ cmpl(rax, 0x7ff00000);
1537 __ jcc(Assembler::notEqual, do_convert);
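// The exponent is all ones, so the value is an infinity or a NaN; a
// nonzero mantissa means NaN, which must convert to 0. Infinities fall
// through into do_convert and are handled by the overflow checks below.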
1538 __ movl(rax, value_high_word);
1539 __ andl(rax, 0xfffff);
1540 __ orl(rax, value_low_word);
1541 __ jcc(Assembler::notZero, return0);
1543 __ bind(do_convert);
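// Set the x87 rounding-control bits (mask 0xc00) to round-toward-zero so
// the conversion truncates as a Java (long) cast requires, do the store,
// then restore the original control word.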
1544 __ fnstcw(Address(rsp, 0));
1545 __ movzwl(rax, Address(rsp, 0));
1546 __ orl(rax, 0xc00);
1547 __ movw(Address(rsp, 2), rax);
1548 __ fldcw(Address(rsp, 2));
1549 __ fwait();
1550 __ fistp_d(result_low_word);
1551 __ fldcw(Address(rsp, 0));
1552 __ fwait();
1553 // This gets the entire long in rax on 64bit
1554 __ movptr(rax, result_low_word);
1555 // Check whether the conversion produced the x87 "integer indefinite"
     // value 0x8000000000000000, which fistp stores on overflow: that is
     // the case iff the low word is 0 and the high word is 0x80000000,
     // i.e. iff (low | (high ^ 0x80000000)) == 0.
1556 __ movl(rdx, result_high_word);
1557 __ mov(rcx, rax);
1558 // xor with 0 leaves the low 32 bits of rcx unchanged and has no
     // effect on the test below, whose flags are set by the or.
1559 __ xorl(rcx, 0x0);
1560 __ movl(rsi, 0x80000000);
1561 __ xorl(rsi, rdx);
1562 __ orl(rcx, rsi);
1563 __ jcc(Assembler::notEqual, do_return);
1564 __ fldz();
1565 __ fcomp_d(value_low_word);
1566 __ fnstsw_ax();
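// fldz/fcomp compared 0.0 against the spilled value (popping the 0.0);
// the condition bits now in ax give us the sign: if 0.0 > value the
// original value was negative, so the overflow saturates to min_jlong,
// otherwise to max_jlong.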
1567 #ifdef _LP64
1568 __ testl(rax, 0x4100); // ZF & CF == 0
1569 __ jcc(Assembler::equal, return_min_jlong);
1570 #else
1571 __ sahf();
1572 __ jcc(Assembler::above, return_min_jlong);
1573 #endif // _LP64
1574 // return max_jlong
1575 #ifndef _LP64
1576 __ movl(rdx, 0x7fffffff);
1577 __ movl(rax, 0xffffffff);
1578 #else
1579 __ mov64(rax, CONST64(0x7fffffffffffffff));
1580 #endif // _LP64
1581 __ jmp(do_return);
1583 __ bind(return_min_jlong);
1584 #ifndef _LP64
1585 __ movl(rdx, 0x80000000);
1586 __ xorl(rax, rax);
1587 #else
1588 __ mov64(rax, CONST64(0x8000000000000000));
1589 #endif // _LP64
1590 __ jmp(do_return);
1592 __ bind(return0);
1593 __ fpop();
1594 #ifndef _LP64
1595 __ xorptr(rdx,rdx);
1596 __ xorptr(rax,rax);
1597 #else
1598 __ xorptr(rax, rax);
1599 #endif // _LP64
1601 __ bind(do_return);
1602 __ addptr(rsp, 32);
1603 LP64_ONLY(__ pop(rdx);)
1604 __ pop(rcx);
1605 __ pop(rsi);
1606 __ ret(0);
1607 }
1608 break;
1610 #ifndef SERIALGC
1611 case g1_pre_barrier_slow_id:
1612 {
1613 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1614 // arg0 : previous value of memory
1616 BarrierSet* bs = Universe::heap()->barrier_set();
1617 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1618 __ movptr(rax, (int)id);
1619 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1620 __ should_not_reach_here();
1621 break;
1622 }
1623 __ push(rax);
1624 __ push(rdx);
1626 const Register pre_val = rax;
1627 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1628 const Register tmp = rdx;
1630 NOT_LP64(__ get_thread(thread);)
1632 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1633 PtrQueue::byte_offset_of_active()));
1635 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1636 PtrQueue::byte_offset_of_index()));
1637 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1638 PtrQueue::byte_offset_of_buf()));
1641 Label done;
1642 Label runtime;
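// The SATB queue index counts down in bytes from the buffer size; an
// index of zero means the thread's buffer is full and the runtime must
// be called to process it.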
1644 // Can we store the original value in the thread's SATB buffer?
1646 #ifdef _LP64
1647 __ movslq(tmp, queue_index);
1648 __ cmpq(tmp, 0);
1649 #else
1650 __ cmpl(queue_index, 0);
1651 #endif
1652 __ jcc(Assembler::equal, runtime);
1653 #ifdef _LP64
1654 __ subq(tmp, wordSize);
1655 __ movl(queue_index, tmp);
1656 __ addq(tmp, buffer);
1657 #else
1658 __ subl(queue_index, wordSize);
1659 __ movl(tmp, buffer);
1660 __ addl(tmp, queue_index);
1661 #endif
1663 // prev_val (rax)
1664 f.load_argument(0, pre_val);
1665 __ movptr(Address(tmp, 0), pre_val);
1666 __ jmp(done);
1668 __ bind(runtime);
1669 __ push(rcx);
1670 #ifdef _LP64
1671 __ push(r8);
1672 __ push(r9);
1673 __ push(r10);
1674 __ push(r11);
1675 # ifndef _WIN64
1676 __ push(rdi);
1677 __ push(rsi);
1678 # endif
1679 #endif
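// All C-ABI volatile registers are now saved; call_VM_leaf makes a
// native call that would otherwise clobber them.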
1680 // load the pre-value
1681 f.load_argument(0, rcx);
1682 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1683 #ifdef _LP64
1684 # ifndef _WIN64
1685 __ pop(rsi);
1686 __ pop(rdi);
1687 # endif
1688 __ pop(r11);
1689 __ pop(r10);
1690 __ pop(r9);
1691 __ pop(r8);
1692 #endif
1693 __ pop(rcx);
1694 __ bind(done);
1696 __ pop(rdx);
1697 __ pop(rax);
1698 }
1699 break;
1701 case g1_post_barrier_slow_id:
1702 {
1703 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1706 // arg0: store_address
1707 Address store_addr(rbp, 2*BytesPerWord);
1709 BarrierSet* bs = Universe::heap()->barrier_set();
1710 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1711 Label done;
1712 Label runtime;
1714 // At this point we know new_value is non-NULL and the store crosses region boundaries.
1715 // Must check to see if the card is already dirty.
1717 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1719 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1720 PtrQueue::byte_offset_of_index()));
1721 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1722 PtrQueue::byte_offset_of_buf()));
1724 __ push(rax);
1725 __ push(rcx);
1727 NOT_LP64(__ get_thread(thread);)
1728 ExternalAddress cardtable((address)ct->byte_map_base);
1729 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1731 const Register card_addr = rcx;
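// Compute the card table entry for the store address:
// card_addr = byte_map_base + (store_addr >> card_shift).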
1732 #ifdef _LP64
1733 const Register tmp = rscratch1;
1734 f.load_argument(0, card_addr);
1735 __ shrq(card_addr, CardTableModRefBS::card_shift);
1736 __ lea(tmp, cardtable);
1737 // get the address of the card
1738 __ addq(card_addr, tmp);
1739 #else
1740 const Register card_index = rcx;
1741 f.load_argument(0, card_index);
1742 __ shrl(card_index, CardTableModRefBS::card_shift);
1744 Address index(noreg, card_index, Address::times_1);
1745 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
1746 #endif
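// A card value of 0 means dirty (see the movb below); if the card has
// already been dirtied there is nothing left to do.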
1748 __ cmpb(Address(card_addr, 0), 0);
1749 __ jcc(Assembler::equal, done);
1751 // We are storing a region-crossing non-NULL value and the card is clean:
1752 // dirty the card and log it.
1754 __ movb(Address(card_addr, 0), 0);
1756 __ cmpl(queue_index, 0);
1757 __ jcc(Assembler::equal, runtime);
1758 __ subl(queue_index, wordSize);
1760 const Register buffer_addr = rbx;
1761 __ push(rbx);
1763 __ movptr(buffer_addr, buffer);
1765 #ifdef _LP64
1766 __ movslq(rscratch1, queue_index);
1767 __ addptr(buffer_addr, rscratch1);
1768 #else
1769 __ addptr(buffer_addr, queue_index);
1770 #endif
1771 __ movptr(Address(buffer_addr, 0), card_addr);
1773 __ pop(rbx);
1774 __ jmp(done);
1776 __ bind(runtime);
1777 __ push(rdx);
1778 #ifdef _LP64
1779 __ push(r8);
1780 __ push(r9);
1781 __ push(r10);
1782 __ push(r11);
1783 # ifndef _WIN64
1784 __ push(rdi);
1785 __ push(rsi);
1786 # endif
1787 #endif
1788 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1789 #ifdef _LP64
1790 # ifndef _WIN64
1791 __ pop(rsi);
1792 __ pop(rdi);
1793 # endif
1794 __ pop(r11);
1795 __ pop(r10);
1796 __ pop(r9);
1797 __ pop(r8);
1798 #endif
1799 __ pop(rdx);
1800 __ bind(done);
1802 __ pop(rcx);
1803 __ pop(rax);
1805 }
1806 break;
1807 #endif // !SERIALGC
1809 default:
1810 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1811 __ movptr(rax, (int)id);
1812 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1813 __ should_not_reach_here();
1814 }
1815 break;
1816 }
1817 return oop_maps;
1818 }
1820 #undef __
1822 const char *Runtime1::pd_name_for_address(address entry) {
1823 return "<unknown function>";
1824 }