/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"

// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
  assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");
#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64
  set_last_Java_frame(thread, noreg, rbp, NULL);

  // do the call
  call(RuntimeAddress(entry));
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, false);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (oop_result2->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
    movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result1);
  }
  if (oop_result2->is_valid()) {
    movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
    movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    verify_oop(oop_result2);
  }
  return call_offset;
}
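
// The overloads below marshal up to three arguments: on 64-bit they are
// moved into the C argument registers (c_rarg1..c_rarg3; c_rarg0 is
// reserved for the thread), on 32-bit they are pushed in reverse order so
// that the thread pushed by the base call_RT ends up as the first C argument.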

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
#ifdef _LP64
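  // arg1/arg2 may already be in c_rarg1/c_rarg2; order the moves so that
  // neither value is clobbered before it is read, swapping when crossed.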
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 2);
}

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
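    // Spill all three through the stack: the pops fill c_rarg1..c_rarg3 in
    // order, which is correct however the incoming registers alias them.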
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 3);
}

// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};
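
// StubFrame is a RAII helper: the constructor emits the stub prologue
// (set_info + enter) and the destructor appends the matching epilogue
// (leave + ret) when the frame object goes out of scope.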

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64
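
// SLOT2 emits the high-half slot name only on 64-bit, where each saved
// register occupies two 32-bit stack slots (SLOT_PER_WORD == 2); on 32-bit
// it expands to nothing and each register takes a single slot.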

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                                        // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                                        // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                                        // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                                        // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                                                // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,       // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),              // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                              // 352, 356
  extra_space_offset,                                                                     // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                                                 // 360, 364
  r14_off, r14H_off,                                                                      // 368, 372
  r13_off, r13H_off,                                                                      // 376, 380
  r12_off, r12H_off,                                                                      // 384, 388
  r11_off, r11H_off,                                                                      // 392, 396
  r10_off, r10H_off,                                                                      // 400, 404
  r9_off, r9H_off,                                                                        // 408, 412
  r8_off, r8H_off,                                                                        // 416, 420
  rdi_off, rdiH_off,                                                                      // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                                                // 432, 436
  rbp_off, SLOT2(rbpH_off)                                                                // 440, 444
  rsp_off, SLOT2(rspH_off)                                                                // 448, 452
  rbx_off, SLOT2(rbxH_off)                                                                // 456, 460
  rdx_off, SLOT2(rdxH_off)                                                                // 464, 468
  rcx_off, SLOT2(rcxH_off)                                                                // 472, 476
  rax_off, SLOT2(raxH_off)                                                                // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                                                    // 488, 492
  return_off, SLOT2(returnH_off)                                                          // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime                   // 504
};
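
// Note: the values in this enum are stack-slot indices (one slot is
// VMRegImpl::stack_slot_size bytes), not byte offsets.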


// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
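      // fstp_d pops st(0) as it stores, so the eight stores above save
      // st0..st7 and leave the x87 register stack empty.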
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
      __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);
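
  // Reload everything pusha saved except rax, whose slot is skipped on both
  // paths because it carries the stub's result.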
#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if the nmethod was deoptimized while we
  //      looked up the handler, regardless of whether a handler existed
  //      in the nmethod.

  // only rax is valid at this time; all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only rax and the callee-saved copy of the exception oop are valid at this
  // time; all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                  // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so we use the entry point that
    // takes the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                  // have we deoptimized?
  __ jcc(Assembler::equal, cont);                        // no

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump
  // to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax,: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the methodOop
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);
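
          // The klass layout_helper packs the array tag in its top bits, the
          // header size in bytes at _lh_header_size_shift, and log2(element
          // size) in its low byte; the shlptr below shifts by cl's low five
          // bits, hence the "mod 32" note.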
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax,: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax,: klass
        // rbx,: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax,: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C abi locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };
1378 __ set_info("slow_subtype_check", dont_gc_arguments);
1379 __ push(rdi);
1380 __ push(rsi);
1381 __ push(rcx);
1382 __ push(rax);
1384 // This is called by pushing args and not with C abi
1385 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
1386 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
1388 Label miss;
1389 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
1391 // fallthrough on success:
1392 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
1393 __ pop(rax);
1394 __ pop(rcx);
1395 __ pop(rsi);
1396 __ pop(rdi);
1397 __ ret(0);
1399 __ bind(miss);
1400 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
1401 __ pop(rax);
1402 __ pop(rcx);
1403 __ pop(rsi);
1404 __ pop(rdi);
1405 __ ret(0);
1406 }
1407 break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax,: object
        f.load_argument(0, rbx); // rbx,: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax,: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);

      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;

    case jvmti_exception_throw_id:
      { // rax,: exception oop
        StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
        // Preserve all registers across this potentially blocking call
        const int num_rt_args = 2;  // thread, exception oop
        OopMap* map = save_live_registers(sasm, num_rt_args);
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
      }
      break;
1481 case dtrace_object_alloc_id:
1482 { // rax,: object
1483 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1484       // We can't GC here, so skip the oop map, but make sure that all
1485       // the live registers get saved.
1486 save_live_registers(sasm, 1);
1488 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
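      // pass the object: on the stack for the 32-bit C calling convention,
      // in the first C argument register on 64-bit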
1489 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1490 NOT_LP64(__ pop(rax));
1492 restore_live_registers(sasm);
1493 }
1494 break;
1496 case fpu2long_stub_id:
1497 {
1498       // rax and rdx are destroyed here, but that is fine since the result is returned in them
1499       // preserve rsi and rcx
1500 __ push(rsi);
1501 __ push(rcx);
1502 LP64_ONLY(__ push(rdx);)
1504 // check for NaN
1505 Label return0, do_return, return_min_jlong, do_convert;
1507 Address value_high_word(rsp, wordSize + 4);
1508 Address value_low_word(rsp, wordSize);
1509 Address result_high_word(rsp, 3*wordSize + 4);
1510 Address result_low_word(rsp, 3*wordSize);
1512 __ subptr(rsp, 32); // more than enough on 32bit
1513 __ fst_d(value_low_word);
1514 __ movl(rax, value_high_word);
1515 __ andl(rax, 0x7ff00000);
1516 __ cmpl(rax, 0x7ff00000);
1517 __ jcc(Assembler::notEqual, do_convert);
1518 __ movl(rax, value_high_word);
1519 __ andl(rax, 0xfffff);
1520 __ orl(rax, value_low_word);
1521 __ jcc(Assembler::notZero, return0);
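      // An exponent of all ones with a non-zero mantissa is a NaN, and Java
      // defines (long)NaN == 0. An infinity (zero mantissa) falls through to
      // the conversion and saturates via the min_jlong/max_jlong paths below.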
1523 __ bind(do_convert);
1524 __ fnstcw(Address(rsp, 0));
1525 __ movzwl(rax, Address(rsp, 0));
1526 __ orl(rax, 0xc00);
1527 __ movw(Address(rsp, 2), rax);
1528 __ fldcw(Address(rsp, 2));
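      // The 0xc00 above sets the rounding-control bits (10-11) of the FPU
      // control word to round-toward-zero, so fistp_d truncates exactly as
      // the Java (long) cast requires; the saved control word is restored
      // right after the store.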
1529 __ fwait();
1530 __ fistp_d(result_low_word);
1531 __ fldcw(Address(rsp, 0));
1532 __ fwait();
1533 // This gets the entire long in rax on 64bit
1534 __ movptr(rax, result_low_word);
1535       // check whether the result is min_jlong
1536 __ movl(rdx, result_high_word);
1537 __ mov(rcx, rax);
1538       // the xorl looks like a no-op, but on LP64 a 32-bit op zero-extends,
1538       // clearing the upper half of rcx so only the low word of the result is tested
1539 __ xorl(rcx, 0x0);
1540 __ movl(rsi, 0x80000000);
1541 __ xorl(rsi, rdx);
1542 __ orl(rcx, rsi);
1543 __ jcc(Assembler::notEqual, do_return);
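      // The result is min_jlong, which is also the x87 "integer indefinite"
      // value produced on overflow, so compare the original value with zero:
      // a negative input genuinely converts to min_jlong, while a positive
      // one overflowed and must saturate to max_jlong.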
1544 __ fldz();
1545 __ fcomp_d(value_low_word);
1546 __ fnstsw_ax();
1547 #ifdef _LP64
1548       __ testl(rax, 0x4100);  // C3 | C0 == 0, i.e. 0 > value
1549 __ jcc(Assembler::equal, return_min_jlong);
1550 #else
1551 __ sahf();
1552 __ jcc(Assembler::above, return_min_jlong);
1553 #endif // _LP64
1554 // return max_jlong
1555 #ifndef _LP64
1556 __ movl(rdx, 0x7fffffff);
1557 __ movl(rax, 0xffffffff);
1558 #else
1559 __ mov64(rax, CONST64(0x7fffffffffffffff));
1560 #endif // _LP64
1561 __ jmp(do_return);
1563 __ bind(return_min_jlong);
1564 #ifndef _LP64
1565 __ movl(rdx, 0x80000000);
1566 __ xorl(rax, rax);
1567 #else
1568 __ mov64(rax, CONST64(0x8000000000000000));
1569 #endif // _LP64
1570 __ jmp(do_return);
1572 __ bind(return0);
1573 __ fpop();
1574 #ifndef _LP64
1575 __ xorptr(rdx,rdx);
1576 __ xorptr(rax,rax);
1577 #else
1578 __ xorptr(rax, rax);
1579 #endif // _LP64
1581 __ bind(do_return);
1582 __ addptr(rsp, 32);
1583 LP64_ONLY(__ pop(rdx);)
1584 __ pop(rcx);
1585 __ pop(rsi);
1586 __ ret(0);
1587 }
1588 break;
1590 #ifndef SERIALGC
1591 case g1_pre_barrier_slow_id:
1592 {
1593 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1594 // arg0 : previous value of memory
1596 BarrierSet* bs = Universe::heap()->barrier_set();
1597 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1598 __ movptr(rax, (int)id);
1599 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1600 __ should_not_reach_here();
1601 break;
1602 }
1603 __ push(rax);
1604 __ push(rdx);
1606 const Register pre_val = rax;
1607 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1608 const Register tmp = rdx;
1610 NOT_LP64(__ get_thread(thread);)
1612 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1613 PtrQueue::byte_offset_of_active()));
1615 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1616 PtrQueue::byte_offset_of_index()));
1617 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1618 PtrQueue::byte_offset_of_buf()));
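      // SATB (snapshot-at-the-beginning) pre-barrier: the value about to be
      // overwritten is logged in a thread-local queue so concurrent marking
      // still sees it. queue_index counts down in bytes from the buffer
      // capacity; an index of zero means the buffer is full.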
1621 Label done;
1622 Label runtime;
1624       // Can we store the original value in the thread's buffer?
1626 #ifdef _LP64
1627 __ movslq(tmp, queue_index);
1628 __ cmpq(tmp, 0);
1629 #else
1630 __ cmpl(queue_index, 0);
1631 #endif
1632 __ jcc(Assembler::equal, runtime);
1633 #ifdef _LP64
1634 __ subq(tmp, wordSize);
1635 __ movl(queue_index, tmp);
1636 __ addq(tmp, buffer);
1637 #else
1638 __ subl(queue_index, wordSize);
1639 __ movl(tmp, buffer);
1640 __ addl(tmp, queue_index);
1641 #endif
1643 // prev_val (rax)
1644 f.load_argument(0, pre_val);
1645 __ movptr(Address(tmp, 0), pre_val);
1646 __ jmp(done);
1648 __ bind(runtime);
1649 __ push(rcx);
1650 #ifdef _LP64
1651 __ push(r8);
1652 __ push(r9);
1653 __ push(r10);
1654 __ push(r11);
1655 # ifndef _WIN64
1656 __ push(rdi);
1657 __ push(rsi);
1658 # endif
1659 #endif
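      // call_VM_leaf can clobber the C-ABI caller-saved registers, so they
      // are saved around the call (Win64 treats rdi and rsi as callee-saved,
      // hence the #ifndef _WIN64 above).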
1660 // load the pre-value
1661 f.load_argument(0, rcx);
1662 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1663 #ifdef _LP64
1664 # ifndef _WIN64
1665 __ pop(rsi);
1666 __ pop(rdi);
1667 # endif
1668 __ pop(r11);
1669 __ pop(r10);
1670 __ pop(r9);
1671 __ pop(r8);
1672 #endif
1673 __ pop(rcx);
1674 __ bind(done);
1676 __ pop(rdx);
1677 __ pop(rax);
1678 }
1679 break;
1681 case g1_post_barrier_slow_id:
1682 {
1683 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1686 // arg0: store_address
1687 Address store_addr(rbp, 2*BytesPerWord);
1689 BarrierSet* bs = Universe::heap()->barrier_set();
1690 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1691 Label done;
1692 Label runtime;
1694       // At this point we know new_value is non-NULL and that the store crosses region boundaries.
1695       // We must check whether the card is already dirty.
1697 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1699 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1700 PtrQueue::byte_offset_of_index()));
1701 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1702 PtrQueue::byte_offset_of_buf()));
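      // Post-barrier: dirty the card spanning the store address and log the
      // card's address in the thread-local dirty card queue so concurrent
      // refinement can update the remembered sets it affects.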
1704 __ push(rax);
1705 __ push(rcx);
1707 NOT_LP64(__ get_thread(thread);)
1708 ExternalAddress cardtable((address)ct->byte_map_base);
1709 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1711 const Register card_addr = rcx;
1712 #ifdef _LP64
1713 const Register tmp = rscratch1;
1714 f.load_argument(0, card_addr);
1715 __ shrq(card_addr, CardTableModRefBS::card_shift);
1716 __ lea(tmp, cardtable);
1717 // get the address of the card
1718 __ addq(card_addr, tmp);
1719 #else
1720 const Register card_index = rcx;
1721 f.load_argument(0, card_index);
1722 __ shrl(card_index, CardTableModRefBS::card_shift);
1724 Address index(noreg, card_index, Address::times_1);
1725 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
1726 #endif
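      // In both cases card_addr = byte_map_base + (store_addr >> card_shift),
      // one card-table byte per 512-byte card.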
1728 __ cmpb(Address(card_addr, 0), 0);
1729 __ jcc(Assembler::equal, done);
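      // 0 is the dirty value: a card that is already dirty has been logged
      // and needs no further work.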
1731       // The store is region-crossing and non-NULL, and the card is clean:
1732       // dirty the card and log it.
1734 __ movb(Address(card_addr, 0), 0);
1736 __ cmpl(queue_index, 0);
1737 __ jcc(Assembler::equal, runtime);
1738 __ subl(queue_index, wordSize);
1740 const Register buffer_addr = rbx;
1741 __ push(rbx);
1743 __ movptr(buffer_addr, buffer);
1745 #ifdef _LP64
1746 __ movslq(rscratch1, queue_index);
1747 __ addptr(buffer_addr, rscratch1);
1748 #else
1749 __ addptr(buffer_addr, queue_index);
1750 #endif
1751 __ movptr(Address(buffer_addr, 0), card_addr);
1753 __ pop(rbx);
1754 __ jmp(done);
1756 __ bind(runtime);
1757 __ push(rdx);
1758 #ifdef _LP64
1759 __ push(r8);
1760 __ push(r9);
1761 __ push(r10);
1762 __ push(r11);
1763 # ifndef _WIN64
1764 __ push(rdi);
1765 __ push(rsi);
1766 # endif
1767 #endif
1768 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1769 #ifdef _LP64
1770 # ifndef _WIN64
1771 __ pop(rsi);
1772 __ pop(rdi);
1773 # endif
1774 __ pop(r11);
1775 __ pop(r10);
1776 __ pop(r9);
1777 __ pop(r8);
1778 #endif
1779 __ pop(rdx);
1780 __ bind(done);
1782 __ pop(rcx);
1783 __ pop(rax);
1785 }
1786 break;
1787 #endif // !SERIALGC
1789 default:
1790 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1791 __ movptr(rax, (int)id);
1792 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1793 __ should_not_reach_here();
1794 }
1795 break;
1796 }
1797 return oop_maps;
1798 }
1800 #undef __
1802 const char *Runtime1::pd_name_for_address(address entry) {
1803 return "<unknown function>";
1804 }