Mon, 09 Mar 2009 13:28:46 -0700
6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair

/*
 * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_Runtime1_x86.cpp.incl"


// Implementation of StubAssembler
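
// Summary of the stub-to-runtime calling convention implemented by call_RT
// below: the current JavaThread is passed as the implicit first C argument
// (in c_rarg0 on 64-bit, pushed on the stack on 32-bit), the last Java frame
// is recorded, the call is made, a pending exception is checked for and
// forwarded, and any oop results are fetched from the thread-local
// vm_result fields into the requested registers.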

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
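  // (NOT_LP64(x)/LP64_ONLY(x) expand to their argument on exactly one word
  // size, so `thread` is rdi on 32-bit and the reserved r15_thread on 64-bit.)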
  assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
  assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  set_last_Java_frame(thread, noreg, rbp, NULL);

  // do the call
  call(RuntimeAddress(entry));
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, false);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (oop_result2->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
    movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result1);
  }
  if (oop_result2->is_valid()) {
    movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
    movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    verify_oop(oop_result2);
  }
  return call_offset;
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 3);
}
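
// A typical use of call_RT in the stubs below (see e.g. new_instance_id):
//
//   OopMap* map = save_live_registers(sasm, 2);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
//
// The returned offset is the pc immediately after the runtime call; pairing
// it with the OopMap lets the GC find and update oops while the stub frame
// is on the stack.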


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};


#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
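// Example: load_argument(0, rbx) loads the word at rbp + 2*BytesPerWord,
// i.e. the first value the caller stored with LIR_Assembler::store_parameter
// before entering the stub.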


StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization).
// Note: users of this frame may well have arguments to some runtime call on
// the stack at the same time. These positions neglect those arguments, but
// the code in save_live_registers will take the argument count into account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64
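
// On 64-bit, "dummy1, SLOT2(dummy1H)" expands to "dummy1, dummy1H," so each
// value occupies two 32-bit stack slots, while on 32-bit SLOT2(x) expands to
// nothing and each value gets a single slot. The trailing comments give the
// resulting 64-bit byte offsets.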

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
  dummy1, SLOT2(dummy1H)                  // 0, 4
  dummy2, SLOT2(dummy2H)                  // 8, 12
  // Two temps to be used as needed by users of save/restore callee registers
  temp_2_off, SLOT2(temp_2H_off)          // 16, 20
  temp_1_off, SLOT2(temp_1H_off)          // 24, 28
  xmm_regs_as_doubles_off,                // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots,         // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD),               // 352
  marker = fpu_state_end_off, SLOT2(markerH)                                               // 352, 356
  extra_space_offset,                                                                      // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off, // 360, 364
  r14_off, r14H_off,                      // 368, 372
  r13_off, r13H_off,                      // 376, 380
  r12_off, r12H_off,                      // 384, 388
  r11_off, r11H_off,                      // 392, 396
  r10_off, r10H_off,                      // 400, 404
  r9_off, r9H_off,                        // 408, 412
  r8_off, r8H_off,                        // 416, 420
  rdi_off, rdiH_off,                      // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                // 432, 436
  rbp_off, SLOT2(rbpH_off)                // 440, 444
  rsp_off, SLOT2(rspH_off)                // 448, 452
  rbx_off, SLOT2(rbxH_off)                // 456, 460
  rdx_off, SLOT2(rdxH_off)                // 464, 468
  rcx_off, SLOT2(rcxH_off)                // 472, 476
  rax_off, SLOT2(raxH_off)                // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)    // 488, 492
  return_off, SLOT2(returnH_off)          // 496, 500
  reg_save_frame_size,  // As noted: neglects any parameters to runtime      // 504

#ifdef _WIN64
  c_rarg0_off = rcx_off,
#else
  c_rarg0_off = rdi_off,
#endif // WIN64

  // equates

  // illegal instruction handler
  continue_dest_off = temp_1_off,

  // deoptimization equates
  fp0_off = float_regs_as_doubles_off, // slot for java float/double return value
  xmm0_off = xmm_regs_as_doubles_off,  // slot for java float/double return value
  deopt_type = temp_2_off,             // slot for type of deopt in progress
  ret_type = temp_1_off                // slot for return type
};


// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them, and on P4
// saving FPU registers which don't contain anything appears
// expensive. The deopt blob is the only thing which needs to
// describe FPU registers. In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed: the map records 32-bit stack slots, so the
  // high half of each 64-bit register must be described as well.
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}
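
// Each set_callee_saved entry above tells the GC which stack slot holds the
// saved value of a register, so oops live in those registers can be found
// and updated while the runtime call is in progress.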

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  // 64bit passes the args in regs to the c++ runtime
  int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
  // frame_size = round_to(frame_size, 4);
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions. Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64));
      __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);

#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}


void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) {
  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc = rdx;
  // other registers used in this stub
  const Register real_return_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  __ block_comment("generate_handle_exception");

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax, and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax, contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);

  // save real return address (pc that called this stub)
  __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
  __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax,: handler address or NULL if no handler exists; this will be the
  //       deopt blob if the nmethod was deoptimized while we looked up the
  //       handler, regardless of whether a handler existed in the nmethod.

  // only rax, is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // Do we have an exception handler in the nmethod?
  Label no_handler;
  Label done;
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, no_handler);

  // exception handler found
  // patch the return address -> the stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  // restore registers
  restore_live_registers(sasm, save_fpu_registers);

  // return to exception handler
  __ leave();
  __ ret(0);

  __ bind(no_handler);
  // no exception handler found in this method, so the exception is
  // forwarded to the caller (using the unwind code of the nmethod)
  // there is no need to restore the registers

  // restore the real return address that was saved before the RT-call
  __ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
  __ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)
  // restore exception oop into rax, (convention for unwind code)
  __ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));

  // clear exception fields in JavaThread because they are no longer needed
  // (fields must be cleared because they are processed by GC otherwise)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);

  // pop the stub frame off
  __ leave();

  generate_unwind_exception(sasm);
  __ stop("should not reach here");
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax, is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // leave activation of nmethod
  __ leave();
  // store return address (is on top of stack after leave)
  __ movptr(exception_pc, Address(rsp, 0));

  __ verify_oop(exception_oop);

  // save exception oop from rax, to stack before call
  __ push(exception_oop);

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
  // rax,: exception handler address of the caller

  // only rax, is valid at this time, all other registers have been destroyed by the call
  __ invalidate_registers(false, true, true, true, true, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // restore exception oop in rax, (required convention of exception handler)
  __ pop(exception_oop);

  __ verify_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // verify that there is really a valid exception in rax,
  __ verify_not_null_oop(exception_oop);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax,: exception oop
  // rdx: throwing pc
  // rbx,: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: this number also affects the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);  // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so we use the entry point that
    // expects the exception in tls.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);            // have we deoptimized?
  __ jcc(Assembler::equal, cont);  // no

  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;

}


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        // we're handling an exception in the context of a compiled
        // frame. The registers have been saved in the standard
        // places. Perform an exception lookup in the caller and
        // dispatch to the handler if found. Otherwise unwind and
        // dispatch to the caller's exception handler.

        const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
        const Register exception_oop = rax;
        const Register exception_pc = rdx;

        // load pending exception oop into rax,
        __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
        // clear pending exception
        __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

        // load issuing PC (the return address for this stub) into rdx
        __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

        // make sure that the vm_results are cleared (may be unnecessary)
        __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
        __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);

        // verify that there is really a valid exception in rax,
        __ verify_not_null_oop(exception_oop);


        oop_maps = new OopMapSet();
        OopMap* oop_map = generate_oop_map(sasm, 1);
        generate_handle_exception(sasm, oop_maps, oop_map);
        __ stop("should not reach here");
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass)

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax,: new instance
      }

      break;

#ifdef TIERED
    case counter_overflow_id:
      {
        Register bci = rax;
        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;
#endif // TIERED

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx, & rdx

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive, movl does the right thing on 64bit
          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          // since size is positive, movl does the right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
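          // Worked example (illustrative numbers only): for a 32-bit int[]
          // with a 12-byte header (log2 element size 2), length = 10 gives
          // arr_size = (10 << 2) + 12 = 52, which the two instructions above
          // round up to 56 bytes.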

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive, movl does the right thing on 64bit
          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          // since size is positive, movl does the right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax,: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax,: klass
        // rbx,: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax,: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime, so the arguments
        // will be placed in C abi locations

#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it's one word away from the top of the stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = new OopMapSet();
        OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers);
        generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), false);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sub_off, SLOT2(subH_off)
          super_off, SLOT2(superH_off)
          framesize
        };
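
        // Classic slow subtype check: scan the secondary-supers array for the
        // sought klass and, on a hit, cache it in the secondary_super_cache
        // field so the next check is fast. The result is written back into
        // the super_off stack slot (1 = hit, 0 = miss).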

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (super_off) * VMRegImpl::stack_slot_size)); // super
        __ movptr(rax, Address(rsp, (sub_off ) * VMRegImpl::stack_slot_size)); // sub

        __ movptr(rdi, Address(rsi, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
        // since size is positive, movl does the right thing on 64bit
        __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
        __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

        Label miss;
        __ repne_scan();
        __ jcc(Assembler::notEqual, miss);
        __ movptr(Address(rsi, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
        __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax,: object
        f.load_argument(0, rbx); // rbx,: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax,: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);

      }
      break;
1450 case access_field_patching_id:
1451 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
1452 // we should set up register map
1453 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
1454 }
1455 break;
1457 case load_klass_patching_id:
1458 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
1459 // we should set up register map
1460 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
1461 }
1462 break;
1464 case jvmti_exception_throw_id:
1465 { // rax,: exception oop
1466 StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
1467 // Preserve all registers across this potentially blocking call
1468 const int num_rt_args = 2; // thread, exception oop
1469 OopMap* map = save_live_registers(sasm, num_rt_args);
1470 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
1471 oop_maps = new OopMapSet();
1472 oop_maps->add_gc_map(call_offset, map);
1473 restore_live_registers(sasm);
1474 }
1475 break;
1477 case dtrace_object_alloc_id:
1478 { // rax,: object
1479 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1480 // we can't gc here so skip the oopmap but make sure that all
1481 // the live registers get saved.
1482 save_live_registers(sasm, 1);
1484 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1485 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1486 NOT_LP64(__ pop(rax));
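// 32-bit passes the object to the C function on the stack (the push/pop
// pair around the call); 64-bit passes it in the first C argument
// register instead.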
1488 restore_live_registers(sasm);
1489 }
1490 break;
1492 case fpu2long_stub_id:
1493 {
1494 // rax and rdx are destroyed, but they should be free since the result is returned in them
1495 // preserve rsi and rcx
1496 __ push(rsi);
1497 __ push(rcx);
1498 LP64_ONLY(__ push(rdx);)
1500 // check for NaN
1501 Label return0, do_return, return_min_jlong, do_convert;
1503 Address value_high_word(rsp, wordSize + 4);
1504 Address value_low_word(rsp, wordSize);
1505 Address result_high_word(rsp, 3*wordSize + 4);
1506 Address result_low_word(rsp, 3*wordSize);
1508 __ subptr(rsp, 32); // more than enough on 32-bit
1509 __ fst_d(value_low_word);
1510 __ movl(rax, value_high_word);
1511 __ andl(rax, 0x7ff00000);
1512 __ cmpl(rax, 0x7ff00000);
1513 __ jcc(Assembler::notEqual, do_convert);
1514 __ movl(rax, value_high_word);
1515 __ andl(rax, 0xfffff);
1516 __ orl(rax, value_low_word);
1517 __ jcc(Assembler::notZero, return0);
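// The test above picks apart the IEEE-754 double at value_high_word/
// value_low_word: the high word is [sign:1][exponent:11][mantissa:20].
// (hi & 0x7ff00000) == 0x7ff00000 selects Inf/NaN; among those,
// ((hi & 0xfffff) | lo) != 0 means NaN, which returns 0 as Java's d2l
// requires, while zero means +/-Inf, which falls through and saturates.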
1519 __ bind(do_convert);
1520 __ fnstcw(Address(rsp, 0));
1521 __ movzwl(rax, Address(rsp, 0));
1522 __ orl(rax, 0xc00);
1523 __ movw(Address(rsp, 2), rax);
1524 __ fldcw(Address(rsp, 2));
1525 __ fwait();
1526 __ fistp_d(result_low_word);
1527 __ fldcw(Address(rsp, 0));
1528 __ fwait();
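// The control-word dance above: fnstcw saves the current FPU control
// word, or-ing in 0xc00 forces RC (bits 10-11) to 11b = truncate toward
// zero, the rounding d2l requires, fistp_d converts and pops, and the
// final fldcw restores the caller's rounding mode.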
1529 // This gets the entire long in rax on 64-bit
1530 __ movptr(rax, result_low_word);
1531 // testing of high bits
1532 __ movl(rdx, result_high_word);
1533 __ mov(rcx, rax);
1534 // xor with 0 leaves rcx unchanged; this instruction appears to be redundant
1535 __ xorl(rcx, 0x0);
1536 __ movl(rsi, 0x80000000);
1537 __ xorl(rsi, rdx);
1538 __ orl(rcx, rsi);
1539 __ jcc(Assembler::notEqual, do_return);
1540 __ fldz();
1541 __ fcomp_d(value_low_word);
1542 __ fnstsw_ax();
1543 #ifdef _LP64
1544 __ testl(rax, 0x4100); // ZF & CF == 0
1545 __ jcc(Assembler::equal, return_min_jlong);
1546 #else
1547 __ sahf();
1548 __ jcc(Assembler::above, return_min_jlong);
1549 #endif // _LP64
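// fcomp_d compared 0.0 against the stored value: C3 and C0 both clear
// (the 0x4100 mask on 64-bit, 'above' after sahf on 32-bit) means
// 0.0 > value, i.e. a negative overflow, handled at return_min_jlong;
// otherwise fall through and return max_jlong.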
1550 // return max_jlong
1551 #ifndef _LP64
1552 __ movl(rdx, 0x7fffffff);
1553 __ movl(rax, 0xffffffff);
1554 #else
1555 __ mov64(rax, CONST64(0x7fffffffffffffff));
1556 #endif // _LP64
1557 __ jmp(do_return);
1559 __ bind(return_min_jlong);
1560 #ifndef _LP64
1561 __ movl(rdx, 0x80000000);
1562 __ xorl(rax, rax);
1563 #else
1564 __ mov64(rax, CONST64(0x8000000000000000));
1565 #endif // _LP64
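// (On 32-bit the jlong result is returned in rdx:rax; on 64-bit it is
// returned in rax alone, hence the two halves written separately above.)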
1566 __ jmp(do_return);
1568 __ bind(return0);
1569 __ fpop();
1570 #ifndef _LP64
1571 __ xorptr(rdx, rdx);
1572 __ xorptr(rax, rax);
1573 #else
1574 __ xorptr(rax, rax);
1575 #endif // _LP64
1577 __ bind(do_return);
1578 __ addptr(rsp, 32);
1579 LP64_ONLY(__ pop(rdx);)
1580 __ pop(rcx);
1581 __ pop(rsi);
1582 __ ret(0);
1583 }
1584 break;
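// Taken together, the stub implements Java's d2l semantics for the values
// the inline fist conversion could not handle (a hedged sketch of the
// semantics, not of the control flow above):
//
//   jlong fpu2long(double d) {
//     if (d != d)                 return 0;          // NaN
//     if (d >= (double)max_jlong) return max_jlong;  // positive overflow
//     if (d <= (double)min_jlong) return min_jlong;  // negative overflow
//     return (jlong)d;                               // truncate toward zero
//   }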
1586 #ifndef SERIALGC
1587 case g1_pre_barrier_slow_id:
1588 {
1589 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1590 // arg0: previous value of memory
1592 BarrierSet* bs = Universe::heap()->barrier_set();
1593 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1594 __ movptr(rax, (int)id);
1595 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1596 __ should_not_reach_here();
1597 break;
1598 }
1600 __ push(rax);
1601 __ push(rdx);
1603 const Register pre_val = rax;
1604 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1605 const Register tmp = rdx;
1607 NOT_LP64(__ get_thread(thread);)
1609 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1610 PtrQueue::byte_offset_of_active()));
1612 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1613 PtrQueue::byte_offset_of_index()));
1614 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1615 PtrQueue::byte_offset_of_buf()));
1618 Label done;
1619 Label runtime;
1621 // Can we store the original value in the thread's buffer?
1623 LP64_ONLY(__ movslq(tmp, queue_index);)
1624 #ifdef _LP64
1625 __ cmpq(tmp, 0);
1626 #else
1627 __ cmpl(queue_index, 0);
1628 #endif
1629 __ jcc(Assembler::equal, runtime);
1630 #ifdef _LP64
1631 __ subq(tmp, wordSize);
1632 __ movl(queue_index, tmp);
1633 __ addq(tmp, buffer);
1634 #else
1635 __ subl(queue_index, wordSize);
1636 __ movl(tmp, buffer);
1637 __ addl(tmp, queue_index);
1638 #endif
1640 // prev_val (rax)
1641 f.load_argument(0, pre_val);
1642 __ movptr(Address(tmp, 0), pre_val);
1643 __ jmp(done);
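// The fast path above is the standard SATB enqueue (a hedged C++ sketch;
// the queue fields are illustrative):
//
//   if (queue_index == 0) {                // buffer full -> runtime
//     SharedRuntime::g1_wb_pre(pre_val, thread);
//   } else {
//     queue_index -= wordSize;             // index counts down in bytes
//     *(buffer + queue_index) = pre_val;   // log the previous value
//   }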
1645 __ bind(runtime);
1646 // load the pre-value
1647 __ push(rcx);
1648 f.load_argument(0, rcx);
1649 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1650 __ pop(rcx);
1652 __ bind(done);
1653 __ pop(rdx);
1654 __ pop(rax);
1655 }
1656 break;
1658 case g1_post_barrier_slow_id:
1659 {
1660 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1663 // arg0: store_address
1664 Address store_addr(rbp, 2*BytesPerWord);
1666 BarrierSet* bs = Universe::heap()->barrier_set();
1667 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1668 Label done;
1669 Label runtime;
1671 // At this point we know new_value is non-NULL and that new_value crosses regions.
1672 // We must check whether the card is already dirty.
1674 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1676 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1677 PtrQueue::byte_offset_of_index()));
1678 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1679 PtrQueue::byte_offset_of_buf()));
1681 __ push(rax);
1682 __ push(rdx);
1684 NOT_LP64(__ get_thread(thread);)
1685 ExternalAddress cardtable((address)ct->byte_map_base);
1686 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1688 const Register card_addr = rdx;
1689 #ifdef _LP64
1690 const Register tmp = rscratch1;
1691 f.load_argument(0, card_addr);
1692 __ shrq(card_addr, CardTableModRefBS::card_shift);
1693 __ lea(tmp, cardtable);
1694 // get the address of the card
1695 __ addq(card_addr, tmp);
1696 #else
1697 const Register card_index = rdx;
1698 f.load_argument(0, card_index);
1699 __ shrl(card_index, CardTableModRefBS::card_shift);
1701 Address index(noreg, card_index, Address::times_1);
1702 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
1703 #endif
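// Card address computation, in both flavors above (hedged; with the
// default 512-byte cards, card_shift is 9):
//
//   jbyte* card_addr = ct->byte_map_base + (store_addr >> card_shift);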
1705 __ cmpb(Address(card_addr, 0), 0);
1706 __ jcc(Assembler::equal, done);
1708 // Storing a region-crossing non-NULL oop and the card is clean:
1709 // dirty the card and log it.
1711 __ movb(Address(card_addr, 0), 0);
1713 __ cmpl(queue_index, 0);
1714 __ jcc(Assembler::equal, runtime);
1715 __ subl(queue_index, wordSize);
1717 const Register buffer_addr = rbx;
1718 __ push(rbx);
1720 __ movptr(buffer_addr, buffer);
1722 #ifdef _LP64
1723 __ movslq(rscratch1, queue_index);
1724 __ addptr(buffer_addr, rscratch1);
1725 #else
1726 __ addptr(buffer_addr, queue_index);
1727 #endif
1728 __ movptr(Address(buffer_addr, 0), card_addr);
1730 __ pop(rbx);
1731 __ jmp(done);
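// Fast path above, in C++ terms (a hedged sketch; the dirty-card value
// written by the movb is 0 here):
//
//   if (*card_addr != 0) {                      // card still clean?
//     *card_addr = 0;                           // dirty it
//     if (queue_index == 0) goto runtime;       // buffer full
//     queue_index -= wordSize;
//     *(buffer_addr + queue_index) = card_addr; // log the card
//   }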
1733 __ bind(runtime);
1734 NOT_LP64(__ push(rcx);)
1735 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1736 NOT_LP64(__ pop(rcx);)
1738 __ bind(done);
1739 __ pop(rdx);
1740 __ pop(rax);
1742 }
1743 break;
1744 #endif // !SERIALGC
1746 default:
1747 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1748 __ movptr(rax, (int)id);
1749 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1750 __ should_not_reach_here();
1751 }
1752 break;
1753 }
1754 return oop_maps;
1755 }
1757 #undef __