Fri, 18 Mar 2011 16:00:34 -0700
7017732: move static fields into Class to prepare for perm gen removal
Reviewed-by: kvn, coleenp, twisti, stefank
/*
 * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"


// Implementation of StubAssembler

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
  // setup registers
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
  assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
  assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
  assert(args_size >= 0, "illegal args_size");

#ifdef _LP64
  mov(c_rarg0, thread);
  set_num_rt_args(0); // Nothing on stack
#else
  set_num_rt_args(1 + args_size);

  // push java thread (becomes first argument of C function)
  get_thread(thread);
  push(thread);
#endif // _LP64

  set_last_Java_frame(thread, noreg, rbp, NULL);

  // do the call
  call(RuntimeAddress(entry));
  int call_offset = offset();
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  push(rax);
  { Label L;
    get_thread(rax);
    cmpptr(thread, rax);
    jcc(Assembler::equal, L);
    int3();
    stop("StubAssembler::call_RT: rdi not callee saved?");
    bind(L);
  }
  pop(rax);
#endif
  reset_last_Java_frame(thread, true, false);

  // discard thread and arguments
  NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));

  // check for pending exceptions
  { Label L;
    cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler
    movptr(rax, Address(thread, Thread::pending_exception_offset()));
    // make sure that the vm_results are cleared
    if (oop_result1->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    }
    if (oop_result2->is_valid()) {
      movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    }
    if (frame_size() == no_frame_size) {
      leave();
      jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    } else if (_stub_id == Runtime1::forward_exception_id) {
      should_not_reach_here();
    } else {
      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
    }
    bind(L);
  }
  // get oop results if there are any and reset the values in the thread
  if (oop_result1->is_valid()) {
    movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
    movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result1);
  }
  if (oop_result2->is_valid()) {
    movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
    movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    verify_oop(oop_result2);
  }
  return call_offset;
}
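
// Typical use (illustrative sketch, mirroring call sites later in this file):
//   OopMap* map = save_live_registers(sasm, 2);
//   int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
//   oop_maps->add_gc_map(call_offset, map);
// The returned offset identifies the instruction after the call, so the oop
// map can be registered at the exact GC point.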

int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
#ifdef _LP64
  mov(c_rarg1, arg1);
#else
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 1);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
#ifdef _LP64
  if (c_rarg1 == arg2) {
    if (c_rarg2 == arg1) {
      xchgq(arg1, arg2);
    } else {
      mov(c_rarg2, arg2);
      mov(c_rarg1, arg1);
    }
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
  }
#else
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 2);
}


int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
#ifdef _LP64
  // if there is any conflict use the stack
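  // (a push/pop rotation handles any permutation of the three arguments
  // without a scratch register; e.g. a cyclic case such as arg1 == c_rarg2
  // and arg2 == c_rarg1 would be clobbered by plain moves)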
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
    push(arg3);
    push(arg2);
    push(arg1);
    pop(c_rarg1);
    pop(c_rarg2);
    pop(c_rarg3);
  } else {
    mov(c_rarg1, arg1);
    mov(c_rarg2, arg2);
    mov(c_rarg3, arg3);
  }
#else
  push(arg3);
  push(arg2);
  push(arg1);
#endif // _LP64
  return call_RT(oop_result1, oop_result2, entry, 3);
}


// Implementation of StubFrame

class StubFrame: public StackObj {
 private:
  StubAssembler* _sasm;

 public:
  StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
  void load_argument(int offset_in_words, Register reg);

  ~StubFrame();
};

#define __ _sasm->

StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
  _sasm = sasm;
  __ set_info(name, must_gc_arguments);
  __ enter();
}

// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
  // rbp, + 0: link
  //     + 1: return address
  //     + 2: argument with offset 0
  //     + 3: argument with offset 1
  //     + 4: ...

  __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
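
// Example (sketch based on the monitorenter stub below): two parameters
// stored with LIR_Assembler::store_parameter are read back in the stub as
//   f.load_argument(1, rax);  // object
//   f.load_argument(0, rbx);  // lock address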

StubFrame::~StubFrame() {
  __ leave();
  __ ret(0);
}

#undef __


// Implementation of Runtime1

#define __ sasm->

const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;

// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization)
// Note that users of this frame may well have arguments to some runtime
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
//
#ifdef _LP64
  #define SLOT2(x) x,
  #define SLOT_PER_WORD 2
#else
  #define SLOT2(x)
  #define SLOT_PER_WORD 1
#endif // _LP64

enum reg_save_layout {
  // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
  // happen and will assert if the stack size we create is misaligned
#ifdef _LP64
  align_dummy_0, align_dummy_1,
#endif // _LP64
#ifdef _WIN64
  // Windows always allocates space for its argument registers (see
  // frame::arg_reg_save_area_bytes).
  arg_reg_save_1, arg_reg_save_1H,                                // 0, 4
  arg_reg_save_2, arg_reg_save_2H,                                // 8, 12
  arg_reg_save_3, arg_reg_save_3H,                                // 16, 20
  arg_reg_save_4, arg_reg_save_4H,                                // 24, 28
#endif // _WIN64
  xmm_regs_as_doubles_off,                                        // 32
  float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
  fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
  // fpu_state_end_off is exclusive
  fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
  marker = fpu_state_end_off, SLOT2(markerH)                      // 352, 356
  extra_space_offset,                                             // 360
#ifdef _LP64
  r15_off = extra_space_offset, r15H_off,                         // 360, 364
  r14_off, r14H_off,                                              // 368, 372
  r13_off, r13H_off,                                              // 376, 380
  r12_off, r12H_off,                                              // 384, 388
  r11_off, r11H_off,                                              // 392, 396
  r10_off, r10H_off,                                              // 400, 404
  r9_off, r9H_off,                                                // 408, 412
  r8_off, r8H_off,                                                // 416, 420
  rdi_off, rdiH_off,                                              // 424, 428
#else
  rdi_off = extra_space_offset,
#endif // _LP64
  rsi_off, SLOT2(rsiH_off)                                        // 432, 436
  rbp_off, SLOT2(rbpH_off)                                        // 440, 444
  rsp_off, SLOT2(rspH_off)                                        // 448, 452
  rbx_off, SLOT2(rbxH_off)                                        // 456, 460
  rdx_off, SLOT2(rdxH_off)                                        // 464, 468
  rcx_off, SLOT2(rcxH_off)                                        // 472, 476
  rax_off, SLOT2(raxH_off)                                        // 480, 484
  saved_rbp_off, SLOT2(saved_rbpH_off)                            // 488, 492
  return_off, SLOT2(returnH_off)                                  // 496, 500
  reg_save_frame_size   // As noted: neglects any parameters to runtime // 504
};
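
// The byte offsets in the comments above assume 4-byte stack slots
// (VMRegImpl::stack_slot_size == 4): each enum value is a slot index, so its
// byte offset from rsp after save_live_registers is
// slot_index * VMRegImpl::stack_slot_size.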


// Save off registers which might be killed by calls into the runtime.
// Tries to be smart about FP registers.  In particular we separate
// saving and describing the FPU registers for deoptimization since we
// have to save the FPU registers twice if we describe them and on P4
// saving FPU registers which don't contain anything appears
// expensive.  The deopt blob is the only thing which needs to
// describe FPU registers.  In all other cases it should be sufficient
// to simply save their current value.

static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
                                bool save_fpu_registers = true) {

  // In 64bit all the args are in regs so there are no additional stack slots
  LP64_ONLY(num_rt_args = 0);
  LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
  int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);

  // record saved value locations in an OopMap
  // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
#ifdef _LP64
  map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args),  r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args),  r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());

  // This is stupid but needed.
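  // (each 64-bit register spans two 32-bit VMReg slots, so the oop map has
  // to describe the high half of every register explicitly via ->next())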
  map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());

  map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args),  r8->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args),  r9->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
  map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
#endif // _LP64

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      int fpu_off = float_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
        VMReg fpu_name_0 = FrameMap::fpu_regname(n);
        map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
        }
        fpu_off += 2;
      }
      assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
    }

    if (UseSSE >= 2) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        // %%% This is really a waste but we'll keep things as they were for now
        if (true) {
          map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
        }
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");

    } else if (UseSSE == 1) {
      int xmm_off = xmm_regs_as_doubles_off;
      for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
        VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
        map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
        xmm_off += 2;
      }
      assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
    }
  }

  return map;
}

static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
                                   bool save_fpu_registers = true) {
  __ block_comment("save_live_registers");

  __ pusha();         // integer registers

  // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
  // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");

  __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);

#ifdef ASSERT
  __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif

  if (save_fpu_registers) {
    if (UseSSE < 2) {
      // save FPU stack
      __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
      __ fwait();

#ifdef ASSERT
      Label ok;
      __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ jccb(Assembler::equal, ok);
      __ stop("corrupted control word detected");
      __ bind(ok);
#endif

      // Reset the control word to guard against exceptions being unmasked
      // since fstp_d can cause FPU stack underflow exceptions.  Write it
      // into the on stack copy and then reload that to make sure that the
      // current and future values are correct.
      __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));

      // Save the FPU registers in de-opt-able form
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE >= 2) {
      // save XMM registers
      // XMM registers can contain float or double values, but this is not known here,
      // so always save them as doubles.
      // note that float values are _not_ converted automatically, so for float values
      // the second word contains only garbage data.
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
#ifdef _LP64
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64), xmm8);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72), xmm9);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80), xmm10);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88), xmm11);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96), xmm12);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
      __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
#endif // _LP64
    } else if (UseSSE == 1) {
      // save XMM registers as float because double not supported without SSE2
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0), xmm0);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8), xmm1);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
      __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
    }
  }

  // FPU stack must be empty now
  __ verify_FPU(0, "save_live_registers");

  return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}


static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
  if (restore_fpu_registers) {
    if (UseSSE >= 2) {
      // restore XMM registers
      __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
#ifdef _LP64
      __ movdbl(xmm8,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  64));
      __ movdbl(xmm9,  Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  72));
      __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  80));
      __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  88));
      __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  96));
      __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
      __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
      __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
#endif // _LP64
    } else if (UseSSE == 1) {
      // restore XMM registers
      __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  0));
      __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size +  8));
      __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
      __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
      __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
      __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
      __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
      __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
    }

    if (UseSSE < 2) {
      __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
    } else {
      // check that FPU stack is really empty
      __ verify_FPU(0, "restore_live_registers");
    }

  } else {
    // check that FPU stack is really empty
    __ verify_FPU(0, "restore_live_registers");
  }

#ifdef ASSERT
  {
    Label ok;
    __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
    __ jcc(Assembler::equal, ok);
    __ stop("bad offsets in frame");
    __ bind(ok);
  }
#endif // ASSERT

  __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}


static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers");

  restore_fpu(sasm, restore_fpu_registers);
  __ popa();
}


static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
  __ block_comment("restore_live_registers_except_rax");

  restore_fpu(sasm, restore_fpu_registers);
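
  // Restore the integer registers saved by pusha, but leave rax (the
  // return register) untouched; the loads below mirror pusha's push order,
  // with r15 pushed last and therefore at Address(rsp, 0).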
#ifdef _LP64
  __ movptr(r15, Address(rsp, 0));
  __ movptr(r14, Address(rsp, wordSize));
  __ movptr(r13, Address(rsp, 2 * wordSize));
  __ movptr(r12, Address(rsp, 3 * wordSize));
  __ movptr(r11, Address(rsp, 4 * wordSize));
  __ movptr(r10, Address(rsp, 5 * wordSize));
  __ movptr(r9,  Address(rsp, 6 * wordSize));
  __ movptr(r8,  Address(rsp, 7 * wordSize));
  __ movptr(rdi, Address(rsp, 8 * wordSize));
  __ movptr(rsi, Address(rsp, 9 * wordSize));
  __ movptr(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  __ movptr(rbx, Address(rsp, 12 * wordSize));
  __ movptr(rdx, Address(rsp, 13 * wordSize));
  __ movptr(rcx, Address(rsp, 14 * wordSize));

  __ addptr(rsp, 16 * wordSize);
#else

  __ pop(rdi);
  __ pop(rsi);
  __ pop(rbp);
  __ pop(rbx); // skip this value
  __ pop(rbx);
  __ pop(rdx);
  __ pop(rcx);
  __ addptr(rsp, BytesPerWord);
#endif // _LP64
}


void Runtime1::initialize_pd() {
  // nothing to do
}


// target: the entry point of the method that creates and posts the exception oop
// has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)

OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
  // preserve all registers
  int num_rt_args = has_argument ? 2 : 1;
  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

  // now all registers are saved and can be used freely
  // verify that no old value is used accidentally
  __ invalidate_registers(true, true, true, true, true, true);

  // registers used by this stub
  const Register temp_reg = rbx;

  // load argument for exception that is passed as an argument into the stub
  if (has_argument) {
#ifdef _LP64
    __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
#else
    __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
    __ push(temp_reg);
#endif // _LP64
  }
  int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);

  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  __ stop("should not reach here");

  return oop_maps;
}
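
// Example use (sketch, matching the throw stubs in generate_code_for below):
//   oop_maps = generate_exception_throw(
//       sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);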


OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
  __ block_comment("generate_handle_exception");

  // incoming parameters
  const Register exception_oop = rax;
  const Register exception_pc  = rdx;
  // other registers used in this stub
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // Save registers, if required.
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* oop_map = NULL;
  switch (id) {
  case forward_exception_id:
    // We're handling an exception in the context of a compiled frame.
    // The registers have been saved in the standard places.  Perform
    // an exception lookup in the caller and dispatch to the handler
    // if found.  Otherwise unwind and dispatch to the caller's
    // exception handler.
    oop_map = generate_oop_map(sasm, 1 /*thread*/);

    // load and clear pending exception oop into RAX
    __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // load issuing PC (the return address for this stub) into rdx
    __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));

    // make sure that the vm_results are cleared (may be unnecessary)
    __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
    __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
    break;
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // At this point all registers MAY be live.
    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id: {
    // At this point all registers except exception oop (RAX) and
    // exception pc (RDX) are dead.
    const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
    oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
    sasm->set_frame_size(frame_size);
    WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
    break;
  }
  default:  ShouldNotReachHere();
  }

#ifdef TIERED
  // C2 can leave the fpu stack dirty
  if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // TIERED

  // verify that only rax and rdx are valid at this time
  __ invalidate_registers(false, true, true, false, true, true);
  // verify that rax contains a valid exception
  __ verify_not_null_oop(exception_oop);

  // load address of JavaThread object for thread-local data
  NOT_LP64(__ get_thread(thread);)

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are
  // empty before writing to them
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop already set");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc already set");
  __ bind(pc_empty);
#endif

  // save exception oop and issuing pc into JavaThread
  // (exception handler will load it from here)
  __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
  __ movptr(Address(thread, JavaThread::exception_pc_offset()),  exception_pc);

  // patch throwing pc into return address (has bci & oop map)
  __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);

  // compute the exception handler.
  // the exception oop and the throwing pc are read from the fields in JavaThread
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
  oop_maps->add_gc_map(call_offset, oop_map);

  // rax: handler address
  //      will be the deopt blob if nmethod was deoptimized while we looked up
  //      handler regardless of whether handler existed in the nmethod.

  // only rax is valid at this time, all other registers have been destroyed by the runtime call
  __ invalidate_registers(false, true, true, true, true, true);

  // patch the return address, this stub will directly return to the exception handler
  __ movptr(Address(rbp, 1*BytesPerWord), rax);

  switch (id) {
  case forward_exception_id:
  case handle_exception_nofpu_id:
  case handle_exception_id:
    // Restore the registers that were saved at the beginning.
    restore_live_registers(sasm, id == handle_exception_nofpu_id);
    break;
  case handle_exception_from_callee_id:
    // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
    // since we do a leave anyway.

    // Pop the return address since we are possibly changing SP (restoring from BP).
    __ leave();
    __ pop(rcx);

    // Restore SP from BP if the exception PC is a method handle call site.
    NOT_LP64(__ get_thread(thread);)
    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
    __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
    __ jmp(rcx);  // jump to exception handler
    break;
  default:  ShouldNotReachHere();
  }

  return oop_maps;
}


void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
  // incoming parameters
  const Register exception_oop = rax;
  // callee-saved copy of exception_oop during runtime call
  const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
  // other registers used in this stub
  const Register exception_pc = rdx;
  const Register handler_addr = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);

  // verify that only rax is valid at this time
  __ invalidate_registers(false, true, true, true, true, true);

#ifdef ASSERT
  // check that fields in JavaThread for exception oop and issuing pc are empty
  NOT_LP64(__ get_thread(thread);)
  Label oop_empty;
  __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
  __ jcc(Assembler::equal, oop_empty);
  __ stop("exception oop must be empty");
  __ bind(oop_empty);

  Label pc_empty;
  __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
  __ jcc(Assembler::equal, pc_empty);
  __ stop("exception pc must be empty");
  __ bind(pc_empty);
#endif

  // clear the FPU stack in case any FPU results are left behind
  __ empty_FPU_stack();

  // save exception_oop in callee-saved register to preserve it during runtime calls
  __ verify_not_null_oop(exception_oop);
  __ movptr(exception_oop_callee_saved, exception_oop);

  NOT_LP64(__ get_thread(thread);)
  // Get return address (is on top of stack after leave).
  __ movptr(exception_pc, Address(rsp, 0));

  // search the exception handler address of the caller (using the return address)
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
  // rax: exception handler address of the caller

  // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
  __ invalidate_registers(false, true, true, true, false, true);

  // move result of call into correct register
  __ movptr(handler_addr, rax);

  // Restore exception oop to RAX (required convention of exception handler).
  __ movptr(exception_oop, exception_oop_callee_saved);

  // verify that there is really a valid exception in rax
  __ verify_not_null_oop(exception_oop);

  // get throwing pc (= return address).
  // rdx has been destroyed by the call, so it must be set again
  // the pop is also necessary to simulate the effect of a ret(0)
  __ pop(exception_pc);

  // Restore SP from BP if the exception PC is a method handle call site.
  NOT_LP64(__ get_thread(thread);)
  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);

  // continue at exception handler (return address removed)
  // note: do *not* remove arguments when unwinding the
  //       activation since the caller assumes having
  //       all arguments on the stack when entering the
  //       runtime to determine the exception handler
  //       (GC happens at call site with arguments!)
  // rax: exception oop
  // rdx: throwing pc
  // rbx: exception handler
  __ jmp(handler_addr);
}


OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // use the maximum number of runtime-arguments here because it is difficult to
  // distinguish each RT-Call.
  // Note: This number affects also the RT-Call in generate_handle_exception because
  //       the oop-map is shared for all calls.
  const int num_rt_args = 2;  // thread + dummy

  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  OopMap* oop_map = save_live_registers(sasm, num_rt_args);

#ifdef _LP64
  const Register thread = r15_thread;
  // No need to worry about dummy
  __ mov(c_rarg0, thread);
#else
  __ push(rax); // push dummy

  const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
  // push java thread (becomes first argument of C function)
  __ get_thread(thread);
  __ push(thread);
#endif // _LP64
  __ set_last_Java_frame(thread, noreg, rbp, NULL);
  // do the call
  __ call(RuntimeAddress(target));
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(__ offset(), oop_map);
  // verify callee-saved register
#ifdef ASSERT
  guarantee(thread != rax, "change this code");
  __ push(rax);
  { Label L;
    __ get_thread(rax);
    __ cmpptr(thread, rax);
    __ jcc(Assembler::equal, L);
    __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
    __ bind(L);
  }
  __ pop(rax);
#endif
  __ reset_last_Java_frame(thread, true, false);
#ifndef _LP64
  __ pop(rcx); // discard thread arg
  __ pop(rcx); // discard dummy
#endif // _LP64

  // check for pending exceptions
  { Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    // exception pending => remove activation and forward to exception handler

    __ testptr(rax, rax);                                   // have we deoptimized?
    __ jump_cc(Assembler::equal,
               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));

    // the deopt blob expects exceptions in the special fields of
    // JavaThread, so copy and clear pending exception.

    // load and clear pending exception
    __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
    __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);

    // check that there is really a valid exception
    __ verify_not_null_oop(rax);

    // load throwing pc: this is the return address of the stub
    __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));

#ifdef ASSERT
    // check that fields in JavaThread for exception oop and issuing pc are empty
    Label oop_empty;
    __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, oop_empty);
    __ stop("exception oop must be empty");
    __ bind(oop_empty);

    Label pc_empty;
    __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, pc_empty);
    __ stop("exception pc must be empty");
    __ bind(pc_empty);
#endif

    // store exception oop and throwing pc to JavaThread
    __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
    __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);

    restore_live_registers(sasm);

    __ leave();
    __ addptr(rsp, BytesPerWord);  // remove return address from stack

    // Forward the exception directly to deopt blob. We can blow no
    // registers and must leave throwing pc on the stack. A patch may
    // have values live in registers, so we use the entry point that
    // expects the exception in TLS.
    __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));

    __ bind(L);
  }


  // Runtime will return true if the nmethod has been deoptimized during
  // the patching process. In that case we must do a deopt reexecute instead.

  Label reexecuteEntry, cont;

  __ testptr(rax, rax);                                 // have we deoptimized?
  __ jcc(Assembler::equal, cont);                       // no

  // Will reexecute. Proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and
  // jump to the deopt blob.
  restore_live_registers(sasm);
  __ leave();
  __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));

  __ bind(cont);
  restore_live_registers(sasm);
  __ leave();
  __ ret(0);

  return oop_maps;
}
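
// Used below (sketch) for the patching stubs, e.g.:
//   oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));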


OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {

  // for better readability
  const bool must_gc_arguments = true;
  const bool dont_gc_arguments = false;

  // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
  bool save_fpu_registers = true;

  // stub code & info for the different stubs
  OopMapSet* oop_maps = NULL;
  switch (id) {
    case forward_exception_id:
      {
        oop_maps = generate_handle_exception(id, sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_instance_id:
    case fast_new_instance_id:
    case fast_new_instance_init_check_id:
      {
        Register klass = rdx; // Incoming
        Register obj   = rax; // Result

        if (id == new_instance_id) {
          __ set_info("new_instance", dont_gc_arguments);
        } else if (id == fast_new_instance_id) {
          __ set_info("fast new_instance", dont_gc_arguments);
        } else {
          assert(id == fast_new_instance_init_check_id, "bad StubID");
          __ set_info("fast new_instance init check", dont_gc_arguments);
        }

        if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
            UseTLAB && FastTLABRefill) {
          Label slow_path;
          Register obj_size = rcx;
          Register t1       = rbx;
          Register t2       = rsi;
          assert_different_registers(klass, obj, obj_size, t1, t2);

          __ push(rdi);
          __ push(rbx);

          if (id == fast_new_instance_init_check_id) {
            // make sure the klass is initialized
            __ cmpl(Address(klass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc)), instanceKlass::fully_initialized);
            __ jcc(Assembler::notEqual, slow_path);
          }

#ifdef ASSERT
          // assert object can be fast path allocated
          {
            Label ok, not_ok;
            __ movl(obj_size, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
            __ cmpl(obj_size, 0);  // make sure it's an instance (LH > 0)
            __ jcc(Assembler::lessEqual, not_ok);
            __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
            __ jcc(Assembler::zero, ok);
            __ bind(not_ok);
            __ stop("assert(can be fast path allocated)");
            __ should_not_reach_here();
            __ bind(ok);
          }
#endif // ASSERT

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi

          __ bind(retry_tlab);

          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));

          __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(try_eden);
          // get the instance size (size is positive so movl is fine for 64bit)
          __ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));

          __ eden_allocate(obj, obj_size, 0, t1, slow_path);
          __ incr_allocated_bytes(thread, obj_size, 0);

          __ initialize_object(obj, klass, obj_size, 0, t1, t2);
          __ verify_oop(obj);
          __ pop(rbx);
          __ pop(rdi);
          __ ret(0);

          __ bind(slow_path);
          __ pop(rbx);
          __ pop(rdi);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 2);
        int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);
        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new instance
      }

      break;

    case counter_overflow_id:
      {
        Register bci = rax, method = rbx;
        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        // Retrieve bci
        __ movl(bci, Address(rbp, 2*BytesPerWord));
        // And a pointer to the methodOop
        __ movptr(method, Address(rbp, 3*BytesPerWord));
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm);
        __ leave();
        __ ret(0);
      }
      break;

    case new_type_array_id:
    case new_object_array_id:
      {
        Register length = rbx; // Incoming
        Register klass  = rdx; // Incoming
        Register obj    = rax; // Result

        if (id == new_type_array_id) {
          __ set_info("new_type_array", dont_gc_arguments);
        } else {
          __ set_info("new_object_array", dont_gc_arguments);
        }

#ifdef ASSERT
        // assert object type is really an array of the proper kind
        {
          Label ok;
          Register t0 = obj;
          __ movl(t0, Address(klass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc)));
          __ sarl(t0, Klass::_lh_array_tag_shift);
          int tag = ((id == new_type_array_id)
                     ? Klass::_lh_array_tag_type_value
                     : Klass::_lh_array_tag_obj_value);
          __ cmpl(t0, tag);
          __ jcc(Assembler::equal, ok);
          __ stop("assert(is an array klass)");
          __ should_not_reach_here();
          __ bind(ok);
        }
#endif // ASSERT

        if (UseTLAB && FastTLABRefill) {
          Register arr_size = rsi;
          Register t1       = rcx;  // must be rcx for use as shift count
          Register t2       = rdi;
          Label slow_path;
          assert_different_registers(length, klass, obj, arr_size, t1, t2);

          // check that array length is small enough for fast path.
          __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
          __ jcc(Assembler::above, slow_path);

          // if we got here then the TLAB allocation failed, so try
          // refilling the TLAB or allocating directly from eden.
          Label retry_tlab, try_eden;
          const Register thread =
            __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi

          __ bind(retry_tlab);

          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
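          // Worked example (illustrative): for an int[3], layout_helper
          // encodes 4-byte elements; assuming a 16-byte header this gives
          // arr_size = round_up(16 + (3 << 2), MinObjAlignmentInBytes) = 32.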

          __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path);  // preserves arr_size

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(try_eden);
          // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
          // since size is positive movl does right thing on 64bit
          __ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
          // since size is positive movl does right thing on 64bit
          __ movl(arr_size, length);
          assert(t1 == rcx, "fixed register usage");
          __ shlptr(arr_size /* by t1=rcx, mod 32 */);
          __ shrptr(t1, Klass::_lh_header_size_shift);
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ addptr(arr_size, t1);
          __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
          __ andptr(arr_size, ~MinObjAlignmentInBytesMask);

          __ eden_allocate(obj, arr_size, 0, t1, slow_path);  // preserves arr_size
          __ incr_allocated_bytes(thread, arr_size, 0);

          __ initialize_header(obj, klass, length, t1, t2);
          __ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
          assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
          assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
          __ andptr(t1, Klass::_lh_header_size_mask);
          __ subptr(arr_size, t1);  // body length
          __ addptr(t1, obj);       // body start
          __ initialize_body(t1, arr_size, 0, t2);
          __ verify_oop(obj);
          __ ret(0);

          __ bind(slow_path);
        }

        __ enter();
        OopMap* map = save_live_registers(sasm, 3);
        int call_offset;
        if (id == new_type_array_id) {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
        } else {
          call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
        }

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        __ verify_oop(obj);
        __ leave();
        __ ret(0);

        // rax: new array
      }
      break;

    case new_multi_array_id:
      { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
        // rax: klass
        // rbx: rank
        // rcx: address of 1st dimension
        OopMap* map = save_live_registers(sasm, 4);
        int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers_except_rax(sasm);

        // rax: new multi array
        __ verify_oop(rax);
      }
      break;

    case register_finalizer_id:
      {
        __ set_info("register_finalizer", dont_gc_arguments);

        // This is called via call_runtime so the arguments
        // will be placed in C ABI locations
#ifdef _LP64
        __ verify_oop(c_rarg0);
        __ mov(rax, c_rarg0);
#else
        // The object is passed on the stack and we haven't pushed a
        // frame yet so it's one word away from top of stack.
        __ movptr(rax, Address(rsp, 1 * BytesPerWord));
        __ verify_oop(rax);
#endif // _LP64

        // load the klass and check the has-finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);
        __ ret(0);

        __ bind(register_finalizer);
        __ enter();
        OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
        int call_offset = __ call_RT(noreg, noreg,
                                     CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, oop_map);

        // Now restore all the live registers
        restore_live_registers(sasm);

        __ leave();
        __ ret(0);
      }
      break;

    case throw_range_check_failed_id:
      { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
      }
      break;

    case throw_index_exception_id:
      { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
      }
      break;

    case throw_div0_exception_id:
      { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
      }
      break;

    case throw_null_pointer_exception_id:
      { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
      }
      break;

    case handle_exception_nofpu_id:
    case handle_exception_id:
      { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case handle_exception_from_callee_id:
      { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
        oop_maps = generate_handle_exception(id, sasm);
      }
      break;

    case unwind_exception_id:
      { __ set_info("unwind_exception", dont_gc_arguments);
        // note: no stubframe since we are about to leave the current
        //       activation and we are calling a leaf VM function only.
        generate_unwind_exception(sasm);
      }
      break;

    case throw_array_store_exception_id:
      { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
        // tos + 0: link
        //     + 1: return address
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
      }
      break;

    case throw_class_cast_exception_id:
      { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
      }
      break;

    case throw_incompatible_class_change_error_id:
      { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
        oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
      }
      break;

    case slow_subtype_check_id:
      {
        // Typical calling sequence:
        // __ push(klass_RInfo);  // object klass or other subclass
        // __ push(sup_k_RInfo);  // array element klass or other superclass
        // __ call(slow_subtype_check);
        // Note that the subclass is pushed first, and is therefore deepest.
        // Previous versions of this code reversed the names 'sub' and 'super'.
        // This was operationally harmless but made the code unreadable.
        enum layout {
          rax_off, SLOT2(raxH_off)
          rcx_off, SLOT2(rcxH_off)
          rsi_off, SLOT2(rsiH_off)
          rdi_off, SLOT2(rdiH_off)
          // saved_rbp_off, SLOT2(saved_rbpH_off)
          return_off, SLOT2(returnH_off)
          sup_k_off, SLOT2(sup_kH_off)
          klass_off, SLOT2(superH_off)
          framesize,
          result_off = klass_off  // deepest argument is also the return value
        };

        __ set_info("slow_subtype_check", dont_gc_arguments);
        __ push(rdi);
        __ push(rsi);
        __ push(rcx);
        __ push(rax);

        // This is called by pushing args and not with C abi
        __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
        __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass

        Label miss;
        __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);

        // fallthrough on success:
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);

        __ bind(miss);
        __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
        __ pop(rax);
        __ pop(rcx);
        __ pop(rsi);
        __ pop(rdi);
        __ ret(0);
      }
      break;

    case monitorenter_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorenter_id:
      {
        StubFrame f(sasm, "monitorenter", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(1, rax); // rax: object
        f.load_argument(0, rbx); // rbx: lock address

        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);
      }
      break;

    case monitorexit_nofpu_id:
      save_fpu_registers = false;
      // fall through
    case monitorexit_id:
      {
        StubFrame f(sasm, "monitorexit", dont_gc_arguments);
        OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);

        // Called with store_parameter and not C abi

        f.load_argument(0, rax); // rax: lock address

        // note: really a leaf routine but must setup last java sp
        //       => use call_RT for now (speed can be improved by
        //       doing last java sp setup manually)
        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);

        oop_maps = new OopMapSet();
        oop_maps->add_gc_map(call_offset, map);
        restore_live_registers(sasm, save_fpu_registers);

      }
      break;

    case access_field_patching_id:
      { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
      }
      break;

    case load_klass_patching_id:
      { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
        // we should set up register map
        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
      }
      break;
1467 case jvmti_exception_throw_id:
1468 { // rax,: exception oop
1469 StubFrame f(sasm, "jvmti_exception_throw", dont_gc_arguments);
1470 // Preserve all registers across this potentially blocking call
1471 const int num_rt_args = 2; // thread, exception oop
1472 OopMap* map = save_live_registers(sasm, num_rt_args);
1473 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, Runtime1::post_jvmti_exception_throw), rax);
1474 oop_maps = new OopMapSet();
1475 oop_maps->add_gc_map(call_offset, map);
1476 restore_live_registers(sasm);
1477 }
1478 break;

    case dtrace_object_alloc_id:
      { // rax: object
        StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
        // we can't GC here, so skip the oop map, but make sure that all
        // the live registers get saved.
        save_live_registers(sasm, 1);
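        // pass the object: on the stack for 32-bit, in c_rarg0 for 64-bit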
        __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
        __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
        NOT_LP64(__ pop(rax));

        restore_live_registers(sasm);
      }
      break;

    case fpu2long_stub_id:
      {
        // rax and rdx are destroyed, but should be free since the result is returned there
        // preserve rsi and rcx
        __ push(rsi);
        __ push(rcx);
        LP64_ONLY(__ push(rdx);)

        // check for NaN
        Label return0, do_return, return_min_jlong, do_convert;

        Address value_high_word(rsp, wordSize + 4);
        Address value_low_word(rsp, wordSize);
        Address result_high_word(rsp, 3*wordSize + 4);
        Address result_low_word(rsp, 3*wordSize);
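        // scratch layout once rsp is dropped by 32 bytes below:
        //   [rsp + 0]          saved/modified FPU control words (fnstcw/fldcw)
        //   [rsp + wordSize]   the double value being converted
        //   [rsp + 3*wordSize] the 64-bit integer result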

        __ subptr(rsp, 32);                    // more than enough on 32bit
        __ fst_d(value_low_word);
        __ movl(rax, value_high_word);
        __ andl(rax, 0x7ff00000);
        __ cmpl(rax, 0x7ff00000);
        __ jcc(Assembler::notEqual, do_convert);
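        // exponent is all ones: NaN if any mantissa bit is set, else infinity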
        __ movl(rax, value_high_word);
        __ andl(rax, 0xfffff);
        __ orl(rax, value_low_word);
        __ jcc(Assembler::notZero, return0);

        __ bind(do_convert);
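        // switch the x87 rounding mode to round-toward-zero (RC bits = 11)
        // so fistp truncates as Java's d2l requires, then restore the old
        // control word afterwards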
        __ fnstcw(Address(rsp, 0));
        __ movzwl(rax, Address(rsp, 0));
        __ orl(rax, 0xc00);
        __ movw(Address(rsp, 2), rax);
        __ fldcw(Address(rsp, 2));
        __ fwait();
        __ fistp_d(result_low_word);
        __ fldcw(Address(rsp, 0));
        __ fwait();
        // This gets the entire long in rax on 64bit
        __ movptr(rax, result_low_word);
        // test whether the result is the x87 "integer indefinite" value
        // min_jlong (0x8000000000000000), which fistp stores on overflow
        __ movl(rdx, result_high_word);
        __ mov(rcx, rax);
        // the 32-bit xor is a no-op on the value but zeroes rcx's upper half on 64-bit
        __ xorl(rcx, 0x0);
        __ movl(rsi, 0x80000000);
        __ xorl(rsi, rdx);
        __ orl(rcx, rsi);
        __ jcc(Assembler::notEqual, do_return);
        __ fldz();
        __ fcomp_d(value_low_word);
        __ fnstsw_ax();
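        // result was out of range: the original value's sign picks the
        // saturated result (negative -> min_jlong, positive -> max_jlong)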
#ifdef _LP64
        __ testl(rax, 0x4100);  // ZF & CF == 0
        __ jcc(Assembler::equal, return_min_jlong);
#else
        __ sahf();
        __ jcc(Assembler::above, return_min_jlong);
#endif // _LP64
        // return max_jlong
#ifndef _LP64
        __ movl(rdx, 0x7fffffff);
        __ movl(rax, 0xffffffff);
#else
        __ mov64(rax, CONST64(0x7fffffffffffffff));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return_min_jlong);
#ifndef _LP64
        __ movl(rdx, 0x80000000);
        __ xorl(rax, rax);
#else
        __ mov64(rax, CONST64(0x8000000000000000));
#endif // _LP64
        __ jmp(do_return);

        __ bind(return0);
        __ fpop();
#ifndef _LP64
        __ xorptr(rdx, rdx);
        __ xorptr(rax, rax);
#else
        __ xorptr(rax, rax);
#endif // _LP64

        __ bind(do_return);
        __ addptr(rsp, 32);
        LP64_ONLY(__ pop(rdx);)
        __ pop(rcx);
        __ pop(rsi);
        __ ret(0);
      }
      break;

#ifndef SERIALGC
    case g1_pre_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
        // arg0 : previous value of memory

        BarrierSet* bs = Universe::heap()->barrier_set();
        if (bs->kind() != BarrierSet::G1SATBCTLogging) {
          __ movptr(rax, (int)id);
          __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
          __ should_not_reach_here();
          break;
        }
        __ push(rax);
        __ push(rdx);

        const Register pre_val = rax;
        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
        const Register tmp = rdx;

        NOT_LP64(__ get_thread(thread);)

        Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_active()));

        Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
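        // the SATB queue index is a byte offset that counts down towards
        // zero; an index of zero means the thread's buffer is full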

        Label done;
        Label runtime;

        // Can we store the original value in the thread's buffer?

#ifdef _LP64
        __ movslq(tmp, queue_index);
        __ cmpq(tmp, 0);
#else
        __ cmpl(queue_index, 0);
#endif
        __ jcc(Assembler::equal, runtime);
#ifdef _LP64
        __ subq(tmp, wordSize);
        __ movl(queue_index, tmp);
        __ addq(tmp, buffer);
#else
        __ subl(queue_index, wordSize);
        __ movl(tmp, buffer);
        __ addl(tmp, queue_index);
#endif

        // prev_val (rax)
        f.load_argument(0, pre_val);
        __ movptr(Address(tmp, 0), pre_val);
        __ jmp(done);

        __ bind(runtime);
        __ push(rcx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
# ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
# endif
#endif
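        // the registers pushed above are caller-saved and not preserved by
        // call_VM_leaf, so they must be saved around the runtime call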
        // load the pre-value
        f.load_argument(0, rcx);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
#ifdef _LP64
# ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
# endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rcx);
        __ bind(done);

        __ pop(rdx);
        __ pop(rax);
      }
      break;

    case g1_post_barrier_slow_id:
      {
        StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);

        // arg0: store_address
        Address store_addr(rbp, 2*BytesPerWord);

        BarrierSet* bs = Universe::heap()->barrier_set();
        CardTableModRefBS* ct = (CardTableModRefBS*)bs;
        Label done;
        Label runtime;

        // At this point we know new_value is non-NULL and that the store crosses regions.
        // Must check to see if the card is already dirty

        const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);

        Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                             PtrQueue::byte_offset_of_index()));
        Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        PtrQueue::byte_offset_of_buf()));
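        // the dirty card queue has the same layout as the SATB queue above:
        // the index counts down in bytes and zero means the buffer is full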

        __ push(rax);
        __ push(rcx);

        NOT_LP64(__ get_thread(thread);)
        ExternalAddress cardtable((address)ct->byte_map_base);
        assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

        const Register card_addr = rcx;
#ifdef _LP64
        const Register tmp = rscratch1;
        f.load_argument(0, card_addr);
        __ shrq(card_addr, CardTableModRefBS::card_shift);
        __ lea(tmp, cardtable);
        // get the address of the card
        __ addq(card_addr, tmp);
#else
        const Register card_index = rcx;
        f.load_argument(0, card_index);
        __ shrl(card_index, CardTableModRefBS::card_shift);

        Address index(noreg, card_index, Address::times_1);
        __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
#endif
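        // card_addr = byte_map_base + (store_address >> card_shift); a card
        // byte of zero means the card is already dirty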

        __ cmpb(Address(card_addr, 0), 0);
        __ jcc(Assembler::equal, done);

        // the store crosses regions with a non-NULL value and the card is clean:
        // dirty the card and log it.

        __ movb(Address(card_addr, 0), 0);

        __ cmpl(queue_index, 0);
        __ jcc(Assembler::equal, runtime);
        __ subl(queue_index, wordSize);

        const Register buffer_addr = rbx;
        __ push(rbx);

        __ movptr(buffer_addr, buffer);

#ifdef _LP64
        __ movslq(rscratch1, queue_index);
        __ addptr(buffer_addr, rscratch1);
#else
        __ addptr(buffer_addr, queue_index);
#endif
        __ movptr(Address(buffer_addr, 0), card_addr);

        __ pop(rbx);
        __ jmp(done);

        __ bind(runtime);
        __ push(rdx);
#ifdef _LP64
        __ push(r8);
        __ push(r9);
        __ push(r10);
        __ push(r11);
# ifndef _WIN64
        __ push(rdi);
        __ push(rsi);
# endif
#endif
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
#ifdef _LP64
# ifndef _WIN64
        __ pop(rsi);
        __ pop(rdi);
# endif
        __ pop(r11);
        __ pop(r10);
        __ pop(r9);
        __ pop(r8);
#endif
        __ pop(rdx);
        __ bind(done);

        __ pop(rcx);
        __ pop(rax);

      }
      break;
#endif // !SERIALGC

    default:
      { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
        __ movptr(rax, (int)id);
        __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
        __ should_not_reach_here();
      }
      break;
  }
  return oop_maps;
}

#undef __

const char *Runtime1::pd_name_for_address(address entry) {
  return "<unknown function>";
}