Mon, 12 Aug 2013 17:37:02 +0200
8015107: NPG: Use consistent naming for metaspace concepts
Reviewed-by: coleenp, mgerdin, hseigel
1 /*
2 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "c1/c1_Defs.hpp"
28 #include "c1/c1_MacroAssembler.hpp"
29 #include "c1/c1_Runtime1.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "nativeInst_x86.hpp"
32 #include "oops/compiledICHolder.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/jvmtiExport.hpp"
35 #include "register_x86.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/signature.hpp"
38 #include "runtime/vframeArray.hpp"
39 #include "utilities/macros.hpp"
40 #include "vmreg_x86.inline.hpp"
43 // Implementation of StubAssembler
45 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) {
46 // setup registers
47 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // rdi is a callee-saved register (Visual C++ calling convention)
48 assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different");
49 assert(oop_result1 != thread && metadata_result != thread, "registers must be different");
50 assert(args_size >= 0, "illegal args_size");
51 bool align_stack = false;
52 #ifdef _LP64
53 // At a method handle call, the stack may not be properly aligned
54 // when returning with an exception.
55 align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
56 #endif
58 #ifdef _LP64
59 mov(c_rarg0, thread);
60 set_num_rt_args(0); // Nothing on stack
61 #else
62 set_num_rt_args(1 + args_size);
64 // push java thread (becomes first argument of C function)
65 get_thread(thread);
66 push(thread);
67 #endif // _LP64
69 int call_offset;
70 if (!align_stack) {
71 set_last_Java_frame(thread, noreg, rbp, NULL);
72 } else {
73 address the_pc = pc();
74 call_offset = offset();
75 set_last_Java_frame(thread, noreg, rbp, the_pc);
76 andptr(rsp, -(StackAlignmentInBytes)); // Align stack
77 }
79 // do the call
80 call(RuntimeAddress(entry));
81 if (!align_stack) {
82 call_offset = offset();
83 }
84 // verify callee-saved register
85 #ifdef ASSERT
86 guarantee(thread != rax, "change this code");
87 push(rax);
88 { Label L;
89 get_thread(rax);
90 cmpptr(thread, rax);
91 jcc(Assembler::equal, L);
92 int3();
93 stop("StubAssembler::call_RT: rdi not callee saved?");
94 bind(L);
95 }
96 pop(rax);
97 #endif
98 reset_last_Java_frame(thread, true, align_stack);
100 // discard thread and arguments
101 NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
103 // check for pending exceptions
104 { Label L;
105 cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
106 jcc(Assembler::equal, L);
107 // exception pending => remove activation and forward to exception handler
108 movptr(rax, Address(thread, Thread::pending_exception_offset()));
109 // make sure that the vm_results are cleared
110 if (oop_result1->is_valid()) {
111 movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
112 }
113 if (metadata_result->is_valid()) {
114 movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
115 }
116 if (frame_size() == no_frame_size) {
117 leave();
118 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
119 } else if (_stub_id == Runtime1::forward_exception_id) {
120 should_not_reach_here();
121 } else {
122 jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
123 }
124 bind(L);
125 }
126 // get oop results if there are any and reset the values in the thread
127 if (oop_result1->is_valid()) {
128 get_vm_result(oop_result1, thread);
129 }
130 if (metadata_result->is_valid()) {
131 get_vm_result_2(metadata_result, thread);
132 }
133 return call_offset;
134 }
137 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) {
138 #ifdef _LP64
139 mov(c_rarg1, arg1);
140 #else
141 push(arg1);
142 #endif // _LP64
143 return call_RT(oop_result1, metadata_result, entry, 1);
144 }
147 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) {
148 #ifdef _LP64
149 if (c_rarg1 == arg2) {
150 if (c_rarg2 == arg1) {
151 xchgq(arg1, arg2);
152 } else {
153 mov(c_rarg2, arg2);
154 mov(c_rarg1, arg1);
155 }
156 } else {
157 mov(c_rarg1, arg1);
158 mov(c_rarg2, arg2);
159 }
160 #else
161 push(arg2);
162 push(arg1);
163 #endif // _LP64
164 return call_RT(oop_result1, metadata_result, entry, 2);
165 }
168 int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) {
169 #ifdef _LP64
170 // if there is any conflict use the stack
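// Illustrative note (not in the original source): a "conflict" here means an
// incoming register is itself one of the outgoing argument registers, so a
// naive sequence of moves could clobber a value before it is read. For
// example, with arg1 == c_rarg2, moving into c_rarg2 first would destroy
// arg1; round-tripping all three values through the stack makes the move
// ordering irrelevant.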
171 if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
172 arg2 == c_rarg1 || arg2 == c_rarg3 ||
173 arg3 == c_rarg1 || arg3 == c_rarg2) {
174 push(arg3);
175 push(arg2);
176 push(arg1);
177 pop(c_rarg1);
178 pop(c_rarg2);
179 pop(c_rarg3);
180 } else {
181 mov(c_rarg1, arg1);
182 mov(c_rarg2, arg2);
183 mov(c_rarg3, arg3);
184 }
185 #else
186 push(arg3);
187 push(arg2);
188 push(arg1);
189 #endif // _LP64
190 return call_RT(oop_result1, metadata_result, entry, 3);
191 }
194 // Implementation of StubFrame
196 class StubFrame: public StackObj {
197 private:
198 StubAssembler* _sasm;
200 public:
201 StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments);
202 void load_argument(int offset_in_words, Register reg);
204 ~StubFrame();
205 };
208 #define __ _sasm->
210 StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
211 _sasm = sasm;
212 __ set_info(name, must_gc_arguments);
213 __ enter();
214 }
216 // load parameters that were stored with LIR_Assembler::store_parameter
217 // Note: offsets for store_parameter and load_argument must match
218 void StubFrame::load_argument(int offset_in_words, Register reg) {
219 // rbp + 0: link
220 // + 1: return address
221 // + 2: argument with offset 0
222 // + 3: argument with offset 1
223 // + 4: ...
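// Illustrative example (not in the original source): assuming the layout
// above, load_argument(1, rbx) reads the second stored parameter from
// Address(rbp, (1 + 2) * BytesPerWord), i.e. three words above the saved
// rbp, past the link word and the return address.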
225 __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
226 }
229 StubFrame::~StubFrame() {
230 __ leave();
231 __ ret(0);
232 }
234 #undef __
237 // Implementation of Runtime1
239 #define __ sasm->
241 const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
242 const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
244 // Stack layout for saving/restoring all the registers needed during a runtime
245 // call (this includes deoptimization)
246 // Note: users of this frame may well have arguments to some runtime call
247 // while these values are on the stack. These positions neglect those arguments,
248 // but the code in save_live_registers will take the argument count into
249 // account.
250 //
251 #ifdef _LP64
252 #define SLOT2(x) x,
253 #define SLOT_PER_WORD 2
254 #else
255 #define SLOT2(x)
256 #define SLOT_PER_WORD 1
257 #endif // _LP64
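// Illustrative note (not in the original source): SLOT2 lets one enum list
// describe both word sizes. On 64-bit, "rax_off, SLOT2(raxH_off)" expands to
// "rax_off, raxH_off," (two 32-bit stack slots per register), while on
// 32-bit it expands to just "rax_off," (one slot per register).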
259 enum reg_save_layout {
260 // 64-bit needs to keep the stack 16-byte aligned, so we add some alignment dummies
261 // to make that happen and assert if the frame size we create is misaligned.
262 #ifdef _LP64
263 align_dummy_0, align_dummy_1,
264 #endif // _LP64
265 #ifdef _WIN64
266 // Windows always allocates space for its argument registers (see
267 // frame::arg_reg_save_area_bytes).
268 arg_reg_save_1, arg_reg_save_1H, // 0, 4
269 arg_reg_save_2, arg_reg_save_2H, // 8, 12
270 arg_reg_save_3, arg_reg_save_3H, // 16, 20
271 arg_reg_save_4, arg_reg_save_4H, // 24, 28
272 #endif // _WIN64
273 xmm_regs_as_doubles_off, // 32
274 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
275 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
276 // fpu_state_end_off is exclusive
277 fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
278 marker = fpu_state_end_off, SLOT2(markerH) // 352, 356
279 extra_space_offset, // 360
280 #ifdef _LP64
281 r15_off = extra_space_offset, r15H_off, // 360, 364
282 r14_off, r14H_off, // 368, 372
283 r13_off, r13H_off, // 376, 380
284 r12_off, r12H_off, // 384, 388
285 r11_off, r11H_off, // 392, 396
286 r10_off, r10H_off, // 400, 404
287 r9_off, r9H_off, // 408, 412
288 r8_off, r8H_off, // 416, 420
289 rdi_off, rdiH_off, // 424, 428
290 #else
291 rdi_off = extra_space_offset,
292 #endif // _LP64
293 rsi_off, SLOT2(rsiH_off) // 432, 436
294 rbp_off, SLOT2(rbpH_off) // 440, 444
295 rsp_off, SLOT2(rspH_off) // 448, 452
296 rbx_off, SLOT2(rbxH_off) // 456, 460
297 rdx_off, SLOT2(rdxH_off) // 464, 468
298 rcx_off, SLOT2(rcxH_off) // 472, 476
299 rax_off, SLOT2(raxH_off) // 480, 484
300 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492
301 return_off, SLOT2(returnH_off) // 496, 500
302 reg_save_frame_size // As noted: neglects any parameters to runtime // 504
303 };
307 // Save off registers which might be killed by calls into the runtime.
308 // Tries to be smart about FP registers. In particular we separate
309 // saving and describing the FPU registers for deoptimization since we
310 // have to save the FPU registers twice if we describe them and on P4
311 // saving FPU registers which don't contain anything appears
312 // expensive. The deopt blob is the only thing which needs to
313 // describe FPU registers. In all other cases it should be sufficient
314 // to simply save their current value.
316 static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
317 bool save_fpu_registers = true) {
319 // In 64bit all the args are in regs so there are no additional stack slots
320 LP64_ONLY(num_rt_args = 0);
321 LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
322 int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
323 sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
325 // record saved value locations in an OopMap
326 // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
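// Illustrative example (not in the original source): on 32-bit, a call_RT
// with one argument sets num_rt_args to 2 (thread + arg), so rax is
// described at stack slot rax_off + 2, i.e. two slots further from sp than
// in a zero-argument frame.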
327 OopMap* map = new OopMap(frame_size_in_slots, 0);
328 map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
329 map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
330 map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
331 map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
332 map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
333 map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
334 #ifdef _LP64
335 map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg());
336 map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg());
337 map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
338 map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
339 map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
340 map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
341 map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
342 map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
344 // This is stupid but needed.
345 map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
346 map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
347 map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
348 map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
349 map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
350 map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
352 map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next());
353 map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next());
354 map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
355 map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
356 map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
357 map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
358 map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
359 map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
360 #endif // _LP64
362 if (save_fpu_registers) {
363 if (UseSSE < 2) {
364 int fpu_off = float_regs_as_doubles_off;
365 for (int n = 0; n < FrameMap::nof_fpu_regs; n++) {
366 VMReg fpu_name_0 = FrameMap::fpu_regname(n);
367 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0);
368 // %%% This is really a waste but we'll keep things as they were for now
369 if (true) {
370 map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next());
371 }
372 fpu_off += 2;
373 }
374 assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots");
375 }
377 if (UseSSE >= 2) {
378 int xmm_off = xmm_regs_as_doubles_off;
379 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
380 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
381 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
382 // %%% This is really a waste but we'll keep things as they were for now
383 if (true) {
384 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next());
385 }
386 xmm_off += 2;
387 }
388 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
390 } else if (UseSSE == 1) {
391 int xmm_off = xmm_regs_as_doubles_off;
392 for (int n = 0; n < FrameMap::nof_xmm_regs; n++) {
393 VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg();
394 map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0);
395 xmm_off += 2;
396 }
397 assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers");
398 }
399 }
401 return map;
402 }
404 static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
405 bool save_fpu_registers = true) {
406 __ block_comment("save_live_registers");
408 __ pusha(); // integer registers
410 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
411 // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
413 __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
415 #ifdef ASSERT
416 __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
417 #endif
419 if (save_fpu_registers) {
420 if (UseSSE < 2) {
421 // save FPU stack
422 __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
423 __ fwait();
425 #ifdef ASSERT
426 Label ok;
427 __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
428 __ jccb(Assembler::equal, ok);
429 __ stop("corrupted control word detected");
430 __ bind(ok);
431 #endif
433 // Reset the control word to guard against exceptions being unmasked
434 // since fstp_d can cause FPU stack underflow exceptions. Write it
435 // into the on stack copy and then reload that to make sure that the
436 // current and future values are correct.
437 __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
438 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
440 // Save the FPU registers in de-opt-able form
441 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
442 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
443 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
444 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
445 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
446 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
447 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
448 __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
449 }
451 if (UseSSE >= 2) {
452 // save XMM registers
453 // XMM registers can contain float or double values, but this is not known here,
454 // so always save them as doubles.
455 // note that float values are _not_ converted automatically, so for float values
456 // the second word contains only garbage data.
457 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
458 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
459 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
460 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
461 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
462 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
463 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
464 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
465 #ifdef _LP64
466 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
467 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
468 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
469 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
470 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
471 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
472 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
473 __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
474 #endif // _LP64
475 } else if (UseSSE == 1) {
476 // save XMM registers as float because doubles are not supported without SSE2
477 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
478 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
479 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
480 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
481 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
482 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
483 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
484 __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
485 }
486 }
488 // FPU stack must be empty now
489 __ verify_FPU(0, "save_live_registers");
491 return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
492 }
495 static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
496 if (restore_fpu_registers) {
497 if (UseSSE >= 2) {
498 // restore XMM registers
499 __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
500 __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
501 __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
502 __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
503 __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
504 __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
505 __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
506 __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
507 #ifdef _LP64
508 __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
509 __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
510 __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
511 __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
512 __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
513 __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
514 __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
515 __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
516 #endif // _LP64
517 } else if (UseSSE == 1) {
518 // restore XMM registers
519 __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
520 __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
521 __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
522 __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
523 __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
524 __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
525 __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
526 __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
527 }
529 if (UseSSE < 2) {
530 __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
531 } else {
532 // check that FPU stack is really empty
533 __ verify_FPU(0, "restore_live_registers");
534 }
536 } else {
537 // check that FPU stack is really empty
538 __ verify_FPU(0, "restore_live_registers");
539 }
541 #ifdef ASSERT
542 {
543 Label ok;
544 __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
545 __ jcc(Assembler::equal, ok);
546 __ stop("bad offsets in frame");
547 __ bind(ok);
548 }
549 #endif // ASSERT
551 __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
552 }
555 static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
556 __ block_comment("restore_live_registers");
558 restore_fpu(sasm, restore_fpu_registers);
559 __ popa();
560 }
563 static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
564 __ block_comment("restore_live_registers_except_rax");
566 restore_fpu(sasm, restore_fpu_registers);
568 #ifdef _LP64
569 __ movptr(r15, Address(rsp, 0));
570 __ movptr(r14, Address(rsp, wordSize));
571 __ movptr(r13, Address(rsp, 2 * wordSize));
572 __ movptr(r12, Address(rsp, 3 * wordSize));
573 __ movptr(r11, Address(rsp, 4 * wordSize));
574 __ movptr(r10, Address(rsp, 5 * wordSize));
575 __ movptr(r9, Address(rsp, 6 * wordSize));
576 __ movptr(r8, Address(rsp, 7 * wordSize));
577 __ movptr(rdi, Address(rsp, 8 * wordSize));
578 __ movptr(rsi, Address(rsp, 9 * wordSize));
579 __ movptr(rbp, Address(rsp, 10 * wordSize));
580 // skip rsp
581 __ movptr(rbx, Address(rsp, 12 * wordSize));
582 __ movptr(rdx, Address(rsp, 13 * wordSize));
583 __ movptr(rcx, Address(rsp, 14 * wordSize));
585 __ addptr(rsp, 16 * wordSize);
586 #else
588 __ pop(rdi);
589 __ pop(rsi);
590 __ pop(rbp);
591 __ pop(rbx); // skip this value
592 __ pop(rbx);
593 __ pop(rdx);
594 __ pop(rcx);
595 __ addptr(rsp, BytesPerWord);
596 #endif // _LP64
597 }
600 void Runtime1::initialize_pd() {
601 // nothing to do
602 }
605 // target: the entry point of the method that creates and posts the exception oop
606 // has_argument: true if the exception needs an argument (passed on stack because registers must be preserved)
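// Illustrative note (not in the original source): callers such as the
// range-check stub pass has_argument == true, so num_rt_args below becomes 2
// (thread + argument) and the argument is fetched from the caller's frame at
// Address(rbp, 2*BytesPerWord).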
608 OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) {
609 // preserve all registers
610 int num_rt_args = has_argument ? 2 : 1;
611 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
613 // now all registers are saved and can be used freely
614 // verify that no old value is used accidentally
615 __ invalidate_registers(true, true, true, true, true, true);
617 // registers used by this stub
618 const Register temp_reg = rbx;
620 // load argument for exception that is passed as an argument into the stub
621 if (has_argument) {
622 #ifdef _LP64
623 __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
624 #else
625 __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
626 __ push(temp_reg);
627 #endif // _LP64
628 }
629 int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
631 OopMapSet* oop_maps = new OopMapSet();
632 oop_maps->add_gc_map(call_offset, oop_map);
634 __ stop("should not reach here");
636 return oop_maps;
637 }
640 OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
641 __ block_comment("generate_handle_exception");
643 // incoming parameters
644 const Register exception_oop = rax;
645 const Register exception_pc = rdx;
646 // other registers used in this stub
647 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
649 // Save registers, if required.
650 OopMapSet* oop_maps = new OopMapSet();
651 OopMap* oop_map = NULL;
652 switch (id) {
653 case forward_exception_id:
654 // We're handling an exception in the context of a compiled frame.
655 // The registers have been saved in the standard places. Perform
656 // an exception lookup in the caller and dispatch to the handler
657 // if found. Otherwise unwind and dispatch to the caller's
658 // exception handler.
659 oop_map = generate_oop_map(sasm, 1 /*thread*/);
661 // load and clear pending exception oop into RAX
662 __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
663 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
665 // load issuing PC (the return address for this stub) into rdx
666 __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
668 // make sure that the vm_results are cleared (may be unnecessary)
669 __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
670 __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
671 break;
672 case handle_exception_nofpu_id:
673 case handle_exception_id:
674 // At this point all registers MAY be live.
675 oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
676 break;
677 case handle_exception_from_callee_id: {
678 // At this point all registers except exception oop (RAX) and
679 // exception pc (RDX) are dead.
680 const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
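// Illustrative sizing (not in the original source), assuming
// frame::arg_reg_save_area_bytes is the 32-byte Win64 shadow space:
// frame_size is 2 words on Linux x86_64, 3 words on 32-bit, and
// 2 + 32/8 = 6 words on Win64.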
681 oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0);
682 sasm->set_frame_size(frame_size);
683 WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes));
684 break;
685 }
686 default: ShouldNotReachHere();
687 }
689 #ifdef TIERED
690 // C2 can leave the fpu stack dirty
691 if (UseSSE < 2) {
692 __ empty_FPU_stack();
693 }
694 #endif // TIERED
696 // verify that only rax and rdx are valid at this time
697 __ invalidate_registers(false, true, true, false, true, true);
698 // verify that rax contains a valid exception
699 __ verify_not_null_oop(exception_oop);
701 // load address of JavaThread object for thread-local data
702 NOT_LP64(__ get_thread(thread);)
704 #ifdef ASSERT
705 // check that fields in JavaThread for exception oop and issuing pc are
706 // empty before writing to them
707 Label oop_empty;
708 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
709 __ jcc(Assembler::equal, oop_empty);
710 __ stop("exception oop already set");
711 __ bind(oop_empty);
713 Label pc_empty;
714 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
715 __ jcc(Assembler::equal, pc_empty);
716 __ stop("exception pc already set");
717 __ bind(pc_empty);
718 #endif
720 // save exception oop and issuing pc into JavaThread
721 // (exception handler will load it from here)
722 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
723 __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
725 // patch throwing pc into return address (has bci & oop map)
726 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
728 // compute the exception handler.
729 // the exception oop and the throwing pc are read from the fields in JavaThread
730 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));
731 oop_maps->add_gc_map(call_offset, oop_map);
733 // rax: handler address
734 // will be the deopt blob if the nmethod was deoptimized while we looked up
735 // the handler, regardless of whether a handler existed in the nmethod.
737 // only rax is valid at this time; all other registers have been destroyed by the runtime call
738 __ invalidate_registers(false, true, true, true, true, true);
740 // patch the return address, this stub will directly return to the exception handler
741 __ movptr(Address(rbp, 1*BytesPerWord), rax);
743 switch (id) {
744 case forward_exception_id:
745 case handle_exception_nofpu_id:
746 case handle_exception_id:
747 // Restore the registers that were saved at the beginning.
748 restore_live_registers(sasm, id == handle_exception_nofpu_id);
749 break;
750 case handle_exception_from_callee_id:
751 // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
752 // since we do a leave anyway.
754 // Pop the return address since we are possibly changing SP (restoring from BP).
755 __ leave();
756 __ pop(rcx);
758 // Restore SP from BP if the exception PC is a method handle call site.
759 NOT_LP64(__ get_thread(thread);)
760 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
761 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
762 __ jmp(rcx); // jump to exception handler
763 break;
764 default: ShouldNotReachHere();
765 }
767 return oop_maps;
768 }
771 void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
772 // incoming parameters
773 const Register exception_oop = rax;
774 // callee-saved copy of exception_oop during runtime call
775 const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14);
776 // other registers used in this stub
777 const Register exception_pc = rdx;
778 const Register handler_addr = rbx;
779 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
781 // verify that only rax is valid at this time
782 __ invalidate_registers(false, true, true, true, true, true);
784 #ifdef ASSERT
785 // check that fields in JavaThread for exception oop and issuing pc are empty
786 NOT_LP64(__ get_thread(thread);)
787 Label oop_empty;
788 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
789 __ jcc(Assembler::equal, oop_empty);
790 __ stop("exception oop must be empty");
791 __ bind(oop_empty);
793 Label pc_empty;
794 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
795 __ jcc(Assembler::equal, pc_empty);
796 __ stop("exception pc must be empty");
797 __ bind(pc_empty);
798 #endif
800 // clear the FPU stack in case any FPU results are left behind
801 __ empty_FPU_stack();
803 // save exception_oop in callee-saved register to preserve it during runtime calls
804 __ verify_not_null_oop(exception_oop);
805 __ movptr(exception_oop_callee_saved, exception_oop);
807 NOT_LP64(__ get_thread(thread);)
808 // Get return address (is on top of stack after leave).
809 __ movptr(exception_pc, Address(rsp, 0));
811 // look up the exception handler address of the caller (using the return address)
812 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc);
813 // rax: exception handler address of the caller
815 // Only RAX and RSI are valid at this time, all other registers have been destroyed by the call.
816 __ invalidate_registers(false, true, true, true, false, true);
818 // move result of call into correct register
819 __ movptr(handler_addr, rax);
821 // Restore exception oop to RAX (required convention of exception handler).
822 __ movptr(exception_oop, exception_oop_callee_saved);
824 // verify that there is really a valid exception in rax
825 __ verify_not_null_oop(exception_oop);
827 // get throwing pc (= return address).
828 // rdx has been destroyed by the call, so it must be set again
829 // the pop is also necessary to simulate the effect of a ret(0)
830 __ pop(exception_pc);
832 // Restore SP from BP if the exception PC is a method handle call site.
833 NOT_LP64(__ get_thread(thread);)
834 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
835 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
837 // continue at exception handler (return address removed)
838 // note: do *not* remove arguments when unwinding the
839 // activation since the caller assumes that all
840 // arguments are still on the stack when entering the
841 // runtime to determine the exception handler
842 // (GC happens at call site with arguments!)
843 // rax: exception oop
844 // rdx: throwing pc
845 // rbx: exception handler
846 __ jmp(handler_addr);
847 }
850 OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
851 // use the maximum number of runtime-arguments here because it is difficult to
852 // distinguish each RT-Call.
853 // Note: This number also affects the RT-Call in generate_handle_exception because
854 // the oop-map is shared for all calls.
855 const int num_rt_args = 2; // thread + dummy
857 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
858 assert(deopt_blob != NULL, "deoptimization blob must have been created");
860 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
862 #ifdef _LP64
863 const Register thread = r15_thread;
864 // No need to worry about dummy
865 __ mov(c_rarg0, thread);
866 #else
867 __ push(rax); // push dummy
869 const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
870 // push java thread (becomes first argument of C function)
871 __ get_thread(thread);
872 __ push(thread);
873 #endif // _LP64
874 __ set_last_Java_frame(thread, noreg, rbp, NULL);
875 // do the call
876 __ call(RuntimeAddress(target));
877 OopMapSet* oop_maps = new OopMapSet();
878 oop_maps->add_gc_map(__ offset(), oop_map);
879 // verify callee-saved register
880 #ifdef ASSERT
881 guarantee(thread != rax, "change this code");
882 __ push(rax);
883 { Label L;
884 __ get_thread(rax);
885 __ cmpptr(thread, rax);
886 __ jcc(Assembler::equal, L);
887 __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
888 __ bind(L);
889 }
890 __ pop(rax);
891 #endif
892 __ reset_last_Java_frame(thread, true, false);
893 #ifndef _LP64
894 __ pop(rcx); // discard thread arg
895 __ pop(rcx); // discard dummy
896 #endif // _LP64
898 // check for pending exceptions
899 { Label L;
900 __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
901 __ jcc(Assembler::equal, L);
902 // exception pending => remove activation and forward to exception handler
904 __ testptr(rax, rax); // have we deoptimized?
905 __ jump_cc(Assembler::equal,
906 RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
908 // the deopt blob expects exceptions in the special fields of
909 // JavaThread, so copy and clear pending exception.
911 // load and clear pending exception
912 __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
913 __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
915 // check that there is really a valid exception
916 __ verify_not_null_oop(rax);
918 // load throwing pc: this is the return address of the stub
919 __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));
921 #ifdef ASSERT
922 // check that fields in JavaThread for exception oop and issuing pc are empty
923 Label oop_empty;
924 __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
925 __ jcc(Assembler::equal, oop_empty);
926 __ stop("exception oop must be empty");
927 __ bind(oop_empty);
929 Label pc_empty;
930 __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
931 __ jcc(Assembler::equal, pc_empty);
932 __ stop("exception pc must be empty");
933 __ bind(pc_empty);
934 #endif
936 // store exception oop and throwing pc to JavaThread
937 __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
938 __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);
940 restore_live_registers(sasm);
942 __ leave();
943 __ addptr(rsp, BytesPerWord); // remove return address from stack
945 // Forward the exception directly to the deopt blob. We must not blow
946 // any registers and must leave the throwing pc on the stack. A patch may
947 // have values live in registers, so we use the entry point that expects
948 // the exception in tls.
949 __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls()));
951 __ bind(L);
952 }
955 // Runtime will return true if the nmethod has been deoptimized during
956 // the patching process. In that case we must do a deopt reexecute instead.
958 Label reexecuteEntry, cont;
960 __ testptr(rax, rax); // have we deoptimized?
961 __ jcc(Assembler::equal, cont); // no
963 // Will reexecute. The proper return address is already on the stack; we just restore
964 // registers, pop all of our frame but the return address, and jump to the deopt blob.
965 restore_live_registers(sasm);
966 __ leave();
967 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
969 __ bind(cont);
970 restore_live_registers(sasm);
971 __ leave();
972 __ ret(0);
974 return oop_maps;
975 }
978 OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
980 // for better readability
981 const bool must_gc_arguments = true;
982 const bool dont_gc_arguments = false;
984 // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu
985 bool save_fpu_registers = true;
987 // stub code & info for the different stubs
988 OopMapSet* oop_maps = NULL;
989 switch (id) {
990 case forward_exception_id:
991 {
992 oop_maps = generate_handle_exception(id, sasm);
993 __ leave();
994 __ ret(0);
995 }
996 break;
998 case new_instance_id:
999 case fast_new_instance_id:
1000 case fast_new_instance_init_check_id:
1001 {
1002 Register klass = rdx; // Incoming
1003 Register obj = rax; // Result
1005 if (id == new_instance_id) {
1006 __ set_info("new_instance", dont_gc_arguments);
1007 } else if (id == fast_new_instance_id) {
1008 __ set_info("fast new_instance", dont_gc_arguments);
1009 } else {
1010 assert(id == fast_new_instance_init_check_id, "bad StubID");
1011 __ set_info("fast new_instance init check", dont_gc_arguments);
1012 }
1014 if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) &&
1015 UseTLAB && FastTLABRefill) {
1016 Label slow_path;
1017 Register obj_size = rcx;
1018 Register t1 = rbx;
1019 Register t2 = rsi;
1020 assert_different_registers(klass, obj, obj_size, t1, t2);
1022 __ push(rdi);
1023 __ push(rbx);
1025 if (id == fast_new_instance_init_check_id) {
1026 // make sure the klass is initialized
1027 __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
1028 __ jcc(Assembler::notEqual, slow_path);
1029 }
1031 #ifdef ASSERT
1032 // assert object can be fast path allocated
1033 {
1034 Label ok, not_ok;
1035 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1036 __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0)
1037 __ jcc(Assembler::lessEqual, not_ok);
1038 __ testl(obj_size, Klass::_lh_instance_slow_path_bit);
1039 __ jcc(Assembler::zero, ok);
1040 __ bind(not_ok);
1041 __ stop("assert(can be fast path allocated)");
1042 __ should_not_reach_here();
1043 __ bind(ok);
1044 }
1045 #endif // ASSERT
1047 // if we got here then the TLAB allocation failed, so try
1048 // refilling the TLAB or allocating directly from eden.
1049 Label retry_tlab, try_eden;
1050 const Register thread =
1051 __ tlab_refill(retry_tlab, try_eden, slow_path); // does not destroy rdx (klass), returns rdi
1053 __ bind(retry_tlab);
1055 // get the instance size (size is positive so movl is fine for 64bit)
1056 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1058 __ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
1060 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1061 __ verify_oop(obj);
1062 __ pop(rbx);
1063 __ pop(rdi);
1064 __ ret(0);
1066 __ bind(try_eden);
1067 // get the instance size (size is positive so movl is fine for 64bit)
1068 __ movl(obj_size, Address(klass, Klass::layout_helper_offset()));
1070 __ eden_allocate(obj, obj_size, 0, t1, slow_path);
1071 __ incr_allocated_bytes(thread, obj_size, 0);
1073 __ initialize_object(obj, klass, obj_size, 0, t1, t2);
1074 __ verify_oop(obj);
1075 __ pop(rbx);
1076 __ pop(rdi);
1077 __ ret(0);
1079 __ bind(slow_path);
1080 __ pop(rbx);
1081 __ pop(rdi);
1082 }
1084 __ enter();
1085 OopMap* map = save_live_registers(sasm, 2);
1086 int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass);
1087 oop_maps = new OopMapSet();
1088 oop_maps->add_gc_map(call_offset, map);
1089 restore_live_registers_except_rax(sasm);
1090 __ verify_oop(obj);
1091 __ leave();
1092 __ ret(0);
1094 // rax: new instance
1095 }
1097 break;
1099 case counter_overflow_id:
1100 {
1101 Register bci = rax, method = rbx;
1102 __ enter();
1103 OopMap* map = save_live_registers(sasm, 3);
1104 // Retrieve bci
1105 __ movl(bci, Address(rbp, 2*BytesPerWord));
1106 // And a pointer to the Method*
1107 __ movptr(method, Address(rbp, 3*BytesPerWord));
1108 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
1109 oop_maps = new OopMapSet();
1110 oop_maps->add_gc_map(call_offset, map);
1111 restore_live_registers(sasm);
1112 __ leave();
1113 __ ret(0);
1114 }
1115 break;
1117 case new_type_array_id:
1118 case new_object_array_id:
1119 {
1120 Register length = rbx; // Incoming
1121 Register klass = rdx; // Incoming
1122 Register obj = rax; // Result
1124 if (id == new_type_array_id) {
1125 __ set_info("new_type_array", dont_gc_arguments);
1126 } else {
1127 __ set_info("new_object_array", dont_gc_arguments);
1128 }
1130 #ifdef ASSERT
1131 // assert object type is really an array of the proper kind
1132 {
1133 Label ok;
1134 Register t0 = obj;
1135 __ movl(t0, Address(klass, Klass::layout_helper_offset()));
1136 __ sarl(t0, Klass::_lh_array_tag_shift);
1137 int tag = ((id == new_type_array_id)
1138 ? Klass::_lh_array_tag_type_value
1139 : Klass::_lh_array_tag_obj_value);
1140 __ cmpl(t0, tag);
1141 __ jcc(Assembler::equal, ok);
1142 __ stop("assert(is an array klass)");
1143 __ should_not_reach_here();
1144 __ bind(ok);
1145 }
1146 #endif // ASSERT
1148 if (UseTLAB && FastTLABRefill) {
1149 Register arr_size = rsi;
1150 Register t1 = rcx; // must be rcx for use as shift count
1151 Register t2 = rdi;
1152 Label slow_path;
1153 assert_different_registers(length, klass, obj, arr_size, t1, t2);
1155 // check that array length is small enough for fast path.
1156 __ cmpl(length, C1_MacroAssembler::max_array_allocation_length);
1157 __ jcc(Assembler::above, slow_path);
1159 // if we got here then the TLAB allocation failed, so try
1160 // refilling the TLAB or allocating directly from eden.
1161 Label retry_tlab, try_eden;
1162 const Register thread =
1163 __ tlab_refill(retry_tlab, try_eden, slow_path); // preserves rbx & rdx, returns rdi
1165 __ bind(retry_tlab);
1167 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1168 // since size is positive movl does right thing on 64bit
1169 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1170 // since size is positive movl does the right thing on 64bit
1171 __ movl(arr_size, length);
1172 assert(t1 == rcx, "fixed register usage");
1173 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1174 __ shrptr(t1, Klass::_lh_header_size_shift);
1175 __ andptr(t1, Klass::_lh_header_size_mask);
1176 __ addptr(arr_size, t1);
1177 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1178 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
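// Illustrative example (not in the original source): assuming a 10-element
// int[] with a 16-byte header, 8-byte object alignment, and log2(element
// size) == 2 in the layout helper, this computes
// round_up(16 + (10 << 2), 8) = 56 bytes.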
1180 __ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
1182 __ initialize_header(obj, klass, length, t1, t2);
1183 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1184 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1185 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1186 __ andptr(t1, Klass::_lh_header_size_mask);
1187 __ subptr(arr_size, t1); // body length
1188 __ addptr(t1, obj); // body start
1189 __ initialize_body(t1, arr_size, 0, t2);
1190 __ verify_oop(obj);
1191 __ ret(0);
1193 __ bind(try_eden);
1194 // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
1195 // since size is positive movl does right thing on 64bit
1196 __ movl(t1, Address(klass, Klass::layout_helper_offset()));
1198 // since size is positive movl does the right thing on 64bit
1198 __ movl(arr_size, length);
1199 assert(t1 == rcx, "fixed register usage");
1200 __ shlptr(arr_size /* by t1=rcx, mod 32 */);
1201 __ shrptr(t1, Klass::_lh_header_size_shift);
1202 __ andptr(t1, Klass::_lh_header_size_mask);
1203 __ addptr(arr_size, t1);
1204 __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
1205 __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
1207 __ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
1208 __ incr_allocated_bytes(thread, arr_size, 0);
1210 __ initialize_header(obj, klass, length, t1, t2);
1211 __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte)));
1212 assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
1213 assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
1214 __ andptr(t1, Klass::_lh_header_size_mask);
1215 __ subptr(arr_size, t1); // body length
1216 __ addptr(t1, obj); // body start
1217 __ initialize_body(t1, arr_size, 0, t2);
1218 __ verify_oop(obj);
1219 __ ret(0);
1221 __ bind(slow_path);
1222 }
1224 __ enter();
1225 OopMap* map = save_live_registers(sasm, 3);
1226 int call_offset;
1227 if (id == new_type_array_id) {
1228 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
1229 } else {
1230 call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
1231 }
1233 oop_maps = new OopMapSet();
1234 oop_maps->add_gc_map(call_offset, map);
1235 restore_live_registers_except_rax(sasm);
1237 __ verify_oop(obj);
1238 __ leave();
1239 __ ret(0);
1241 // rax: new array
1242 }
1243 break;
1245 case new_multi_array_id:
1246 { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
1247 // rax: klass
1248 // rbx: rank
1249 // rcx: address of 1st dimension
1250 OopMap* map = save_live_registers(sasm, 4);
1251 int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx);
1253 oop_maps = new OopMapSet();
1254 oop_maps->add_gc_map(call_offset, map);
1255 restore_live_registers_except_rax(sasm);
1257 // rax: new multi array
1258 __ verify_oop(rax);
1259 }
1260 break;
1262 case register_finalizer_id:
1263 {
1264 __ set_info("register_finalizer", dont_gc_arguments);
1266 // This is called via call_runtime so the arguments
1267 // will be placed in C ABI locations
1269 #ifdef _LP64
1270 __ verify_oop(c_rarg0);
1271 __ mov(rax, c_rarg0);
1272 #else
1273 // The object is passed on the stack and we haven't pushed a
1274 // frame yet, so it's one word away from the top of the stack.
1275 __ movptr(rax, Address(rsp, 1 * BytesPerWord));
1276 __ verify_oop(rax);
1277 #endif // _LP64
1279 // load the klass and check the has-finalizer flag
1280 Label register_finalizer;
1281 Register t = rsi;
1282 __ load_klass(t, rax);
1283 __ movl(t, Address(t, Klass::access_flags_offset()));
1284 __ testl(t, JVM_ACC_HAS_FINALIZER);
1285 __ jcc(Assembler::notZero, register_finalizer);
1286 __ ret(0);
1288 __ bind(register_finalizer);
1289 __ enter();
1290 OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */);
1291 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax);
1292 oop_maps = new OopMapSet();
1293 oop_maps->add_gc_map(call_offset, oop_map);
1295 // Now restore all the live registers
1296 restore_live_registers(sasm);
1298 __ leave();
1299 __ ret(0);
1300 }
1301 break;
1303 case throw_range_check_failed_id:
1304 { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
1305 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
1306 }
1307 break;
1309 case throw_index_exception_id:
1310 { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
1311 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
1312 }
1313 break;
1315 case throw_div0_exception_id:
1316 { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
1317 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
1318 }
1319 break;
1321 case throw_null_pointer_exception_id:
1322 { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
1323 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
1324 }
1325 break;
1327 case handle_exception_nofpu_id:
1328 case handle_exception_id:
1329 { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
1330 oop_maps = generate_handle_exception(id, sasm);
1331 }
1332 break;
1334 case handle_exception_from_callee_id:
1335 { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
1336 oop_maps = generate_handle_exception(id, sasm);
1337 }
1338 break;
1340 case unwind_exception_id:
1341 { __ set_info("unwind_exception", dont_gc_arguments);
1342 // note: no stubframe since we are about to leave the current
1343 // activation and we are calling a leaf VM function only.
1344 generate_unwind_exception(sasm);
1345 }
1346 break;
1348 case throw_array_store_exception_id:
1349 { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
1350 // tos + 0: link
1351 // + 1: return address
1352 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
1353 }
1354 break;
1356 case throw_class_cast_exception_id:
1357 { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
1358 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
1359 }
1360 break;
1362 case throw_incompatible_class_change_error_id:
1363 { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
1364 oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
1365 }
1366 break;
1368 case slow_subtype_check_id:
1369 {
1370 // Typical calling sequence:
1371 // __ push(klass_RInfo); // object klass or other subclass
1372 // __ push(sup_k_RInfo); // array element klass or other superclass
1373 // __ call(slow_subtype_check);
1374 // Note that the subclass is pushed first, and is therefore deepest.
1375 // Previous versions of this code reversed the names 'sub' and 'super'.
1376 // This was operationally harmless but made the code unreadable.
1377 enum layout {
1378 rax_off, SLOT2(raxH_off)
1379 rcx_off, SLOT2(rcxH_off)
1380 rsi_off, SLOT2(rsiH_off)
1381 rdi_off, SLOT2(rdiH_off)
1382 // saved_rbp_off, SLOT2(saved_rbpH_off)
1383 return_off, SLOT2(returnH_off)
1384 sup_k_off, SLOT2(sup_kH_off)
1385 klass_off, SLOT2(superH_off)
1386 framesize,
1387 result_off = klass_off // deepest argument is also the return value
1388 };
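// Illustrative note (not in the original source): with the layout above,
// after the four pushes below rsp points at rax_off; the caller's two pushed
// arguments sit above the return address at sup_k_off and klass_off, and the
// result overwrites klass_off, the deepest slot, before returning.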
1390 __ set_info("slow_subtype_check", dont_gc_arguments);
1391 __ push(rdi);
1392 __ push(rsi);
1393 __ push(rcx);
1394 __ push(rax);
1396 // This is called by pushing args, not with the C ABI
1397 __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass
1398 __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass
1400 Label miss;
1401 __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss);
1403 // fallthrough on success:
1404 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result
1405 __ pop(rax);
1406 __ pop(rcx);
1407 __ pop(rsi);
1408 __ pop(rdi);
1409 __ ret(0);
1411 __ bind(miss);
1412 __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result
1413 __ pop(rax);
1414 __ pop(rcx);
1415 __ pop(rsi);
1416 __ pop(rdi);
1417 __ ret(0);
1418 }
1419 break;
1421 case monitorenter_nofpu_id:
1422 save_fpu_registers = false;
1423 // fall through
1424 case monitorenter_id:
1425 {
1426 StubFrame f(sasm, "monitorenter", dont_gc_arguments);
1427 OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
1429 // Called with store_parameter and not C abi
1431 f.load_argument(1, rax); // rax: object
1432 f.load_argument(0, rbx); // rbx: lock address
1434 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx);
1436 oop_maps = new OopMapSet();
1437 oop_maps->add_gc_map(call_offset, map);
1438 restore_live_registers(sasm, save_fpu_registers);
1439 }
1440 break;
1442 case monitorexit_nofpu_id:
1443 save_fpu_registers = false;
1444 // fall through
1445 case monitorexit_id:
1446 {
1447 StubFrame f(sasm, "monitorexit", dont_gc_arguments);
1448 OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
1450 // Called with store_parameter and not C abi
1452 f.load_argument(0, rax); // rax: lock address
1454 // note: really a leaf routine but we must set up the last java sp
1455 // => use call_RT for now (speed can be improved by
1456 // doing last java sp setup manually)
1457 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax);
1459 oop_maps = new OopMapSet();
1460 oop_maps->add_gc_map(call_offset, map);
1461 restore_live_registers(sasm, save_fpu_registers);
1462 }
1463 break;
1465 case deoptimize_id:
1466 {
1467 StubFrame f(sasm, "deoptimize", dont_gc_arguments);
1468 const int num_rt_args = 1; // thread
1469 OopMap* oop_map = save_live_registers(sasm, num_rt_args);
1470 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize));
1471 oop_maps = new OopMapSet();
1472 oop_maps->add_gc_map(call_offset, oop_map);
1473 restore_live_registers(sasm);
1474 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1475 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1476 __ leave();
1477 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1478 }
1479 break;
1481 case access_field_patching_id:
1482 { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
1483 // we should set up register map
1484 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
1485 }
1486 break;
1488 case load_klass_patching_id:
1489 { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
1490 // we should set up register map
1491 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
1492 }
1493 break;
1495 case load_mirror_patching_id:
1496 { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
1497 // note: generate_patching saves the live registers and builds the oop map
1498 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
1499 }
1500 break;
1502 case load_appendix_patching_id:
1503 { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
1504 // note: generate_patching saves the live registers and builds the oop map
1505 oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
1506 }
1507 break;
1509 case dtrace_object_alloc_id:
1510 { // rax: object
1511 StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
1512 // we can't GC here, so skip the oop map, but make sure that all
1513 // the live registers get saved.
1514 save_live_registers(sasm, 1);
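// pass the object: on the stack on 32-bit, in c_rarg0 on 64-bit (per the C ABI)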
1516 __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
1517 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
1518 NOT_LP64(__ pop(rax));
1520 restore_live_registers(sasm);
1521 }
1522 break;
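// fpu2long: converts the value on top of the x87 stack to a jlong with Java
// semantics: NaN becomes 0 and out-of-range values saturate to min/max jlong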
1524 case fpu2long_stub_id:
1525 {
1526 // rax and rdx are destroyed, which is fine since the result is returned in them
1527 // preserve rsi and rcx
1528 __ push(rsi);
1529 __ push(rcx);
1530 LP64_ONLY(__ push(rdx);)
1532 // check for NaN
1533 Label return0, do_return, return_min_jlong, do_convert;
1535 Address value_high_word(rsp, wordSize + 4);
1536 Address value_low_word(rsp, wordSize);
1537 Address result_high_word(rsp, 3*wordSize + 4);
1538 Address result_low_word(rsp, 3*wordSize);
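// scratch layout in the 32 bytes reserved below: the operand is spilled at
// [rsp + wordSize], the fistp result lands at [rsp + 3*wordSize], and
// [rsp + 0] / [rsp + 2] hold the original / modified FPU control word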
1540 __ subptr(rsp, 32); // more than enough on 32-bit
1541 __ fst_d(value_low_word);
1542 __ movl(rax, value_high_word);
1543 __ andl(rax, 0x7ff00000);
1544 __ cmpl(rax, 0x7ff00000);
1545 __ jcc(Assembler::notEqual, do_convert);
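// the exponent is all ones: a non-zero mantissa means NaN (return 0),
// otherwise the value is an infinity and the convert path saturates it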
1546 __ movl(rax, value_high_word);
1547 __ andl(rax, 0xfffff);
1548 __ orl(rax, value_low_word);
1549 __ jcc(Assembler::notZero, return0);
1551 __ bind(do_convert);
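// set the x87 rounding control to round-toward-zero (RC bits = 11), as
// Java's f2l/d2l requires, convert, then restore the saved control word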
1552 __ fnstcw(Address(rsp, 0));
1553 __ movzwl(rax, Address(rsp, 0));
1554 __ orl(rax, 0xc00);
1555 __ movw(Address(rsp, 2), rax);
1556 __ fldcw(Address(rsp, 2));
1557 __ fwait();
1558 __ fistp_d(result_low_word);
1559 __ fldcw(Address(rsp, 0));
1560 __ fwait();
1561 // This gets the entire long in rax on 64-bit
1562 __ movptr(rax, result_low_word);
1563 // test the high bits
1564 __ movl(rdx, result_high_word);
1565 __ mov(rcx, rax);
1566 // note: this xor with zero is a no-op (its flags are clobbered by the orl below) and looks like a leftover
1567 __ xorl(rcx, 0x0);
1568 __ movl(rsi, 0x80000000);
1569 __ xorl(rsi, rdx);
1570 __ orl(rcx, rsi);
1571 __ jcc(Assembler::notEqual, do_return);
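// the result equals min_jlong: it is either genuine or the x87 "integer
// indefinite" value produced on overflow, so compare the operand against
// zero: negative means min_jlong, positive means max_jlong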
1572 __ fldz();
1573 __ fcomp_d(value_low_word);
1574 __ fnstsw_ax();
1575 #ifdef _LP64
1576 __ testl(rax, 0x4100); // ZF & CF == 0
1577 __ jcc(Assembler::equal, return_min_jlong);
1578 #else
1579 __ sahf();
1580 __ jcc(Assembler::above, return_min_jlong);
1581 #endif // _LP64
1582 // return max_jlong
1583 #ifndef _LP64
1584 __ movl(rdx, 0x7fffffff);
1585 __ movl(rax, 0xffffffff);
1586 #else
1587 __ mov64(rax, CONST64(0x7fffffffffffffff));
1588 #endif // _LP64
1589 __ jmp(do_return);
1591 __ bind(return_min_jlong);
1592 #ifndef _LP64
1593 __ movl(rdx, 0x80000000);
1594 __ xorl(rax, rax);
1595 #else
1596 __ mov64(rax, CONST64(0x8000000000000000));
1597 #endif // _LP64
1598 __ jmp(do_return);
1600 __ bind(return0);
1601 __ fpop();
1602 #ifndef _LP64
1603 __ xorptr(rdx,rdx);
1604 __ xorptr(rax,rax);
1605 #else
1606 __ xorptr(rax, rax);
1607 #endif // _LP64
1609 __ bind(do_return);
1610 __ addptr(rsp, 32);
1611 LP64_ONLY(__ pop(rdx);)
1612 __ pop(rcx);
1613 __ pop(rsi);
1614 __ ret(0);
1615 }
1616 break;
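// G1 SATB pre-barrier slow path: enqueue the value about to be overwritten
// so concurrent marking sees a snapshot-at-the-beginning of the heap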
1618 #if INCLUDE_ALL_GCS
1619 case g1_pre_barrier_slow_id:
1620 {
1621 StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
1622 // arg0 : previous value of memory
1624 BarrierSet* bs = Universe::heap()->barrier_set();
1625 if (bs->kind() != BarrierSet::G1SATBCTLogging) {
1626 __ movptr(rax, (int)id);
1627 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1628 __ should_not_reach_here();
1629 break;
1630 }
1631 __ push(rax);
1632 __ push(rdx);
1634 const Register pre_val = rax;
1635 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1636 const Register tmp = rdx;
1638 NOT_LP64(__ get_thread(thread);)
1640 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1641 PtrQueue::byte_offset_of_active()));
1643 Address queue_index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1644 PtrQueue::byte_offset_of_index()));
1645 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
1646 PtrQueue::byte_offset_of_buf()));
1649 Label done;
1650 Label runtime;
1652 // Can we store the original value in the thread's SATB buffer?
1654 #ifdef _LP64
1655 __ movslq(tmp, queue_index);
1656 __ cmpq(tmp, 0);
1657 #else
1658 __ cmpl(queue_index, 0);
1659 #endif
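// a zero index means the thread's SATB buffer is full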
1660 __ jcc(Assembler::equal, runtime);
1661 #ifdef _LP64
1662 __ subq(tmp, wordSize);
1663 __ movl(queue_index, tmp);
1664 __ addq(tmp, buffer);
1665 #else
1666 __ subl(queue_index, wordSize);
1667 __ movl(tmp, buffer);
1668 __ addl(tmp, queue_index);
1669 #endif
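// tmp now points at the next free slot in the buffer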
1671 // prev_val (rax)
1672 f.load_argument(0, pre_val);
1673 __ movptr(Address(tmp, 0), pre_val);
1674 __ jmp(done);
1676 __ bind(runtime);
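// the buffer is full: flush it via the runtime. call_VM_leaf follows the
// C ABI, so save the caller-saved registers the callee may clobber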
1677 __ push(rcx);
1678 #ifdef _LP64
1679 __ push(r8);
1680 __ push(r9);
1681 __ push(r10);
1682 __ push(r11);
1683 # ifndef _WIN64
1684 __ push(rdi);
1685 __ push(rsi);
1686 # endif
1687 #endif
1688 // load the pre-value
1689 f.load_argument(0, rcx);
1690 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
1691 #ifdef _LP64
1692 # ifndef _WIN64
1693 __ pop(rsi);
1694 __ pop(rdi);
1695 # endif
1696 __ pop(r11);
1697 __ pop(r10);
1698 __ pop(r9);
1699 __ pop(r8);
1700 #endif
1701 __ pop(rcx);
1702 __ bind(done);
1704 __ pop(rdx);
1705 __ pop(rax);
1706 }
1707 break;
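// G1 post-barrier slow path: dirty the card covering the store address and
// enqueue it in the thread's dirty-card queue for concurrent refinement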
1709 case g1_post_barrier_slow_id:
1710 {
1711 StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
1714 // arg0: store_address
1715 Address store_addr(rbp, 2*BytesPerWord);
1717 BarrierSet* bs = Universe::heap()->barrier_set();
1718 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
1719 Label done;
1720 Label runtime;
1722 // At this point we know new_value is non-NULL and that the store crosses regions.
1723 // Must check to see if the card is already dirty.
1725 const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
1727 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1728 PtrQueue::byte_offset_of_index()));
1729 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
1730 PtrQueue::byte_offset_of_buf()));
1732 __ push(rax);
1733 __ push(rcx);
1735 NOT_LP64(__ get_thread(thread);)
1736 ExternalAddress cardtable((address)ct->byte_map_base);
1737 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
1739 const Register card_addr = rcx;
1740 #ifdef _LP64
1741 const Register tmp = rscratch1;
1742 f.load_argument(0, card_addr);
1743 __ shrq(card_addr, CardTableModRefBS::card_shift);
1744 __ lea(tmp, cardtable);
1745 // get the address of the card
1746 __ addq(card_addr, tmp);
1747 #else
1748 const Register card_index = rcx;
1749 f.load_argument(0, card_index);
1750 __ shrl(card_index, CardTableModRefBS::card_shift);
1752 Address index(noreg, card_index, Address::times_1);
1753 __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
1754 #endif
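// card_addr now points at the card table byte for the store address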
1756 __ cmpb(Address(card_addr, 0), 0);
1757 __ jcc(Assembler::equal, done);
1759 // the card is clean and we are storing a region-crossing non-NULL reference:
1760 // dirty the card and log it
1762 __ movb(Address(card_addr, 0), 0);
1764 __ cmpl(queue_index, 0);
1765 __ jcc(Assembler::equal, runtime);
1766 __ subl(queue_index, wordSize);
1768 const Register buffer_addr = rbx;
1769 __ push(rbx);
1771 __ movptr(buffer_addr, buffer);
1773 #ifdef _LP64
1774 __ movslq(rscratch1, queue_index);
1775 __ addptr(buffer_addr, rscratch1);
1776 #else
1777 __ addptr(buffer_addr, queue_index);
1778 #endif
1779 __ movptr(Address(buffer_addr, 0), card_addr);
1781 __ pop(rbx);
1782 __ jmp(done);
1784 __ bind(runtime);
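// the dirty-card queue is full: save the caller-saved registers and let
// the runtime flush it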
1785 __ push(rdx);
1786 #ifdef _LP64
1787 __ push(r8);
1788 __ push(r9);
1789 __ push(r10);
1790 __ push(r11);
1791 # ifndef _WIN64
1792 __ push(rdi);
1793 __ push(rsi);
1794 # endif
1795 #endif
1796 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
1797 #ifdef _LP64
1798 # ifndef _WIN64
1799 __ pop(rsi);
1800 __ pop(rdi);
1801 # endif
1802 __ pop(r11);
1803 __ pop(r10);
1804 __ pop(r9);
1805 __ pop(r8);
1806 #endif
1807 __ pop(rdx);
1808 __ bind(done);
1810 __ pop(rcx);
1811 __ pop(rax);
1813 }
1814 break;
1815 #endif // INCLUDE_ALL_GCS
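// predicate_failed_trap: a speculative check compiled by C1 (e.g. a
// range-check-elimination predicate) failed; deoptimize and re-execute
// in the interpreter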
1817 case predicate_failed_trap_id:
1818 {
1819 StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
1821 OopMap* map = save_live_registers(sasm, 1);
1823 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap));
1824 oop_maps = new OopMapSet();
1825 oop_maps->add_gc_map(call_offset, map);
1826 restore_live_registers(sasm);
1827 __ leave();
1828 DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
1829 assert(deopt_blob != NULL, "deoptimization blob must have been created");
1831 __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution()));
1832 }
1833 break;
1835 default:
1836 { StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
1837 __ movptr(rax, (int)id);
1838 __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
1839 __ should_not_reach_here();
1840 }
1841 break;
1842 }
1843 return oop_maps;
1844 }
1846 #undef __
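// platform-dependent hook used when printing the target of a runtime call;
// x86 does not name any additional entry points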
1848 const char *Runtime1::pd_name_for_address(address entry) {
1849 return "<unknown function>";
1850 }