Mon, 20 Aug 2012 09:58:58 -0700
7190310: Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
Summary: In C2 add software membar after load from Reference.referent field to prevent commoning of loads across safepoint since GC can change its value. In C1 always generate Reference.get() intrinsic.
Reviewed-by: roland, twisti, dholmes, johnc

/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
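
// Illustrative slot arithmetic for the layout above (a sketch, not from the
// source): with BytesPerInt == 4 and frame::arg_reg_save_area_bytes == 32
// (the Windows x64 register home area; it is 0 on System V), rbp_off would
// be slot 8, return_off slot 10, and framesize 12 jint slots, i.e. a
// 48-byte frame including the saved rbp and the return address pair.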

class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    DEF_XMM_OFFS(8),
    DEF_XMM_OFFS(9),
    DEF_XMM_OFFS(10),
    DEF_XMM_OFFS(11),
    DEF_XMM_OFFS(12),
    DEF_XMM_OFFS(13),
    DEF_XMM_OFFS(14),
    DEF_XMM_OFFS(15),
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
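
// Worked example of the accessors above (a sketch assuming a System V build,
// where frame::arg_reg_save_area_bytes == 0): fpu_state_off is slot 0 and
// xmm_off is slot 40, so xmm0_offset_in_bytes() == 160. That lines up with
// the fxsave image, which keeps xmm0..xmm15 at byte offsets 160..415 in
// 16-byte strides -- which is why DEF_XMM_OFFS advances by (regnum)*16/BytesPerInt.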

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {

  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg( rax_off  + additional_frame_slots), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rcx_off  + additional_frame_slots), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rdx_off  + additional_frame_slots), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rbx_off  + additional_frame_slots), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap,
  // and the location where rbp was saved is ignored
  map->set_callee_saved(VMRegImpl::stack2reg( rsi_off  + additional_frame_slots), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rdi_off  + additional_frame_slots), rdi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r8_off   + additional_frame_slots), r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r9_off   + additional_frame_slots), r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r10_off  + additional_frame_slots), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r11_off  + additional_frame_slots), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r12_off  + additional_frame_slots), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r13_off  + additional_frame_slots), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r14_off  + additional_frame_slots), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r15_off  + additional_frame_slots), r15->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm0_off  + additional_frame_slots), xmm0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm1_off  + additional_frame_slots), xmm1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm2_off  + additional_frame_slots), xmm2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm3_off  + additional_frame_slots), xmm3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm4_off  + additional_frame_slots), xmm4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm5_off  + additional_frame_slots), xmm5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm6_off  + additional_frame_slots), xmm6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm7_off  + additional_frame_slots), xmm7->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm8_off  + additional_frame_slots), xmm8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm9_off  + additional_frame_slots), xmm9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm10_off + additional_frame_slots), xmm10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm11_off + additional_frame_slots), xmm11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm12_off + additional_frame_slots), xmm12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm13_off + additional_frame_slots), xmm13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm14_off + additional_frame_slots), xmm14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm15_off + additional_frame_slots), xmm15->as_VMReg());

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(VMRegImpl::stack2reg( raxH_off  + additional_frame_slots),
                          rax->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rcxH_off  + additional_frame_slots),
                          rcx->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rdxH_off  + additional_frame_slots),
                          rdx->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rbxH_off  + additional_frame_slots),
                          rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(VMRegImpl::stack2reg( rsiH_off  + additional_frame_slots),
                          rsi->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rdiH_off  + additional_frame_slots),
                          rdi->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r8H_off   + additional_frame_slots),
                          r8->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r9H_off   + additional_frame_slots),
                          r9->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r10H_off  + additional_frame_slots),
                          r10->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r11H_off  + additional_frame_slots),
                          r11->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r12H_off  + additional_frame_slots),
                          r12->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r13H_off  + additional_frame_slots),
                          r13->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r14H_off  + additional_frame_slots),
                          r14->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r15H_off  + additional_frame_slots),
                          r15->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm0H_off  + additional_frame_slots),
                          xmm0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm1H_off  + additional_frame_slots),
                          xmm1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm2H_off  + additional_frame_slots),
                          xmm2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm3H_off  + additional_frame_slots),
                          xmm3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm4H_off  + additional_frame_slots),
                          xmm4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm5H_off  + additional_frame_slots),
                          xmm5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm6H_off  + additional_frame_slots),
                          xmm6->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm7H_off  + additional_frame_slots),
                          xmm7->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm8H_off  + additional_frame_slots),
                          xmm8->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm9H_off  + additional_frame_slots),
                          xmm9->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm10H_off + additional_frame_slots),
                          xmm10->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm11H_off + additional_frame_slots),
                          xmm11->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm12H_off + additional_frame_slots),
                          xmm12->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm13H_off + additional_frame_slots),
                          xmm13->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm14H_off + additional_frame_slots),
                          xmm14->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm15H_off + additional_frame_slots),
                          xmm15->as_VMReg()->next());
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
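
// Illustrative arithmetic (a sketch, not from the source): the "+ 4" in
// reg2offset_in skips the two 64-bit words (saved rbp and return address)
// that sit between the callee's rbp and the incoming args; 2 words are
// 4 jint-sized stack slots, so incoming slot 0 becomes Address(rbp, 16)
// and slot 2 becomes Address(rbp, 24).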

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Registers
// (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.  Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static JNI methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the Java ABI we ought to at least get some
// advantage out of it.
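
// Example of the "shift" (a sketch assuming the System V register names this
// port uses): C integer args go in rdi, rsi, rdx, rcx, r8, r9 while Java
// args start one position later (j_rarg0 == c_rarg1 == rsi, and so on). A
// native wrapper can then drop JNIEnv* into c_rarg0 and the receiver plus
// leading args already sit in the right C registers without any shuffling.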

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}
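
// Worked example (a sketch, not from the source): for a signature
// (int, long, Object, double) expanded to sig_bt =
// {T_INT, T_LONG, T_VOID, T_OBJECT, T_DOUBLE, T_VOID}, the loop above yields
// regs[0] = j_rarg0 (set1), regs[1] = j_rarg1 (set2), regs[2] = BAD,
// regs[3] = j_rarg2 (set2), regs[4] = j_farg0 (set2), regs[5] = BAD, and
// stk_args == 0, so this method needs no outgoing stack slots at all.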

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ verify_oop(rbx);
  __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee.
  // rax isn't live so capture the return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  __ verify_oop(rbx);
  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus one word for
  // the return address location, since we store it first rather than
  // holding it in rax across all the shuffling.

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double in
    // a single slot on a 64-bit VM and it would be silly to break them up, the
    // interpreter leaves one slot empty and only stores to a single slot. In this
    // case the slot that is occupied is the T_VOID slot. See, I said it was confusing.
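    // To continue the 4-arg example above (an illustrative note assuming
    // Interpreter::stackElementSize == 8): the T_LONG at i == 0 is written
    // once, to next_off == 24 (its T_VOID slot), while st_off == 32 stays
    // unused; in debug builds the unused slot is filled with the 0xdead...
    // junk below so stray reads of it are easy to spot.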

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to the slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float, use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes on an i2c entry; otherwise we
  // lose the alignment we expect in all compiled code, and the register
  // save code can segv when fxsave instructions find an improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args.  Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte C2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }
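
  // Worked example of the computation above (a sketch, not from the source):
  // with comp_args_on_stack == 3, three 4-byte slots round up to 16 bytes
  // (2 words), comp_words_on_stack becomes 2, and rsp drops by 16 bytes,
  // which preserves the 16-byte alignment that is enforced next.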

  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address, misaligning the stack exactly as the youngest
  // frame always sees it after the placement of a call instruction
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going thru a c2i because of a miss a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed at negative offsets, so the LSW is at the LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put the methodOop where a c2i would expect it should we end up there;
  // only needed because c2 resolve stubs return the methodOop as a result in rax
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the methodOop during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ verify_oop(holder);
    __ load_klass(temp, receiver);
    __ verify_oop(temp);

    __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
    __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}
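
// Illustrative contrast with java_calling_convention above (a sketch, not
// from the source): for (int, double, int), System V assigns c_rarg0,
// c_farg0, c_rarg1 and returns 0 stack slots, while Windows burns one
// combined int/fp position per argument (the int_args++/fp_args++ lines
// above) and reserves home-space slots, so it assigns c_rarg0, c_farg1,
// c_rarg2 and returns the 8-slot minimum.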

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (SPARC ABI) even though Java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits,
// so this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if the oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
  }
}


static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double words first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value is in an input register; we must flush it to the stack
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
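
// Usage note for save_or_restore_arguments (an illustrative summary, not
// from the source): the same routine describes both directions. Passing an
// OopMap stores every live in-register argument below arg_save_area and
// records T_ARRAY oops in the map; passing NULL replays the identical slot
// layout as loads. Keeping one routine for both directions guarantees the
// save and restore offsets can never drift apart.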


// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move_ptr(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move32_64(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  move_ptr(masm, tmp, body_arg);
  move32_64(masm, tmp, length_arg);
  __ bind(done);
}
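
// Illustrative effect (a sketch, not from the source): for a critical
// native taking a jintArray, the wrapper uses unpack_array_argument so that
// the array arrives in C expanded into two arguments, an explicit length
// plus a raw pointer to the first element of the array body, or a 0 length
// and NULL pointer when the Java array reference itself is NULL.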


// Different signatures may require very different orders for the move
// to avoid clobbering other arguments.  There's no simple way to
// order them safely.  Compute a safe order for issuing stores and
// break any cycles in those stores.  This code is fairly general but
// it's not necessary on the other platforms so we keep it in the
// platform dependent code instead of moving it into a shared file.
// (See bugs 7013347 & 7145024.)
// Note that this code is specific to LP64.
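
// Worked example of the cycle breaking below (a sketch, not from the
// source): if one argument must move rdi -> rsi and another rsi -> rdi,
// the two stores form a cycle that no ordering can satisfy. break_cycle()
// redirects the first store into tmp_vmreg (rdi -> tmp) and appends a
// tmp -> rsi store to run after rsi -> rdi, so every value is read before
// it is overwritten.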
1434 class ComputeMoveOrder: public StackObj {
1435 class MoveOperation: public ResourceObj {
1436 friend class ComputeMoveOrder;
1437 private:
1438 VMRegPair _src;
1439 VMRegPair _dst;
1440 int _src_index;
1441 int _dst_index;
1442 bool _processed;
1443 MoveOperation* _next;
1444 MoveOperation* _prev;
1446 static int get_id(VMRegPair r) {
1447 return r.first()->value();
1448 }
1450 public:
1451 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1452 _src(src)
1453 , _src_index(src_index)
1454 , _dst(dst)
1455 , _dst_index(dst_index)
1456 , _next(NULL)
1457 , _prev(NULL)
1458 , _processed(false) {
1459 }
1461 VMRegPair src() const { return _src; }
1462 int src_id() const { return get_id(src()); }
1463 int src_index() const { return _src_index; }
1464 VMRegPair dst() const { return _dst; }
1465 void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
1466 int dst_index() const { return _dst_index; }
1467 int dst_id() const { return get_id(dst()); }
1468 MoveOperation* next() const { return _next; }
1469 MoveOperation* prev() const { return _prev; }
1470 void set_processed() { _processed = true; }
1471 bool is_processed() const { return _processed; }
1473 // insert
1474 void break_cycle(VMRegPair temp_register) {
1475 // create a new store following the last store
1476 // to move from the temp_register to the original
1477 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1479 // break the cycle of links and insert new_store at the end
1480 // break the reverse link.
1481 MoveOperation* p = prev();
1482 assert(p->next() == this, "must be");
1483 _prev = NULL;
1484 p->_next = new_store;
1485 new_store->_prev = p;
1487 // change the original store to save its value in the temp.
1488 set_dst(-1, temp_register);
1489 }
1491 void link(GrowableArray<MoveOperation*>& killer) {
1492 // link this store in front of the store that it depends on
1493 MoveOperation* n = killer.at_grow(src_id(), NULL);
1494 if (n != NULL) {
1495 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1496 _next = n;
1497 n->_prev = this;
1498 }
1499 }
1500 };
1502 private:
1503 GrowableArray<MoveOperation*> edges;
1505 public:
1506 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1507 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1508 // Move operations where the dest is the stack can all be
1509 // scheduled first since they can't interfere with the other moves.
1510 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1511 if (in_sig_bt[i] == T_ARRAY) {
1512 c_arg--;
1513 if (out_regs[c_arg].first()->is_stack() &&
1514 out_regs[c_arg + 1].first()->is_stack()) {
1515 arg_order.push(i);
1516 arg_order.push(c_arg);
1517 } else {
1518 if (out_regs[c_arg].first()->is_stack() ||
1519 in_regs[i].first() == out_regs[c_arg].first()) {
1520 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1521 } else {
1522 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1523 }
1524 }
1525 } else if (in_sig_bt[i] == T_VOID) {
1526 arg_order.push(i);
1527 arg_order.push(c_arg);
1528 } else {
1529 if (out_regs[c_arg].first()->is_stack() ||
1530 in_regs[i].first() == out_regs[c_arg].first()) {
1531 arg_order.push(i);
1532 arg_order.push(c_arg);
1533 } else {
1534 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1535 }
1536 }
1537 }
1538 // Break any cycles in the register moves and emit them in the
1539 // proper order.
1540 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1541 for (int i = 0; i < stores->length(); i++) {
1542 arg_order.push(stores->at(i)->src_index());
1543 arg_order.push(stores->at(i)->dst_index());
1544 }
1545 }
1547 // Collect all the move operations
1548 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1549 if (src.first() == dst.first()) return;
1550 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1551 }
1553 // Walk the edges breaking cycles between moves. The result list
1554 // can be walked in order to produce the proper set of loads
1555 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1556 // Record which moves kill which values
1557 GrowableArray<MoveOperation*> killer;
1558 for (int i = 0; i < edges.length(); i++) {
1559 MoveOperation* s = edges.at(i);
1560 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1561 killer.at_put_grow(s->dst_id(), s, NULL);
1562 }
1563 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1564 "make sure temp isn't in the registers that are killed");
1566 // create links between loads and stores
1567 for (int i = 0; i < edges.length(); i++) {
1568 edges.at(i)->link(killer);
1569 }
1571 // at this point, all the move operations are chained together
1572 // in a doubly linked list. Processing it backwards finds
1573 // the beginning of the chain, forwards finds the end. If there's
1574 // a cycle it can be broken at any point, so pick an edge and walk
1575 // backward until the list ends or we end where we started.
1576 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1577 for (int e = 0; e < edges.length(); e++) {
1578 MoveOperation* s = edges.at(e);
1579 if (!s->is_processed()) {
1580 MoveOperation* start = s;
1581 // search for the beginning of the chain or cycle
1582 while (start->prev() != NULL && start->prev() != s) {
1583 start = start->prev();
1584 }
1585 if (start->prev() == s) {
1586 start->break_cycle(temp_register);
1587 }
1588 // walk the chain forward, inserting into the store list
1589 while (start != NULL) {
1590 stores->append(start);
1591 start->set_processed();
1592 start = start->next();
1593 }
1594 }
1595 }
1596 return stores;
1597 }
1598 };
1600 static void verify_oop_args(MacroAssembler* masm,
1601 int total_args_passed,
1602 const BasicType* sig_bt,
1603 const VMRegPair* regs) {
1604 Register temp_reg = rbx; // not part of any compiled calling seq
1605 if (VerifyOops) {
1606 for (int i = 0; i < total_args_passed; i++) {
1607 if (sig_bt[i] == T_OBJECT ||
1608 sig_bt[i] == T_ARRAY) {
1609 VMReg r = regs[i].first();
1610 assert(r->is_valid(), "bad oop arg");
1611 if (r->is_stack()) {
1612 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1613 __ verify_oop(temp_reg);
1614 } else {
1615 __ verify_oop(r->as_Register());
1616 }
1617 }
1618 }
1619 }
1620 }
1622 static void gen_special_dispatch(MacroAssembler* masm,
1623 int total_args_passed,
1624 int comp_args_on_stack,
1625 vmIntrinsics::ID special_dispatch,
1626 const BasicType* sig_bt,
1627 const VMRegPair* regs) {
1628 verify_oop_args(masm, total_args_passed, sig_bt, regs);
1630 // Now write the args into the outgoing interpreter space
1631 bool has_receiver = false;
1632 Register receiver_reg = noreg;
1633 int member_arg_pos = -1;
1634 Register member_reg = noreg;
1635 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(special_dispatch);
1636 if (ref_kind != 0) {
1637 member_arg_pos = total_args_passed - 1; // trailing MemberName argument
1638 member_reg = rbx; // known to be free at this point
1639 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1640 } else if (special_dispatch == vmIntrinsics::_invokeBasic) {
1641 has_receiver = true;
1642 } else {
1643 guarantee(false, err_msg("special_dispatch=%d", special_dispatch));
1644 }
1646 if (member_reg != noreg) {
1647 // Load the member_arg into register, if necessary.
1648 assert(member_arg_pos >= 0 && member_arg_pos < total_args_passed, "oob");
1649 assert(sig_bt[member_arg_pos] == T_OBJECT, "dispatch argument must be an object");
1650 VMReg r = regs[member_arg_pos].first();
1651 assert(r->is_valid(), "bad member arg");
1652 if (r->is_stack()) {
1653 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1654 } else {
1655 // no data motion is needed
1656 member_reg = r->as_Register();
1657 }
1658 }
1660 if (has_receiver) {
1661 // Make sure the receiver is loaded into a register.
1662 assert(total_args_passed > 0, "oob");
1663 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1664 VMReg r = regs[0].first();
1665 assert(r->is_valid(), "bad receiver arg");
1666 if (r->is_stack()) {
1667 // Porting note: This assumes that compiled calling conventions always
1668 // pass the receiver oop in a register. If this is not true on some
1669 // platform, pick a temp and load the receiver from stack.
1670 assert(false, "receiver always in a register");
1671 receiver_reg = j_rarg0; // known to be free at this point
1672 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1673 } else {
1674 // no data motion is needed
1675 receiver_reg = r->as_Register();
1676 }
1677 }
1679 // Figure out which address we are really jumping to:
1680 MethodHandles::generate_method_handle_dispatch(masm, special_dispatch,
1681 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1682 }
1684 // ---------------------------------------------------------------------------
1685 // Generate a native wrapper for a given method. The method takes arguments
1686 // in the Java compiled code convention, marshals them to the native
1687 // convention (handlizes oops, etc), transitions to native, makes the call,
1688 // returns to java state (possibly blocking), unhandlizes any result and
1689 // returns.
1690 //
1691 // Critical native functions are a shorthand for the use of
1692 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1693 // functions. The wrapper is expected to unpack the arguments before
1694 // passing them to the callee and perform checks before and after the
1695 // native call to ensure that the GC_locker
1696 // lock_critical/unlock_critical semantics are followed. Some other
1697 // parts of JNI setup are skipped, like the tear-down of the JNI handle
1698 // block and the check for pending exceptions, since it's impossible for
1699 // them to be thrown.
1700 //
1701 // They are roughly structured like this:
1702 // if (GC_locker::needs_gc())
1703 // SharedRuntime::block_for_jni_critical();
1704 // transition to thread_in_native
1705 // unpack array arguments and call native entry point
1706 // check for safepoint in progress
1707 // check if any thread suspend flags are set
1708 // call into JVM and possibly unlock the JNI critical
1709 // if a GC was suppressed while in the critical native.
1710 // transition back to thread_in_Java
1711 // return to caller
1712 //
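// Hypothetical sketch (assuming the JavaCritical_ lookup convention; the
// names are illustrative): for
//   class p.C { static native void sum(int[] a); }
// the critical entry would be a C function such as
//   void JavaCritical_p_C_sum(jint a_len, jint* a_body);
// i.e. no JNIEnv*/jclass, with each array argument expanded into a
// (length, body) pair as described above.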
1713 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1714 methodHandle method,
1715 int compile_id,
1716 int total_in_args,
1717 int comp_args_on_stack,
1718 BasicType* in_sig_bt,
1719 VMRegPair* in_regs,
1720 BasicType ret_type) {
1721 if (method->is_method_handle_intrinsic()) {
1722 vmIntrinsics::ID iid = method->intrinsic_id();
1723 intptr_t start = (intptr_t)__ pc();
1724 int vep_offset = ((intptr_t)__ pc()) - start;
1725 gen_special_dispatch(masm,
1726 total_in_args,
1727 comp_args_on_stack,
1728 method->intrinsic_id(),
1729 in_sig_bt,
1730 in_regs);
1731 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1732 __ flush();
1733 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1734 return nmethod::new_native_nmethod(method,
1735 compile_id,
1736 masm->code(),
1737 vep_offset,
1738 frame_complete,
1739 stack_slots / VMRegImpl::slots_per_word,
1740 in_ByteSize(-1),
1741 in_ByteSize(-1),
1742 (OopMapSet*)NULL);
1743 }
1744 bool is_critical_native = true;
1745 address native_func = method->critical_native_function();
1746 if (native_func == NULL) {
1747 native_func = method->native_function();
1748 is_critical_native = false;
1749 }
1750 assert(native_func != NULL, "must have function");
1752 // An OopMap for lock (and class if static)
1753 OopMapSet *oop_maps = new OopMapSet();
1754 intptr_t start = (intptr_t)__ pc();
1756 // We have received a description of where all the java args are located
1757 // on entry to the wrapper. We need to convert these args to where
1758 // the jni function will expect them. To figure out where they go
1759 // we convert the java signature to a C signature by inserting
1760 // the hidden arguments as arg[0] and possibly arg[1] (static method)
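// For example (illustrative): a static native taking (jobject, jint) gets
// the C signature (JNIEnv*, jclass, jobject, jint); the code below records
// this as out_sig_bt == { T_ADDRESS, T_OBJECT, T_OBJECT, T_INT }.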
1762 int total_c_args = total_in_args;
1763 if (!is_critical_native) {
1764 total_c_args += 1;
1765 if (method->is_static()) {
1766 total_c_args++;
1767 }
1768 } else {
1769 for (int i = 0; i < total_in_args; i++) {
1770 if (in_sig_bt[i] == T_ARRAY) {
1771 total_c_args++;
1772 }
1773 }
1774 }
1776 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1777 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1778 BasicType* in_elem_bt = NULL;
1780 int argc = 0;
1781 if (!is_critical_native) {
1782 out_sig_bt[argc++] = T_ADDRESS;
1783 if (method->is_static()) {
1784 out_sig_bt[argc++] = T_OBJECT;
1785 }
1787 for (int i = 0; i < total_in_args ; i++ ) {
1788 out_sig_bt[argc++] = in_sig_bt[i];
1789 }
1790 } else {
1791 Thread* THREAD = Thread::current();
1792 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1793 SignatureStream ss(method->signature());
1794 for (int i = 0; i < total_in_args ; i++ ) {
1795 if (in_sig_bt[i] == T_ARRAY) {
1796 // Arrays are passed as int, elem* pair
1797 out_sig_bt[argc++] = T_INT;
1798 out_sig_bt[argc++] = T_ADDRESS;
1799 Symbol* atype = ss.as_symbol(CHECK_NULL);
1800 const char* at = atype->as_C_string();
1801 if (strlen(at) == 2) {
1802 assert(at[0] == '[', "must be");
1803 switch (at[1]) {
1804 case 'B': in_elem_bt[i] = T_BYTE; break;
1805 case 'C': in_elem_bt[i] = T_CHAR; break;
1806 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1807 case 'F': in_elem_bt[i] = T_FLOAT; break;
1808 case 'I': in_elem_bt[i] = T_INT; break;
1809 case 'J': in_elem_bt[i] = T_LONG; break;
1810 case 'S': in_elem_bt[i] = T_SHORT; break;
1811 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1812 default: ShouldNotReachHere();
1813 }
1814 }
1815 } else {
1816 out_sig_bt[argc++] = in_sig_bt[i];
1817 in_elem_bt[i] = T_VOID;
1818 }
1819 if (in_sig_bt[i] != T_VOID) {
1820 assert(in_sig_bt[i] == ss.type(), "must match");
1821 ss.next();
1822 }
1823 }
1824 }
1826 // Now figure out where the args must be stored and how much stack space
1827 // they require.
1828 int out_arg_slots;
1829 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1831 // Compute framesize for the wrapper. We need to handlize all oops in
1832 // incoming registers
1834 // Calculate the total number of stack slots we will need.
1836 // First count the abi requirement plus all of the outgoing args
1837 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1839 // Now the space for the inbound oop handle area
1840 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1841 if (is_critical_native) {
1842 // Critical natives may have to call out so they need a save area
1843 // for register arguments.
1844 int double_slots = 0;
1845 int single_slots = 0;
1846 for ( int i = 0; i < total_in_args; i++) {
1847 if (in_regs[i].first()->is_Register()) {
1848 const Register reg = in_regs[i].first()->as_Register();
1849 switch (in_sig_bt[i]) {
1850 case T_BOOLEAN:
1851 case T_BYTE:
1852 case T_SHORT:
1853 case T_CHAR:
1854 case T_INT: single_slots++; break;
1855 case T_ARRAY: // specific to LP64 (7145024)
1856 case T_LONG: double_slots++; break;
1857 default: ShouldNotReachHere();
1858 }
1859 } else if (in_regs[i].first()->is_XMMRegister()) {
1860 switch (in_sig_bt[i]) {
1861 case T_FLOAT: single_slots++; break;
1862 case T_DOUBLE: double_slots++; break;
1863 default: ShouldNotReachHere();
1864 }
1865 } else if (in_regs[i].first()->is_FloatRegister()) {
1866 ShouldNotReachHere();
1867 }
1868 }
1869 total_save_slots = double_slots * 2 + single_slots;
1870 // align the save area
1871 if (double_slots != 0) {
1872 stack_slots = round_to(stack_slots, 2);
1873 }
1874 }
1876 int oop_handle_offset = stack_slots;
1877 stack_slots += total_save_slots;
1879 // Now any space we need for handlizing a klass if static method
1881 int klass_slot_offset = 0;
1882 int klass_offset = -1;
1883 int lock_slot_offset = 0;
1884 bool is_static = false;
1886 if (method->is_static()) {
1887 klass_slot_offset = stack_slots;
1888 stack_slots += VMRegImpl::slots_per_word;
1889 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1890 is_static = true;
1891 }
1893 // Plus a lock if needed
1895 if (method->is_synchronized()) {
1896 lock_slot_offset = stack_slots;
1897 stack_slots += VMRegImpl::slots_per_word;
1898 }
1900 // Now a place (+2) to save return values or temp during shuffling
1901 // + 4 for return address (which we own) and saved rbp
1902 stack_slots += 6;
1904 // OK, the space we have allocated will look like:
1905 //
1906 //
1907 // FP-> | |
1908 // |---------------------|
1909 // | 2 slots for moves |
1910 // |---------------------|
1911 // | lock box (if sync) |
1912 // |---------------------| <- lock_slot_offset
1913 // | klass (if static) |
1914 // |---------------------| <- klass_slot_offset
1915 // | oopHandle area |
1916 // |---------------------| <- oop_handle_offset (6 java arg registers)
1917 // | outbound memory |
1918 // | based arguments |
1919 // | |
1920 // |---------------------|
1921 // | |
1922 // SP-> | out_preserved_slots |
1923 //
1924 //
1927 // Now compute the actual number of stack words we need, rounding to keep
1928 // the stack properly aligned.
1929 stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1931 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
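// Worked example (illustrative): with StackAlignmentInBytes == 16 the
// alignment is 16 / 4 == 4 slots, so e.g. 45 accumulated slots round up to
// 48, giving stack_size == 48 * 4 == 192 bytes, a 16-byte-aligned frame.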
1933 // First thing, make an IC check to see if we should even be here
1935 // We are free to use all registers as temps without saving them and
1936 // restoring them except rbp. rbp is the only callee save register
1937 // as far as the interpreter and the compiler(s) are concerned.
1940 const Register ic_reg = rax;
1941 const Register receiver = j_rarg0;
1943 Label hit;
1944 Label exception_pending;
1946 assert_different_registers(ic_reg, receiver, rscratch1);
1947 __ verify_oop(receiver);
1948 __ load_klass(rscratch1, receiver);
1949 __ cmpq(ic_reg, rscratch1);
1950 __ jcc(Assembler::equal, hit);
1952 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1954 // Verified entry point must be aligned
1955 __ align(8);
1957 __ bind(hit);
1959 int vep_offset = ((intptr_t)__ pc()) - start;
1961 // The instruction at the verified entry point must be 5 bytes or longer
1962 // because it can be patched on the fly by make_non_entrant. The stack bang
1963 // instruction fits that requirement.
1965 // Generate stack overflow check
1967 if (UseStackBanging) {
1968 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1969 } else {
1970 // need a 5 byte instruction to allow MT safe patching to non-entrant
1971 __ fat_nop();
1972 }
1974 // Generate a new frame for the wrapper.
1975 __ enter();
1976 // -2 because return address is already present and so is saved rbp
1977 __ subptr(rsp, stack_size - 2*wordSize);
1979 // Frame is now completed as far as size and linkage.
1980 int frame_complete = ((intptr_t)__ pc()) - start;
1982 #ifdef ASSERT
1983 {
1984 Label L;
1985 __ mov(rax, rsp);
1986 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
1987 __ cmpptr(rax, rsp);
1988 __ jcc(Assembler::equal, L);
1989 __ stop("improperly aligned stack");
1990 __ bind(L);
1991 }
1992 #endif /* ASSERT */
1995 // We use r14 as the oop handle for the receiver/klass
1996 // It is callee save so it survives the call to native
1998 const Register oop_handle_reg = r14;
2000 if (is_critical_native) {
2001 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2002 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2003 }
2005 //
2006 // We immediately shuffle the arguments so that any vm call we have to
2007 // make from here on out (sync slow path, jvmti, etc.) we will have
2008 // captured the oops from our caller and have a valid oopMap for
2009 // them.
2011 // -----------------
2012 // The Grand Shuffle
2014 // The Java calling convention is either equal (linux) or denser (win64) than the
2015 // C calling convention. However, because of the jni_env argument, the C calling
2016 // convention always has at least one more (and two for static) arguments than Java.
2017 // Therefore if we move the args from java -> c backwards then we will never have
2018 // a register->register conflict and we don't have to build a dependency graph
2019 // and figure out how to break any cycles.
2020 //
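// Concrete sketch (illustrative; Linux x86_64 register assignments): with
// j_rarg0..j_rarg4 == rsi, rdx, rcx, r8, r9 and c_rarg1..c_rarg5 the same
// registers, Java arg k usually moves to C arg k+1 in the very register it
// already occupies once JNIEnv* has claimed c_rarg0 (rdi). Walking from the
// last arg backwards therefore never overwrites a register that is still
// needed as a source.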
2022 // Record esp-based slot for receiver on stack for non-static methods
2023 int receiver_offset = -1;
2025 // This is a trick. We double the stack slots so we can claim
2026 // the oops in the caller's frame. Since we are sure to have
2027 // more args than the caller, doubling is enough to make
2028 // sure we can capture all the incoming oop args from the
2029 // caller.
2030 //
2031 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2033 // Mark location of rbp (someday)
2034 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2036 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2037 // All inbound args are referenced based on rbp and all outbound args via rsp.
2040 #ifdef ASSERT
2041 bool reg_destroyed[RegisterImpl::number_of_registers];
2042 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2043 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2044 reg_destroyed[r] = false;
2045 }
2046 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2047 freg_destroyed[f] = false;
2048 }
2050 #endif /* ASSERT */
2052 // This may iterate in two different directions depending on the
2053 // kind of native it is. The reason is that for regular JNI natives
2054 // the incoming and outgoing registers are offset upwards and for
2055 // critical natives they are offset down.
2056 GrowableArray<int> arg_order(2 * total_in_args);
2057 VMRegPair tmp_vmreg;
2058 tmp_vmreg.set1(rbx->as_VMReg());
2060 if (!is_critical_native) {
2061 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2062 arg_order.push(i);
2063 arg_order.push(c_arg);
2064 }
2065 } else {
2066 // Compute a valid move order, using tmp_vmreg to break any cycles
2067 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2068 }
2070 int temploc = -1;
2071 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2072 int i = arg_order.at(ai);
2073 int c_arg = arg_order.at(ai + 1);
2074 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2075 if (c_arg == -1) {
2076 assert(is_critical_native, "should only be required for critical natives");
2077 // This arg needs to be moved to a temporary
2078 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2079 in_regs[i] = tmp_vmreg;
2080 temploc = i;
2081 continue;
2082 } else if (i == -1) {
2083 assert(is_critical_native, "should only be required for critical natives");
2084 // Read from the temporary location
2085 assert(temploc != -1, "must be valid");
2086 i = temploc;
2087 temploc = -1;
2088 }
2089 #ifdef ASSERT
2090 if (in_regs[i].first()->is_Register()) {
2091 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2092 } else if (in_regs[i].first()->is_XMMRegister()) {
2093 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2094 }
2095 if (out_regs[c_arg].first()->is_Register()) {
2096 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2097 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2098 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2099 }
2100 #endif /* ASSERT */
2101 switch (in_sig_bt[i]) {
2102 case T_ARRAY:
2103 if (is_critical_native) {
2104 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2105 c_arg++;
2106 #ifdef ASSERT
2107 if (out_regs[c_arg].first()->is_Register()) {
2108 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2109 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2110 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2111 }
2112 #endif
2113 break;
2114 }
2115 case T_OBJECT:
2116 assert(!is_critical_native, "no oop arguments");
2117 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2118 ((i == 0) && (!is_static)),
2119 &receiver_offset);
2120 break;
2121 case T_VOID:
2122 break;
2124 case T_FLOAT:
2125 float_move(masm, in_regs[i], out_regs[c_arg]);
2126 break;
2128 case T_DOUBLE:
2129 assert( i + 1 < total_in_args &&
2130 in_sig_bt[i + 1] == T_VOID &&
2131 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2132 double_move(masm, in_regs[i], out_regs[c_arg]);
2133 break;
2135 case T_LONG :
2136 long_move(masm, in_regs[i], out_regs[c_arg]);
2137 break;
2139 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2141 default:
2142 move32_64(masm, in_regs[i], out_regs[c_arg]);
2143 }
2144 }
2146 // point c_arg at the first arg that is already loaded in case we
2147 // need to spill before we call out
2148 int c_arg = total_c_args - total_in_args;
2150 // Pre-load a static method's oop into r14. Used both by locking code and
2151 // the normal JNI call code.
2152 if (method->is_static() && !is_critical_native) {
2154 // load oop into a register
2155 __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
2157 // Now handlize the static class mirror; it's known not-null.
2158 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2159 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2161 // Now get the handle
2162 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2163 // store the klass handle as second argument
2164 __ movptr(c_rarg1, oop_handle_reg);
2165 // and protect the arg if we must spill
2166 c_arg--;
2167 }
2169 // Change state to native (we save the return address in the thread, since it might not
2170 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2171 // points into the right code segment. It does not have to be the correct return pc.
2172 // We use the same pc/oopMap repeatedly when we call out
2174 intptr_t the_pc = (intptr_t) __ pc();
2175 oop_maps->add_gc_map(the_pc - start, map);
2177 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2180 // We have all of the arguments set up at this point. We must not touch any
2181 // argument registers from here on (if we were to save/restore them there is no oopMap covering them).
2183 {
2184 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2185 // protect the args we've loaded
2186 save_args(masm, total_c_args, c_arg, out_regs);
2187 __ movoop(c_rarg1, JNIHandles::make_local(method()));
2188 __ call_VM_leaf(
2189 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2190 r15_thread, c_rarg1);
2191 restore_args(masm, total_c_args, c_arg, out_regs);
2192 }
2194 // RedefineClasses() tracing support for obsolete method entry
2195 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2196 // protect the args we've loaded
2197 save_args(masm, total_c_args, c_arg, out_regs);
2198 __ movoop(c_rarg1, JNIHandles::make_local(method()));
2199 __ call_VM_leaf(
2200 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2201 r15_thread, c_rarg1);
2202 restore_args(masm, total_c_args, c_arg, out_regs);
2203 }
2205 // Lock a synchronized method
2207 // Register definitions used by locking and unlocking
2209 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2210 const Register obj_reg = rbx; // Will contain the oop
2211 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2212 const Register old_hdr = r13; // value of old header at unlock time
2214 Label slow_path_lock;
2215 Label lock_done;
2217 if (method->is_synchronized()) {
2218 assert(!is_critical_native, "unhandled");
2221 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2223 // Get the handle (the 2nd argument)
2224 __ mov(oop_handle_reg, c_rarg1);
2226 // Get address of the box
2228 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2230 // Load the oop from the handle
2231 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2233 if (UseBiasedLocking) {
2234 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2235 }
2237 // Load immediate 1 into swap_reg %rax
2238 __ movl(swap_reg, 1);
2240 // Load (object->mark() | 1) into swap_reg %rax
2241 __ orptr(swap_reg, Address(obj_reg, 0));
2243 // Save (object->mark() | 1) into BasicLock's displaced header
2244 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2246 if (os::is_MP()) {
2247 __ lock();
2248 }
2250 // src -> dest iff dest == rax else rax <- dest
2251 __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2252 __ jcc(Assembler::equal, lock_done);
2254 // Hmm should this move to the slow path code area???
2256 // Test if the oopMark is an obvious stack pointer, i.e.,
2257 // 1) (mark & 3) == 0, and
2258 // 2) rsp <= mark < mark + os::pagesize()
2259 // These 3 tests can be done by evaluating the following
2260 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2261 // assuming both stack pointer and pagesize have their
2262 // least significant 2 bits clear.
2263 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
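// Worked example (illustrative): with a 4K page, 3 - os::vm_page_size() is
// 3 - 4096 == -4093 == 0x...fffff003, so the andptr below keeps only bits
// 0-1 and bits 12 and up of (mark - rsp). The result is zero exactly when
// mark - rsp lies in [0, 4096) with its low two bits clear, i.e. the mark
// points at a stack lock within a page of rsp and the lock is recursive.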
2265 __ subptr(swap_reg, rsp);
2266 __ andptr(swap_reg, 3 - os::vm_page_size());
2268 // Save the test result, for recursive case, the result is zero
2269 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2270 __ jcc(Assembler::notEqual, slow_path_lock);
2272 // Slow path will re-enter here
2274 __ bind(lock_done);
2275 }
2278 // Finally just about ready to make the JNI call
2281 // get JNIEnv* which is first argument to native
2282 if (!is_critical_native) {
2283 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2284 }
2286 // Now set thread in native
2287 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2289 __ call(RuntimeAddress(native_func));
2291 // Either restore the MXCSR register after returning from the JNI Call
2292 // or verify that it wasn't changed.
2293 if (RestoreMXCSROnJNICalls) {
2294 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
2296 }
2297 else if (CheckJNICalls ) {
2298 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
2299 }
2302 // Unpack native results.
2303 switch (ret_type) {
2304 case T_BOOLEAN: __ c2bool(rax); break;
2305 case T_CHAR : __ movzwl(rax, rax); break;
2306 case T_BYTE : __ sign_extend_byte (rax); break;
2307 case T_SHORT : __ sign_extend_short(rax); break;
2308 case T_INT : /* nothing to do */ break;
2309 case T_DOUBLE :
2310 case T_FLOAT :
2311 // Result is in xmm0; we'll save as needed
2312 break;
2313 case T_ARRAY: // Really a handle
2314 case T_OBJECT: // Really a handle
2315 break; // can't de-handlize until after safepoint check
2316 case T_VOID: break;
2317 case T_LONG: break;
2318 default : ShouldNotReachHere();
2319 }
2321 // Switch thread to "native transition" state before reading the synchronization state.
2322 // This additional state is necessary because reading and testing the synchronization
2323 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2324 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2325 // VM thread changes sync state to synchronizing and suspends threads for GC.
2326 // Thread A is resumed to finish this native method, but doesn't block here since it
2327 // didn't see any synchronization in progress, and escapes.
2328 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2330 if (os::is_MP()) {
2331 if (UseMembar) {
2332 // Force this write out before the read below
2333 __ membar(Assembler::Membar_mask_bits(
2334 Assembler::LoadLoad | Assembler::LoadStore |
2335 Assembler::StoreLoad | Assembler::StoreStore));
2336 } else {
2337 // Write serialization page so VM thread can do a pseudo remote membar.
2338 // We use the current thread pointer to calculate a thread specific
2339 // offset to write to within the page. This minimizes bus traffic
2340 // due to cache line collision.
2341 __ serialize_memory(r15_thread, rcx);
2342 }
2343 }
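// Rough sketch of the non-membar path (assumed behavior of
// serialize_memory): the thread does a plain store into a process-wide
// serialization page at a thread-specific offset; when the VM thread
// protects that page, any thread whose state write hasn't become visible
// yet faults, which gives the VM the ordering guarantee without putting a
// fence in this hot path.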
2345 Label after_transition;
2347 // check for safepoint operation in progress and/or pending suspend requests
2348 {
2349 Label Continue;
2351 __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2352 SafepointSynchronize::_not_synchronized);
2354 Label L;
2355 __ jcc(Assembler::notEqual, L);
2356 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2357 __ jcc(Assembler::equal, Continue);
2358 __ bind(L);
2360 // Don't use call_VM as it will see a possible pending exception and forward it
2361 // and never return here preventing us from clearing _last_native_pc down below.
2362 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2363 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2364 // by hand.
2365 //
2366 save_native_result(masm, ret_type, stack_slots);
2367 __ mov(c_rarg0, r15_thread);
2368 __ mov(r12, rsp); // remember sp
2369 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2370 __ andptr(rsp, -16); // align stack as required by ABI
2371 if (!is_critical_native) {
2372 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2373 } else {
2374 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2375 }
2376 __ mov(rsp, r12); // restore sp
2377 __ reinit_heapbase();
2378 // Restore any method result value
2379 restore_native_result(masm, ret_type, stack_slots);
2381 if (is_critical_native) {
2382 // The call above performed the transition to thread_in_Java so
2383 // skip the transition logic below.
2384 __ jmpb(after_transition);
2385 }
2387 __ bind(Continue);
2388 }
2390 // change thread state
2391 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2392 __ bind(after_transition);
2394 Label reguard;
2395 Label reguard_done;
2396 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2397 __ jcc(Assembler::equal, reguard);
2398 __ bind(reguard_done);
2400 // The native result, if any, is live here
2402 // Unlock
2403 Label unlock_done;
2404 Label slow_path_unlock;
2405 if (method->is_synchronized()) {
2407 // Get locked oop from the handle we passed to jni
2408 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2410 Label done;
2412 if (UseBiasedLocking) {
2413 __ biased_locking_exit(obj_reg, old_hdr, done);
2414 }
2416 // Simple recursive lock?
2418 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2419 __ jcc(Assembler::equal, done);
2421 // Must save rax if it is live now because cmpxchg must use it
2422 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2423 save_native_result(masm, ret_type, stack_slots);
2424 }
2427 // get address of the stack lock
2428 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2429 // get old displaced header
2430 __ movptr(old_hdr, Address(rax, 0));
2432 // Atomic swap old header if oop still contains the stack lock
2433 if (os::is_MP()) {
2434 __ lock();
2435 }
2436 __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2437 __ jcc(Assembler::notEqual, slow_path_unlock);
2439 // slow path re-enters here
2440 __ bind(unlock_done);
2441 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2442 restore_native_result(masm, ret_type, stack_slots);
2443 }
2445 __ bind(done);
2447 }
2448 {
2449 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2450 save_native_result(masm, ret_type, stack_slots);
2451 __ movoop(c_rarg1, JNIHandles::make_local(method()));
2452 __ call_VM_leaf(
2453 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2454 r15_thread, c_rarg1);
2455 restore_native_result(masm, ret_type, stack_slots);
2456 }
2458 __ reset_last_Java_frame(false, true);
2460 // Unpack oop result
2461 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2462 Label L;
2463 __ testptr(rax, rax);
2464 __ jcc(Assembler::zero, L);
2465 __ movptr(rax, Address(rax, 0));
2466 __ bind(L);
2467 __ verify_oop(rax);
2468 }
2470 if (!is_critical_native) {
2471 // reset handle block
2472 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2473 __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2474 }
2476 // pop our frame
2478 __ leave();
2480 if (!is_critical_native) {
2481 // Any exception pending?
2482 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2483 __ jcc(Assembler::notEqual, exception_pending);
2484 }
2486 // Return
2488 __ ret(0);
2490 // Unexpected paths are out of line and go here
2492 if (!is_critical_native) {
2493 // forward the exception
2494 __ bind(exception_pending);
2496 // and forward the exception
2497 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2498 }
2500 // Slow path locking & unlocking
2501 if (method->is_synchronized()) {
2503 // BEGIN Slow path lock
2504 __ bind(slow_path_lock);
2506 // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2507 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2509 // protect the args we've loaded
2510 save_args(masm, total_c_args, c_arg, out_regs);
2512 __ mov(c_rarg0, obj_reg);
2513 __ mov(c_rarg1, lock_reg);
2514 __ mov(c_rarg2, r15_thread);
2516 // Not a leaf but we have last_Java_frame setup as we want
2517 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2518 restore_args(masm, total_c_args, c_arg, out_regs);
2520 #ifdef ASSERT
2521 { Label L;
2522 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2523 __ jcc(Assembler::equal, L);
2524 __ stop("no pending exception allowed on exit from monitorenter");
2525 __ bind(L);
2526 }
2527 #endif
2528 __ jmp(lock_done);
2530 // END Slow path lock
2532 // BEGIN Slow path unlock
2533 __ bind(slow_path_unlock);
2535 // If we haven't already saved the native result we must save it now as xmm registers
2536 // are still exposed.
2538 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2539 save_native_result(masm, ret_type, stack_slots);
2540 }
2542 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2544 __ mov(c_rarg0, obj_reg);
2545 __ mov(r12, rsp); // remember sp
2546 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2547 __ andptr(rsp, -16); // align stack as required by ABI
2549 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2550 // NOTE that obj_reg == rbx currently
2551 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2552 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2554 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2555 __ mov(rsp, r12); // restore sp
2556 __ reinit_heapbase();
2557 #ifdef ASSERT
2558 {
2559 Label L;
2560 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2561 __ jcc(Assembler::equal, L);
2562 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2563 __ bind(L);
2564 }
2565 #endif /* ASSERT */
2567 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2569 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2570 restore_native_result(masm, ret_type, stack_slots);
2571 }
2572 __ jmp(unlock_done);
2574 // END Slow path unlock
2576 } // synchronized
2578 // SLOW PATH Reguard the stack if needed
2580 __ bind(reguard);
2581 save_native_result(masm, ret_type, stack_slots);
2582 __ mov(r12, rsp); // remember sp
2583 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2584 __ andptr(rsp, -16); // align stack as required by ABI
2585 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2586 __ mov(rsp, r12); // restore sp
2587 __ reinit_heapbase();
2588 restore_native_result(masm, ret_type, stack_slots);
2589 // and continue
2590 __ jmp(reguard_done);
2594 __ flush();
2596 nmethod *nm = nmethod::new_native_nmethod(method,
2597 compile_id,
2598 masm->code(),
2599 vep_offset,
2600 frame_complete,
2601 stack_slots / VMRegImpl::slots_per_word,
2602 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2603 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2604 oop_maps);
2606 if (is_critical_native) {
2607 nm->set_lazy_critical_native(true);
2608 }
2610 return nm;
2612 }
2614 #ifdef HAVE_DTRACE_H
2615 // ---------------------------------------------------------------------------
2616 // Generate a dtrace nmethod for a given signature. The method takes arguments
2617 // in the Java compiled code convention, marshals them to the native
2618 // abi and then leaves nops at the position you would expect to call a native
2619 // function. When the probe is enabled the nops are replaced with a trap
2620 // instruction that dtrace inserts and the trace will cause a notification
2621 // to dtrace.
2622 //
2623 // The probes are only able to take primitive types and java/lang/String as
2624 // arguments. No other java types are allowed. Strings are converted to utf8
2625 // strings so that from dtrace's point of view java strings are converted to C
2626 // strings. There is an arbitrary fixed limit on the total space that a method
2627 // can use for converting the strings. (256 chars per string in the signature).
2628 // So any java string larger than this is truncated.
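// Illustrative example (hypothetical method name): for a probe placed on
//   void log(String msg, int level)
// the trap signature dtrace sees is (char* msg_utf8, jint level); msg is
// copied as utf8 into the wrapper's frame, limited to the per-string
// max_dtrace_string_size budget reserved below.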
2630 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2631 static bool offsets_initialized = false;
2634 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
2635 methodHandle method) {
2638 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2639 // be single threaded in this method.
2640 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2642 if (!offsets_initialized) {
2643 fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize;
2644 fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize;
2645 fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize;
2646 fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize;
2647 fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize;
2648 fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize;
2650 fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize;
2651 fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize;
2652 fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize;
2653 fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize;
2654 fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize;
2655 fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize;
2656 fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize;
2657 fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize;
2659 offsets_initialized = true;
2660 }
2661 // Fill in the signature array, for the calling-convention call.
2662 int total_args_passed = method->size_of_parameters();
2664 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2665 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2667 // The signature we are going to use for the trap that dtrace will see:
2668 // java/lang/String is converted, we drop "this", and any other object
2669 // is converted to NULL. (A one-slot java/lang/Long object reference
2670 // is converted to a two-slot long, which is why we double the allocation).
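// For example (illustrative, ignoring the receiver): a Java signature of
// (String, Long, float) produces out_sig_bt == { T_ADDRESS, T_LONG, T_VOID,
// T_INT }: the String becomes a utf8 pointer, the one-slot Long reference
// widens to a two-slot long, and the float is converted to an int.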
2671 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2672 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2674 int i=0;
2675 int total_strings = 0;
2676 int first_arg_to_pass = 0;
2677 int total_c_args = 0;
2679 // Skip the receiver as dtrace doesn't want to see it
2680 if( !method->is_static() ) {
2681 in_sig_bt[i++] = T_OBJECT;
2682 first_arg_to_pass = 1;
2683 }
2685 // We need to convert the java args to where a native (non-jni) function
2686 // would expect them. To figure out where they go we convert the java
2687 // signature to a C signature.
2689 SignatureStream ss(method->signature());
2690 for ( ; !ss.at_return_type(); ss.next()) {
2691 BasicType bt = ss.type();
2692 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2693 out_sig_bt[total_c_args++] = bt;
2694 if( bt == T_OBJECT) {
2695 Symbol* s = ss.as_symbol_or_null(); // symbol is created
2696 if (s == vmSymbols::java_lang_String()) {
2697 total_strings++;
2698 out_sig_bt[total_c_args-1] = T_ADDRESS;
2699 } else if (s == vmSymbols::java_lang_Boolean() ||
2700 s == vmSymbols::java_lang_Character() ||
2701 s == vmSymbols::java_lang_Byte() ||
2702 s == vmSymbols::java_lang_Short() ||
2703 s == vmSymbols::java_lang_Integer() ||
2704 s == vmSymbols::java_lang_Float()) {
2705 out_sig_bt[total_c_args-1] = T_INT;
2706 } else if (s == vmSymbols::java_lang_Long() ||
2707 s == vmSymbols::java_lang_Double()) {
2708 out_sig_bt[total_c_args-1] = T_LONG;
2709 out_sig_bt[total_c_args++] = T_VOID;
2710 }
2711 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2712 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2713 // We convert double to long
2714 out_sig_bt[total_c_args-1] = T_LONG;
2715 out_sig_bt[total_c_args++] = T_VOID;
2716 } else if ( bt == T_FLOAT) {
2717 // We convert float to int
2718 out_sig_bt[total_c_args-1] = T_INT;
2719 }
2720 }
2722 assert(i==total_args_passed, "validly parsed signature");
2724 // Now get the compiled-Java layout as input arguments
2725 int comp_args_on_stack;
2726 comp_args_on_stack = SharedRuntime::java_calling_convention(
2727 in_sig_bt, in_regs, total_args_passed, false);
2729 // Now figure out where the args must be stored and how much stack space
2730 // they require (neglecting out_preserve_stack_slots but space for storing
2731 // the 1st six register arguments). It's weird; see int_stk_helper.
2733 int out_arg_slots;
2734 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2736 // Calculate the total number of stack slots we will need.
2738 // First count the abi requirement plus all of the outgoing args
2739 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2741 // Now space for the string(s) we must convert
2742 int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2743 for (i = 0; i < total_strings ; i++) {
2744 string_locs[i] = stack_slots;
2745 stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2746 }
2748 // Plus the temps we might need to juggle register args
2749 // regs take two slots each
2750 stack_slots += (Argument::n_int_register_parameters_c +
2751 Argument::n_float_register_parameters_c) * 2;
2754 // + 4 for return address (which we own) and saved rbp
2756 stack_slots += 4;
2758 // OK, the space we have allocated will look like:
2759 //
2760 //
2761 // FP-> | |
2762 // |---------------------|
2763 // | string[n] |
2764 // |---------------------| <- string_locs[n]
2765 // | string[n-1] |
2766 // |---------------------| <- string_locs[n-1]
2767 // | ... |
2768 // | ... |
2769 // |---------------------| <- string_locs[1]
2770 // | string[0] |
2771 // |---------------------| <- string_locs[0]
2772 // | outbound memory |
2773 // | based arguments |
2774 // | |
2775 // |---------------------|
2776 // | |
2777 // SP-> | out_preserved_slots |
2778 //
2779 //
2781 // Now compute the actual number of stack words we need, rounding to keep
2782 // the stack properly aligned.
2783 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2785 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2787 intptr_t start = (intptr_t)__ pc();
2789 // First thing, make an IC check to see if we should even be here
2791 // We are free to use all registers as temps without saving them and
2792 // restoring them except rbp. rbp is the only callee save register
2793 // as far as the interpreter and the compiler(s) are concerned.
2795 const Register ic_reg = rax;
2796 const Register receiver = rcx;
2797 Label hit;
2798 Label exception_pending;
2801 __ verify_oop(receiver);
2802 __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2803 __ jcc(Assembler::equal, hit);
2805 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2807 // The verified entry must be aligned for code patching,
2808 // and the first 5 bytes must be in the same cache line;
2809 // if we align at 8 then we can be sure the 5 bytes are in the same line.
2810 __ align(8);
2812 __ bind(hit);
2814 int vep_offset = ((intptr_t)__ pc()) - start;
2817 // The instruction at the verified entry point must be 5 bytes or longer
2818 // because it can be patched on the fly by make_non_entrant. The stack bang
2819 // instruction fits that requirement.
2821 // Generate stack overflow check
2823 if (UseStackBanging) {
2824 if (stack_size <= StackShadowPages*os::vm_page_size()) {
2825 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2826 } else {
2827 __ movl(rax, stack_size);
2828 __ bang_stack_size(rax, rbx);
2829 }
2830 } else {
2831 // need a 5 byte instruction to allow MT safe patching to non-entrant
2832 __ fat_nop();
2833 }
2835 assert(((uintptr_t)__ pc() - start - vep_offset) >= 5,
2836 "valid size for make_non_entrant");
2838 // Generate a new frame for the wrapper.
2839 __ enter();
2841 // -2 because return address is already present and so is saved rbp
2842 if (stack_size - 2*wordSize != 0) {
2843 __ subq(rsp, stack_size - 2*wordSize);
2844 }
2846 // Frame is now completed as far as size and linkage.
2848 int frame_complete = ((intptr_t)__ pc()) - start;
2850 int c_arg, j_arg;
2852 // State of input register args
2854 bool live[ConcreteRegisterImpl::number_of_registers];
2856 live[j_rarg0->as_VMReg()->value()] = false;
2857 live[j_rarg1->as_VMReg()->value()] = false;
2858 live[j_rarg2->as_VMReg()->value()] = false;
2859 live[j_rarg3->as_VMReg()->value()] = false;
2860 live[j_rarg4->as_VMReg()->value()] = false;
2861 live[j_rarg5->as_VMReg()->value()] = false;
2863 live[j_farg0->as_VMReg()->value()] = false;
2864 live[j_farg1->as_VMReg()->value()] = false;
2865 live[j_farg2->as_VMReg()->value()] = false;
2866 live[j_farg3->as_VMReg()->value()] = false;
2867 live[j_farg4->as_VMReg()->value()] = false;
2868 live[j_farg5->as_VMReg()->value()] = false;
2869 live[j_farg6->as_VMReg()->value()] = false;
2870 live[j_farg7->as_VMReg()->value()] = false;
2873 bool rax_is_zero = false;
2875 // All args (except strings) destined for the stack are moved first
2876 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2877 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2878 VMRegPair src = in_regs[j_arg];
2879 VMRegPair dst = out_regs[c_arg];
2881 // Get the real reg value or a dummy (rsp)
2883 int src_reg = src.first()->is_reg() ?
2884 src.first()->value() :
2885 rsp->as_VMReg()->value();
2887 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
2888 (in_sig_bt[j_arg] == T_OBJECT &&
2889 out_sig_bt[c_arg] != T_INT &&
2890 out_sig_bt[c_arg] != T_ADDRESS &&
2891 out_sig_bt[c_arg] != T_LONG);
2893 live[src_reg] = !useless;
2895 if (dst.first()->is_stack()) {
2897 // Even though a string arg in a register is still live after this loop,
2898 // after the string conversion loop (next) it will be dead, so we take
2899 // advantage of that now for simpler code to manage the live[] state.
2901 live[src_reg] = false;
2902 switch (in_sig_bt[j_arg]) {
2904 case T_ARRAY:
2905 case T_OBJECT:
2906 {
2907 Address stack_dst(rsp, reg2offset_out(dst.first()));
2909 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2910 // need to unbox a one-word value
2911 Register in_reg = rax;
2912 if ( src.first()->is_reg() ) {
2913 in_reg = src.first()->as_Register();
2914 } else {
2915 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
2916 rax_is_zero = false;
2917 }
2918 Label skipUnbox;
2919 __ movptr(Address(rsp, reg2offset_out(dst.first())),
2920 (int32_t)NULL_WORD);
2921 __ testq(in_reg, in_reg);
2922 __ jcc(Assembler::zero, skipUnbox);
2924 BasicType bt = out_sig_bt[c_arg];
2925 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2926 Address src1(in_reg, box_offset);
2927 if ( bt == T_LONG ) {
2928 __ movq(in_reg, src1);
2929 __ movq(stack_dst, in_reg);
2930 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2931 ++c_arg; // skip over T_VOID to keep the loop indices in sync
2932 } else {
2933 __ movl(in_reg, src1);
2934 __ movl(stack_dst, in_reg);
2935 }
2937 __ bind(skipUnbox);
2938 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
2939 // Convert the arg to NULL
2940 if (!rax_is_zero) {
2941 __ xorq(rax, rax);
2942 rax_is_zero = true;
2943 }
2944 __ movq(stack_dst, rax);
2945 }
2946 }
2947 break;
2949 case T_VOID:
2950 break;
2952 case T_FLOAT:
2953 // This does the right thing since we know it is destined for the
2954 // stack
2955 float_move(masm, src, dst);
2956 break;
2958 case T_DOUBLE:
2959 // This does the right thing since we know it is destined for the
2960 // stack
2961 double_move(masm, src, dst);
2962 break;
2964 case T_LONG :
2965 long_move(masm, src, dst);
2966 break;
2968 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2970 default:
2971 move32_64(masm, src, dst);
2972 }
2973 }
2975 }
2977 // If we have any strings we must store any register based arg to the stack
2978 // This includes any still live xmm registers too.
2980 int sid = 0;
2982 if (total_strings > 0 ) {
2983 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2984 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2985 VMRegPair src = in_regs[j_arg];
2986 VMRegPair dst = out_regs[c_arg];
2988 if (src.first()->is_reg()) {
2989 Address src_tmp(rbp, fp_offset[src.first()->value()]);
2991 // string oops were left untouched by the previous loop even if the
2992 // eventual (converted) arg is destined for the stack, so park them
2993 // away now (except for the first)
2995 if (out_sig_bt[c_arg] == T_ADDRESS) {
2996 Address utf8_addr = Address(
2997 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
2998 if (sid != 1) {
2999 // The first string arg won't be killed until after the utf8
3000 // conversion
3001 __ movq(utf8_addr, src.first()->as_Register());
3002 }
3003 } else if (dst.first()->is_reg()) {
3004 if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) {
3006 // Convert the xmm register to an int and store it in the reserved
3007 // location for the eventual c register arg
3008 XMMRegister f = src.first()->as_XMMRegister();
3009 if (in_sig_bt[j_arg] == T_FLOAT) {
3010 __ movflt(src_tmp, f);
3011 } else {
3012 __ movdbl(src_tmp, f);
3013 }
3014 } else {
3015 // If the arg is an oop type we don't support, don't bother to store
3016 // it; remember, strings were handled above.
3017 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3018 (in_sig_bt[j_arg] == T_OBJECT &&
3019 out_sig_bt[c_arg] != T_INT &&
3020 out_sig_bt[c_arg] != T_LONG);
3022 if (!useless) {
3023 __ movq(src_tmp, src.first()->as_Register());
3024 }
3025 }
3026 }
3027 }
3028 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3029 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3030 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3031 }
3032 }
3034 // Now that the volatile registers are safe, convert all the strings
3035 sid = 0;
3037 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3038 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3039 if (out_sig_bt[c_arg] == T_ADDRESS) {
3040 // It's a string
3041 Address utf8_addr = Address(
3042 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3043 // The first string we find might still be in the original java arg
3044 // register
3046 VMReg src = in_regs[j_arg].first();
3048 // We will need to eventually save the final argument to the trap
3049 // in the non-volatile location dedicated to src. This is the offset
3050 // from fp we will use.
3051 int src_off = src->is_reg() ?
3052 fp_offset[src->value()] : reg2offset_in(src);
3054 // This is where the argument will eventually reside
3055 VMRegPair dst = out_regs[c_arg];
3057 if (src->is_reg()) {
3058 if (sid == 1) {
3059 __ movq(c_rarg0, src->as_Register());
3060 } else {
3061 __ movq(c_rarg0, utf8_addr);
3062 }
3063 } else {
3064 // arg is still in the original location
3065 __ movq(c_rarg0, Address(rbp, reg2offset_in(src)));
3066 }
3067 Label done, convert;
3069 // see if the oop is NULL
3070 __ testq(c_rarg0, c_rarg0);
3071 __ jcc(Assembler::notEqual, convert);
3073 if (dst.first()->is_reg()) {
3074 // Save the ptr to the utf8 string in the original src location or
3075 // the tmp dedicated to it
3076 __ movq(Address(rbp, src_off), c_rarg0);
3077 } else {
3078 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0);
3079 }
3080 __ jmp(done);
3082 __ bind(convert);
3084 __ lea(c_rarg1, utf8_addr);
3085 if (dst.first()->is_reg()) {
3086 __ movq(Address(rbp, src_off), c_rarg1);
3087 } else {
3088 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1);
3089 }
3090 // And do the conversion
3091 __ call(RuntimeAddress(
3092 CAST_FROM_FN_PTR(address, SharedRuntime::get_utf)));
3094 __ bind(done);
3095 }
3096 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3097 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3098 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3099 }
3100 }
3101 // The get_utf call killed all the c_arg registers
3102 live[c_rarg0->as_VMReg()->value()] = false;
3103 live[c_rarg1->as_VMReg()->value()] = false;
3104 live[c_rarg2->as_VMReg()->value()] = false;
3105 live[c_rarg3->as_VMReg()->value()] = false;
3106 live[c_rarg4->as_VMReg()->value()] = false;
3107 live[c_rarg5->as_VMReg()->value()] = false;
3109 live[c_farg0->as_VMReg()->value()] = false;
3110 live[c_farg1->as_VMReg()->value()] = false;
3111 live[c_farg2->as_VMReg()->value()] = false;
3112 live[c_farg3->as_VMReg()->value()] = false;
3113 live[c_farg4->as_VMReg()->value()] = false;
3114 live[c_farg5->as_VMReg()->value()] = false;
3115 live[c_farg6->as_VMReg()->value()] = false;
3116 live[c_farg7->as_VMReg()->value()] = false;
3117 }
3119 // Now we can finally move the register args to their desired locations
3121 rax_is_zero = false;
3123 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3124 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3126 VMRegPair src = in_regs[j_arg];
3127 VMRegPair dst = out_regs[c_arg];
3129 // Only need to look for args destined for the integer registers (since we
3130 // convert float/double args to look like int/long outbound)
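// For example, a java double that arrived in an xmm register was re-typed
// as an outbound T_LONG, so its destination here is an integer register,
// not an xmm register.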
3131 if (dst.first()->is_reg()) {
3132 Register r = dst.first()->as_Register();
3134 // Check if the java arg is unsupported and therefore useless
3135 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3136 (in_sig_bt[j_arg] == T_OBJECT &&
3137 out_sig_bt[c_arg] != T_INT &&
3138 out_sig_bt[c_arg] != T_ADDRESS &&
3139 out_sig_bt[c_arg] != T_LONG);
3142 // If we're going to kill an existing arg save it first
3143 if (live[dst.first()->value()]) {
3144 // you can't kill yourself
3145 if (src.first() != dst.first()) {
3146 __ movq(Address(rbp, fp_offset[dst.first()->value()]), r);
3147 }
3148 }
3149 if (src.first()->is_reg()) {
3150 if (live[src.first()->value()] ) {
3151 if (in_sig_bt[j_arg] == T_FLOAT) {
3152 __ movdl(r, src.first()->as_XMMRegister());
3153 } else if (in_sig_bt[j_arg] == T_DOUBLE) {
3154 __ movdq(r, src.first()->as_XMMRegister());
3155 } else if (r != src.first()->as_Register()) {
3156 if (!useless) {
3157 __ movq(r, src.first()->as_Register());
3158 }
3159 }
3160 } else {
3161 // If the arg is an oop type we don't support, don't bother to store
3162 // it
3163 if (!useless) {
3164 if (in_sig_bt[j_arg] == T_DOUBLE ||
3165 in_sig_bt[j_arg] == T_LONG ||
3166 in_sig_bt[j_arg] == T_OBJECT ) {
3167 __ movq(r, Address(rbp, fp_offset[src.first()->value()]));
3168 } else {
3169 __ movl(r, Address(rbp, fp_offset[src.first()->value()]));
3170 }
3171 }
3172 }
3173 live[src.first()->value()] = false;
3174 } else if (!useless) {
3175 // full sized move even for int should be ok
3176 __ movq(r, Address(rbp, reg2offset_in(src.first())));
3177 }
3179 // At this point r has the original java arg in the final location
3180 // (assuming it wasn't useless). If the java arg was an oop
3181 // we have a bit more to do
3183 if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) {
3184 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3185 // need to unbox a one-word value
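// For example, unboxing a java.lang.Integer loads the jint stored at
// java_lang_boxing_object::value_offset_in_bytes(T_INT) inside the box;
// a NULL box skips the load below and is passed through as 0.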
3186 Label skip;
3187 __ testq(r, r);
3188 __ jcc(Assembler::equal, skip);
3189 BasicType bt = out_sig_bt[c_arg];
3190 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3191 Address src1(r, box_offset);
3192 if ( bt == T_LONG ) {
3193 __ movq(r, src1);
3194 } else {
3195 __ movl(r, src1);
3196 }
3197 __ bind(skip);
3199 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
3200 // Convert the arg to NULL
3201 __ xorq(r, r);
3202 }
3203 }
3205 // dst can no longer be holding an input value
3206 live[dst.first()->value()] = false;
3207 }
3208 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3209 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3210 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3211 }
3212 }
3215 // Ok, now we are done. We need to place the nop that dtrace wants in
3216 // order to patch in the trap
3217 int patch_offset = ((intptr_t)__ pc()) - start;
3219 __ nop();
3222 // Return
3224 __ leave();
3225 __ ret(0);
3227 __ flush();
3229 nmethod *nm = nmethod::new_dtrace_nmethod(
3230 method, masm->code(), vep_offset, patch_offset, frame_complete,
3231 stack_slots / VMRegImpl::slots_per_word);
3232 return nm;
3234 }
3236 #endif // HAVE_DTRACE_H
3238 // This function returns the adjusted size (in number of words) of a c2i
3239 // adapter activation for use during deoptimization
3240 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3241 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3242 }
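// For example, a callee with callee_parameters == 2 and callee_locals == 5
// needs (5 - 2) * Interpreter::stackElementWords extra words, since the
// caller's outgoing args already provide space for the first two locals.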
3245 uint SharedRuntime::out_preserve_stack_slots() {
3246 return 0;
3247 }
3250 //------------------------------generate_deopt_blob----------------------------
3251 void SharedRuntime::generate_deopt_blob() {
3252 // Allocate space for the code
3253 ResourceMark rm;
3254 // Setup code generation tools
3255 CodeBuffer buffer("deopt_blob", 2048, 1024);
3256 MacroAssembler* masm = new MacroAssembler(&buffer);
3257 int frame_size_in_words;
3258 OopMap* map = NULL;
3259 OopMapSet *oop_maps = new OopMapSet();
3261 // -------------
3262 // This code enters when returning to a de-optimized nmethod. A return
3263 // address has been pushed on the stack, and return values are in
3264 // registers.
3265 // If we are doing a normal deopt then we were called from the patched
3266 // nmethod from the point we returned to the nmethod. So the return
3267 // address on the stack is wrong by NativeCall::instruction_size
3268 // We will adjust the value so it looks like we have the original return
3269 // address on the stack (like when we eagerly deoptimized).
3270 // In the case of an exception pending when deoptimizing, we enter
3271 // with a return address on the stack that points after the call we patched
3272 // into the exception handler. We have the following register state from,
3273 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3274 // rax: exception oop
3275 // rbx: exception handler
3276 // rdx: throwing pc
3277 // So in this case we simply jam rdx into the useless return address and
3278 // the stack looks just like we want.
3279 //
3280 // At this point we need to de-opt. We save the argument return
3281 // registers. We call the first C routine, fetch_unroll_info(). This
3282 // routine captures the return values and returns a structure which
3283 // describes the current frame size and the sizes of all replacement frames.
3284 // The current frame is compiled code and may contain many inlined
3285 // functions, each with their own JVM state. We pop the current frame, then
3286 // push all the new frames. Then we call the C routine unpack_frames() to
3287 // populate these frames. Finally unpack_frames() returns us the new target
3288 // address. Notice that callee-save registers are BLOWN here; they have
3289 // already been captured in the vframeArray at the time the return PC was
3290 // patched.
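// The overall flow, in rough C-like pseudocode (a sketch only; the real
// work is the generated assembly below):
//
//   save_live_registers();
//   UnrollBlock* info = fetch_unroll_info(thread);     // may not GC
//   rsp += info->size_of_deoptimized_frame;            // pop frame 2
//   for (int i = 0; i < info->number_of_frames; i++) { // push skeletons
//     push(info->frame_pcs[i]); push(rbp); rsp -= info->frame_sizes[i];
//   }
//   unpack_frames(thread, exec_mode);                  // fill them in
//   return;                                            // to the interpreter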
3291 address start = __ pc();
3292 Label cont;
3294 // Prolog for the non-exception case!
3296 // Save everything in sight.
3297 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3299 // Normal deoptimization. Save exec mode for unpack_frames.
3300 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3301 __ jmp(cont);
3303 int reexecute_offset = __ pc() - start;
3305 // Reexecute case
3306 // the return address is the pc that describes which bci to re-execute at
3308 // No need to update map as each call to save_live_registers will produce an identical oopmap
3309 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3311 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3312 __ jmp(cont);
3314 int exception_offset = __ pc() - start;
3316 // Prolog for exception case
3318 // all registers are dead at this entry point, except for rax and
3319 // rdx, which contain the exception oop and exception pc
3320 // respectively. Set them in TLS and fall thru to the
3321 // unpack_with_exception_in_tls entry point.
3323 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3324 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3326 int exception_in_tls_offset = __ pc() - start;
3328 // new implementation because exception oop is now passed in JavaThread
3330 // Prolog for exception case
3331 // All registers must be preserved because they might be used by LinearScan
3332 // Exception oop and throwing PC are passed in JavaThread
3333 // tos: stack at point of call to method that threw the exception (i.e. only
3334 // args are on the stack, no return address)
3336 // make room on stack for the return address
3337 // It will be patched later with the throwing pc. The correct value is not
3338 // available now because loading it from memory would destroy registers.
3339 __ push(0);
3341 // Save everything in sight.
3342 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3344 // Now it is safe to overwrite any register
3346 // Deopt during an exception. Save exec mode for unpack_frames.
3347 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3349 // load throwing pc from JavaThread and patch it as the return address
3350 // of the current frame. Then clear the field in JavaThread
3352 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3353 __ movptr(Address(rbp, wordSize), rdx);
3354 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3356 #ifdef ASSERT
3357 // verify that there is really an exception oop in JavaThread
3358 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3359 __ verify_oop(rax);
3361 // verify that there is no pending exception
3362 Label no_pending_exception;
3363 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3364 __ testptr(rax, rax);
3365 __ jcc(Assembler::zero, no_pending_exception);
3366 __ stop("must not have pending exception here");
3367 __ bind(no_pending_exception);
3368 #endif
3370 __ bind(cont);
3372 // Call C code. Need thread and this frame, but NOT official VM entry
3373 // crud. We cannot block on this call, no GC can happen.
3374 //
3375 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3377 // fetch_unroll_info needs to call last_java_frame().
3379 __ set_last_Java_frame(noreg, noreg, NULL);
3380 #ifdef ASSERT
3381 { Label L;
3382 __ cmpptr(Address(r15_thread,
3383 JavaThread::last_Java_fp_offset()),
3384 (int32_t)0);
3385 __ jcc(Assembler::equal, L);
3386 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3387 __ bind(L);
3388 }
3389 #endif // ASSERT
3390 __ mov(c_rarg0, r15_thread);
3391 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3393 // Need to have an oopmap that tells fetch_unroll_info where to
3394 // find any register it might need.
3395 oop_maps->add_gc_map(__ pc() - start, map);
3397 __ reset_last_Java_frame(false, false);
3399 // Load UnrollBlock* into rdi
3400 __ mov(rdi, rax);
3402 Label noException;
3403 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3404 __ jcc(Assembler::notEqual, noException);
3405 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3406 // QQQ this is useless; the field was set to NULL above
3407 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3408 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3409 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3411 __ verify_oop(rax);
3413 // Overwrite the result registers with the exception results.
3414 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3415 // I think this is useless
3416 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3418 __ bind(noException);
3420 // Only register save data is on the stack.
3421 // Now restore the result registers. Everything else is either dead
3422 // or captured in the vframeArray.
3423 RegisterSaver::restore_result_registers(masm);
3425 // All of the register save area has been popped off the stack. Only the
3426 // return address remains.
3428 // Pop all the frames we must move/replace.
3429 //
3430 // Frame picture (youngest to oldest)
3431 // 1: self-frame (no frame link)
3432 // 2: deopting frame (no frame link)
3433 // 3: caller of deopting frame (could be compiled/interpreted).
3434 //
3435 // Note: by leaving the return address of the self-frame on the stack
3436 // and using the size of frame 2 to adjust the stack,
3437 // the return address to frame 3 will still be on the stack when we are done.
3439 // Pop deoptimized frame
3440 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3441 __ addptr(rsp, rcx);
3443 // rsp should be pointing at the return address to the caller (3)
3445 // Stack bang to make sure there's enough room for these interpreter frames.
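// (bang_stack_size touches the stack one page at a time down past the
// total size of the new frames, so any stack overflow is raised here,
// before we start pushing the skeletal frames.)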
3446 if (UseStackBanging) {
3447 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3448 __ bang_stack_size(rbx, rcx);
3449 }
3451 // Load address of array of frame pcs into rcx
3452 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3454 // Trash the old pc
3455 __ addptr(rsp, wordSize);
3457 // Load address of array of frame sizes into rsi
3458 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3460 // Load counter into rdx
3461 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3463 // Pick up the initial fp we should save
3464 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3466 // Now adjust the caller's stack to make up for the extra locals,
3467 // but record the original sp so that we can save it in the skeletal interpreter
3468 // frame; the stack walking of interpreter_sender will then get the unextended sp
3469 // value and not the "real" sp value.
3471 const Register sender_sp = r8;
3473 __ mov(sender_sp, rsp);
3474 __ movl(rbx, Address(rdi,
3475 Deoptimization::UnrollBlock::
3476 caller_adjustment_offset_in_bytes()));
3477 __ subptr(rsp, rbx);
3479 // Push interpreter frames in a loop
3480 Label loop;
3481 __ bind(loop);
3482 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3483 #ifdef CC_INTERP
3484 __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
3485 #ifdef ASSERT
3486 __ push(0xDEADDEAD); // Make a recognizable pattern
3487 __ push(0xDEADDEAD);
3488 #else /* ASSERT */
3489 __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
3490 #endif /* ASSERT */
3491 #else
3492 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3493 #endif // CC_INTERP
3494 __ pushptr(Address(rcx, 0)); // Save return address
3495 __ enter(); // Save old & set new ebp
3496 __ subptr(rsp, rbx); // Prolog
3497 #ifdef CC_INTERP
3498 __ movptr(Address(rbp,
3499 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3500 sender_sp); // Make it walkable
3501 #else /* CC_INTERP */
3502 // This value is corrected by layout_activation_impl
3503 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3504 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3505 #endif /* CC_INTERP */
3506 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3507 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3508 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3509 __ decrementl(rdx); // Decrement counter
3510 __ jcc(Assembler::notZero, loop);
3511 __ pushptr(Address(rcx, 0)); // Save final return address
3513 // Re-push self-frame
3514 __ enter(); // Save old & set new ebp
3516 // Allocate a full sized register save area.
3517 // Return address and rbp are in place, so we allocate two fewer words.
3518 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3520 // Restore frame locals after moving the frame
3521 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3522 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3524 // Call C code. Need thread but NOT official VM entry
3525 // crud. We cannot block on this call, no GC can happen. Call should
3526 // restore return values to their stack-slots with the new SP.
3527 //
3528 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3530 // Use rbp because the frames look interpreted now
3531 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3532 // Don't need the precise return PC here, just precise enough to point into this code blob.
3533 address the_pc = __ pc();
3534 __ set_last_Java_frame(noreg, rbp, the_pc);
3536 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3537 __ mov(c_rarg0, r15_thread);
3538 __ movl(c_rarg1, r14); // second arg: exec_mode
3539 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3540 // Revert SP alignment after call since we're going to do some SP relative addressing below
3541 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3543 // Set an oopmap for the call site
3544 // Use the same PC we used for the last java frame
3545 oop_maps->add_gc_map(the_pc - start,
3546 new OopMap( frame_size_in_words, 0 ));
3548 // Clear fp AND pc
3549 __ reset_last_Java_frame(true, true);
3551 // Collect return values
3552 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3553 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3554 // I think this is useless (throwing pc?)
3555 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3557 // Pop self-frame.
3558 __ leave(); // Epilog
3560 // Jump to interpreter
3561 __ ret(0);
3563 // Make sure all code is generated
3564 masm->flush();
3566 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3567 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3568 }
3570 #ifdef COMPILER2
3571 //------------------------------generate_uncommon_trap_blob--------------------
3572 void SharedRuntime::generate_uncommon_trap_blob() {
3573 // Allocate space for the code
3574 ResourceMark rm;
3575 // Setup code generation tools
3576 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3577 MacroAssembler* masm = new MacroAssembler(&buffer);
3579 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
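// (framesize is counted in 4-byte stack slots, so "framesize % 4 == 0"
// asserts that the frame is a whole multiple of 16 bytes, as the ABI
// requires at calls.)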
3581 address start = __ pc();
3583 // Push self-frame. We get here with a return address on the
3584 // stack, so rsp is 8-byte aligned until we allocate our frame.
3585 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3587 // No callee saved registers. rbp is assumed implicitly saved
3588 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3590 // The compiler left unloaded_class_index in j_rarg0; move it to where
3591 // the runtime expects it.
3592 __ movl(c_rarg1, j_rarg0);
3594 __ set_last_Java_frame(noreg, noreg, NULL);
3596 // Call C code. Need thread but NOT official VM entry
3597 // crud. We cannot block on this call, no GC can happen. Call should
3598 // capture callee-saved registers as well as return values.
3599 // Thread is in rdi already.
3600 //
3601 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3603 __ mov(c_rarg0, r15_thread);
3604 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3606 // Set an oopmap for the call site
3607 OopMapSet* oop_maps = new OopMapSet();
3608 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3610 // location of rbp is known implicitly by the frame sender code
3612 oop_maps->add_gc_map(__ pc() - start, map);
3614 __ reset_last_Java_frame(false, false);
3616 // Load UnrollBlock* into rdi
3617 __ mov(rdi, rax);
3619 // Pop all the frames we must move/replace.
3620 //
3621 // Frame picture (youngest to oldest)
3622 // 1: self-frame (no frame link)
3623 // 2: deopting frame (no frame link)
3624 // 3: caller of deopting frame (could be compiled/interpreted).
3626 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3627 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3629 // Pop deoptimized frame (int)
3630 __ movl(rcx, Address(rdi,
3631 Deoptimization::UnrollBlock::
3632 size_of_deoptimized_frame_offset_in_bytes()));
3633 __ addptr(rsp, rcx);
3635 // rsp should be pointing at the return address to the caller (3)
3637 // Stack bang to make sure there's enough room for these interpreter frames.
3638 if (UseStackBanging) {
3639 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3640 __ bang_stack_size(rbx, rcx);
3641 }
3643 // Load address of array of frame pcs into rcx (address*)
3644 __ movptr(rcx,
3645 Address(rdi,
3646 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3648 // Trash the return pc
3649 __ addptr(rsp, wordSize);
3651 // Load address of array of frame sizes into rsi (intptr_t*)
3652 __ movptr(rsi, Address(rdi,
3653 Deoptimization::UnrollBlock::
3654 frame_sizes_offset_in_bytes()));
3656 // Counter
3657 __ movl(rdx, Address(rdi,
3658 Deoptimization::UnrollBlock::
3659 number_of_frames_offset_in_bytes())); // (int)
3661 // Pick up the initial fp we should save
3662 __ movptr(rbp,
3663 Address(rdi,
3664 Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3666 // Now adjust the caller's stack to make up for the extra locals, but
3667 // record the original sp so that we can save it in the skeletal
3668 // interpreter frame; the stack walking of interpreter_sender
3669 // will then get the unextended sp value and not the "real" sp value.
3671 const Register sender_sp = r8;
3673 __ mov(sender_sp, rsp);
3674 __ movl(rbx, Address(rdi,
3675 Deoptimization::UnrollBlock::
3676 caller_adjustment_offset_in_bytes())); // (int)
3677 __ subptr(rsp, rbx);
3679 // Push interpreter frames in a loop
3680 Label loop;
3681 __ bind(loop);
3682 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3683 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3684 __ pushptr(Address(rcx, 0)); // Save return address
3685 __ enter(); // Save old & set new rbp
3686 __ subptr(rsp, rbx); // Prolog
3687 #ifdef CC_INTERP
3688 __ movptr(Address(rbp,
3689 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3690 sender_sp); // Make it walkable
3691 #else // CC_INTERP
3692 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3693 sender_sp); // Make it walkable
3694 // This value is corrected by layout_activation_impl
3695 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3696 #endif // CC_INTERP
3697 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3698 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3699 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3700 __ decrementl(rdx); // Decrement counter
3701 __ jcc(Assembler::notZero, loop);
3702 __ pushptr(Address(rcx, 0)); // Save final return address
3704 // Re-push self-frame
3705 __ enter(); // Save old & set new rbp
3706 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3707 // Prolog
3709 // Use rbp because the frames look interpreted now
3710 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3711 // Don't need the precise return PC here, just precise enough to point into this code blob.
3712 address the_pc = __ pc();
3713 __ set_last_Java_frame(noreg, rbp, the_pc);
3715 // Call C code. Need thread but NOT official VM entry
3716 // crud. We cannot block on this call, no GC can happen. Call should
3717 // restore return values to their stack-slots with the new SP.
3718 // Thread is in rdi already.
3719 //
3720 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3722 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3723 __ mov(c_rarg0, r15_thread);
3724 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3725 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3727 // Set an oopmap for the call site
3728 // Use the same PC we used for the last java frame
3729 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3731 // Clear fp AND pc
3732 __ reset_last_Java_frame(true, true);
3734 // Pop self-frame.
3735 __ leave(); // Epilog
3737 // Jump to interpreter
3738 __ ret(0);
3740 // Make sure all code is generated
3741 masm->flush();
3743 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3744 SimpleRuntimeFrame::framesize >> 1);
3745 }
3746 #endif // COMPILER2
3749 //------------------------------generate_handler_blob------
3750 //
3751 // Generate a special Compile2Runtime blob that saves all registers
3752 // and sets up an oopmap.
3753 //
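// In rough pseudocode (a sketch only; call_ptr is the safepoint handler
// the caller passes in):
//
//   save_live_registers();                 // oopmap covers all of these
//   call_ptr(thread);
//   if (thread->pending_exception != NULL) {
//     restore_live_registers();
//     goto forward_exception_entry;
//   }
//   restore_live_registers();
//   return;
//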
3754 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
3755 assert(StubRoutines::forward_exception_entry() != NULL,
3756 "must be generated before");
3758 ResourceMark rm;
3759 OopMapSet *oop_maps = new OopMapSet();
3760 OopMap* map;
3762 // Allocate space for the code. Setup code generation tools.
3763 CodeBuffer buffer("handler_blob", 2048, 1024);
3764 MacroAssembler* masm = new MacroAssembler(&buffer);
3766 address start = __ pc();
3767 address call_pc = NULL;
3768 int frame_size_in_words;
3770 // Make room for return address (or push it again)
3771 if (!cause_return) {
3772 __ push(rbx);
3773 }
3775 // Save registers, fpu state, and flags
3776 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3778 // The following is basically a call_VM. However, we need the precise
3779 // address of the call in order to generate an oopmap. Hence, we do all the
3780 // work ourselves.
3782 __ set_last_Java_frame(noreg, noreg, NULL);
3784 // The return address must always be correct so that the frame constructor
3785 // never sees an invalid pc.
3787 if (!cause_return) {
3788 // overwrite the dummy value we pushed on entry
3789 __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3790 __ movptr(Address(rbp, wordSize), c_rarg0);
3791 }
3793 // Do the call
3794 __ mov(c_rarg0, r15_thread);
3795 __ call(RuntimeAddress(call_ptr));
3797 // Set an oopmap for the call site. This oopmap will map all
3798 // oop-registers and debug-info registers as callee-saved. This
3799 // will allow deoptimization at this safepoint to find all possible
3800 // debug-info recordings, as well as let GC find all oops.
3802 oop_maps->add_gc_map( __ pc() - start, map);
3804 Label noException;
3806 __ reset_last_Java_frame(false, false);
3808 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3809 __ jcc(Assembler::equal, noException);
3811 // Exception pending
3813 RegisterSaver::restore_live_registers(masm);
3815 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3817 // No exception case
3818 __ bind(noException);
3820 // Normal exit, restore registers and exit.
3821 RegisterSaver::restore_live_registers(masm);
3823 __ ret(0);
3825 // Make sure all code is generated
3826 masm->flush();
3828 // Fill-out other meta info
3829 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3830 }
3832 //
3833 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3834 //
3835 // Generate a stub that calls into vm to find out the proper destination
3836 // of a java call. All the argument registers are live at this point,
3837 // but since this is generic code we don't know what they are, and the
3838 // caller must do any gc of the args.
3839 //
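// In rough pseudocode (a sketch only; destination is the resolver the
// caller passes in, e.g. one of the SharedRuntime::resolve_*_call_C
// entries):
//
//   save_live_registers();                 // args may hold oops; map them
//   destination(thread);                   // sets thread->vm_result, rax
//   if (thread->pending_exception != NULL) {
//     restore_live_registers(); goto forward_exception_entry;
//   }
//   rbx = thread->vm_result;               // the resolved methodOop
//   restore_live_registers();
//   jmp rax;                               // the resolved entry point
//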
3840 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3841 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3843 // allocate space for the code
3844 ResourceMark rm;
3846 CodeBuffer buffer(name, 1000, 512);
3847 MacroAssembler* masm = new MacroAssembler(&buffer);
3849 int frame_size_in_words;
3851 OopMapSet *oop_maps = new OopMapSet();
3852 OopMap* map = NULL;
3854 int start = __ offset();
3856 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3858 int frame_complete = __ offset();
3860 __ set_last_Java_frame(noreg, noreg, NULL);
3862 __ mov(c_rarg0, r15_thread);
3864 __ call(RuntimeAddress(destination));
3867 // Set an oopmap for the call site.
3868 // We need this not only for callee-saved registers, but also for volatile
3869 // registers that the compiler might be keeping live across a safepoint.
3871 oop_maps->add_gc_map( __ offset() - start, map);
3873 // rax contains the address we are going to jump to assuming no exception got installed
3875 // clear last_Java_sp
3876 __ reset_last_Java_frame(false, false);
3877 // check for pending exceptions
3878 Label pending;
3879 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3880 __ jcc(Assembler::notEqual, pending);
3882 // get the returned methodOop
3883 __ movptr(rbx, Address(r15_thread, JavaThread::vm_result_offset()));
3884 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3886 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3888 RegisterSaver::restore_live_registers(masm);
3890 // We are back to the original state on entry and ready to go.
3892 __ jmp(rax);
3894 // Pending exception after the safepoint
3896 __ bind(pending);
3898 RegisterSaver::restore_live_registers(masm);
3900 // exception pending => remove activation and forward to exception handler
3902 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3904 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3905 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3907 // -------------
3908 // make sure all code is generated
3909 masm->flush();
3911 // return the blob
3912 // frame_size_words or bytes??
3913 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3914 }
3917 #ifdef COMPILER2
3918 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3919 //
3920 //------------------------------generate_exception_blob---------------------------
3921 // Creates the exception blob at the end.
3922 // This code is jumped to from a compiled method via the exception blob
3923 // (see emit_exception_handler in the x86_64.ad file).
3924 //
3925 // Given an exception pc at a call, we call into the runtime for the
3926 // handler in this method. This handler might merely restore state
3927 // (i.e. callee-saved registers), unwind the frame, and jump to the
3928 // exception handler of the caller if there is no Java-level handler
3929 // in the nmethod.
3930 //
3931 // This code is entered with a jmp.
3932 //
3933 // Arguments:
3934 // rax: exception oop
3935 // rdx: exception pc
3936 //
3937 // Results:
3938 // rax: exception oop
3939 // rdx: exception pc in caller or ???
3940 // destination: exception handler of caller
3941 //
3942 // Note: the exception pc MUST be at a call (precise debug information)
3943 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3944 //
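// In rough pseudocode (a sketch of the code below):
//
//   push(rdx);                             // exception pc = return address
//   thread->exception_oop = rax;
//   thread->exception_pc  = rdx;
//   address handler = handle_exception_C(thread);
//   rax = thread->exception_oop;           // restore, then clear for GC
//   rdx = thread->exception_pc;
//   thread->exception_oop = NULL;
//   jmp handler;
//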
3946 void OptoRuntime::generate_exception_blob() {
3947 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3948 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3949 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3951 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3953 // Allocate space for the code
3954 ResourceMark rm;
3955 // Setup code generation tools
3956 CodeBuffer buffer("exception_blob", 2048, 1024);
3957 MacroAssembler* masm = new MacroAssembler(&buffer);
3960 address start = __ pc();
3962 // Exception pc is 'return address' for stack walker
3963 __ push(rdx);
3964 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3966 // Save callee-saved registers. See x86_64.ad.
3968 // rbp is an implicitly saved callee-saved register (i.e. the calling
3969 // convention will save/restore it in the prolog/epilog). Other than that
3970 // there are no callee-saved registers now that adapter frames are gone.
3972 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3974 // Store exception in Thread object. We cannot pass any arguments to the
3975 // handle_exception call, since we do not want to make any assumption
3976 // about the size of the frame in which the exception happened.
3977 // c_rarg0 is either rdi (Linux) or rcx (Windows).
3978 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
3979 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3981 // This call does all the hard work. It checks if an exception handler
3982 // exists in the method.
3983 // If so, it returns the handler address.
3984 // If not, it prepares for stack-unwinding, restoring the callee-save
3985 // registers of the frame being removed.
3986 //
3987 // address OptoRuntime::handle_exception_C(JavaThread* thread)
3989 // At a method handle call, the stack may not be properly aligned
3990 // when returning with an exception.
3991 address the_pc = __ pc();
3992 __ set_last_Java_frame(noreg, noreg, the_pc);
3993 __ mov(c_rarg0, r15_thread);
3994 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3995 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3997 // Set an oopmap for the call site. This oopmap will only be used if we
3998 // are unwinding the stack. Hence, all locations will be dead.
3999 // Callee-saved registers will be the same as the frame above (i.e.,
4000 // handle_exception_stub), since they were restored when we got the
4001 // exception.
4003 OopMapSet* oop_maps = new OopMapSet();
4005 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4007 __ reset_last_Java_frame(false, true);
4009 // Restore callee-saved registers
4011 // rbp is an implicitly saved callee-saved register (i.e. the calling
4012 // convention will save/restore it in the prolog/epilog). Other than that
4013 // there are no callee-saved registers now that adapter frames are gone.
4015 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4017 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4018 __ pop(rdx); // No need for exception pc anymore
4020 // rax: exception handler
4022 // Restore SP from BP if the exception PC is a MethodHandle call site.
4023 __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
4024 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
4026 // We have a handler in rax (could be deopt blob).
4027 __ mov(r8, rax);
4029 // Get the exception oop
4030 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4031 // Get the exception pc in case we are deoptimized
4032 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4033 #ifdef ASSERT
4034 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4035 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4036 #endif
4037 // Clear the exception oop so GC no longer processes it as a root.
4038 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4040 // rax: exception oop
4041 // r8: exception handler
4042 // rdx: exception pc
4043 // Jump to handler
4045 __ jmp(r8);
4047 // Make sure all code is generated
4048 masm->flush();
4050 // Set exception blob
4051 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4052 }
4053 #endif // COMPILER2