Mon, 17 Sep 2012 12:57:58 -0700
7196262: JSR 292: java/lang/invoke/PrivateInvokeTest.java fails on solaris-sparc
Reviewed-by: kvn, jrose, bdelsart
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
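
// For example (just arithmetic, not a new constraint): with the usual
// x86_64 16-byte stack alignment and 4-byte VMReg stack slots this works
// out to 16 / 4 = 4 slots.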
class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
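
// A worked example of the layout above, assuming a platform where
// frame::arg_reg_save_area_bytes is 0 (e.g. Linux x86_64): rbp_off == 0,
// rbp_off2 == 1, return_off == 2, return_off2 == 3 and framesize == 4 jint
// slots, i.e. the frame holds exactly the saved rbp and the return address.
// On Windows the 32-byte argument register shadow area shifts every offset
// up by 8 slots.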
class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    DEF_XMM_OFFS(8),
    DEF_XMM_OFFS(9),
    DEF_XMM_OFFS(10),
    DEF_XMM_OFFS(11),
    DEF_XMM_OFFS(12),
    DEF_XMM_OFFS(13),
    DEF_XMM_OFFS(14),
    DEF_XMM_OFFS(15),
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
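
// A sketch of how RegisterSaver is typically paired in the runtime stubs
// that use it (illustrative only; the names oop_maps/frame_size_in_words
// are local to each stub):
//
//   OopMapSet* oop_maps = new OopMapSet();
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
//   // ... set_last_Java_frame(), call into the VM, oop_maps->add_gc_map(...) ...
//   RegisterSaver::restore_live_registers(masm);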

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {

  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jints) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);
  map->set_callee_saved(VMRegImpl::stack2reg( rax_off + additional_frame_slots), rax->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rcx_off + additional_frame_slots), rcx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rdx_off + additional_frame_slots), rdx->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rbx_off + additional_frame_slots), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code; it needs no
  // oopmap, and the location where rbp was saved is ignored
  map->set_callee_saved(VMRegImpl::stack2reg( rsi_off + additional_frame_slots), rsi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( rdi_off + additional_frame_slots), rdi->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r8_off  + additional_frame_slots), r8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r9_off  + additional_frame_slots), r9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r10_off + additional_frame_slots), r10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r11_off + additional_frame_slots), r11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r12_off + additional_frame_slots), r12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r13_off + additional_frame_slots), r13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r14_off + additional_frame_slots), r14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg( r15_off + additional_frame_slots), r15->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm0_off + additional_frame_slots), xmm0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm1_off + additional_frame_slots), xmm1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm2_off + additional_frame_slots), xmm2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm3_off + additional_frame_slots), xmm3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm4_off + additional_frame_slots), xmm4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm5_off + additional_frame_slots), xmm5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm6_off + additional_frame_slots), xmm6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm7_off + additional_frame_slots), xmm7->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm8_off + additional_frame_slots), xmm8->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm9_off + additional_frame_slots), xmm9->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm10_off + additional_frame_slots), xmm10->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm11_off + additional_frame_slots), xmm11->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm12_off + additional_frame_slots), xmm12->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm13_off + additional_frame_slots), xmm13->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm14_off + additional_frame_slots), xmm14->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(xmm15_off + additional_frame_slots), xmm15->as_VMReg());

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(VMRegImpl::stack2reg( raxH_off + additional_frame_slots),
                          rax->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rcxH_off + additional_frame_slots),
                          rcx->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rdxH_off + additional_frame_slots),
                          rdx->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rbxH_off + additional_frame_slots),
                          rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(VMRegImpl::stack2reg( rsiH_off + additional_frame_slots),
                          rsi->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( rdiH_off + additional_frame_slots),
                          rdi->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r8H_off  + additional_frame_slots),
                          r8->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r9H_off  + additional_frame_slots),
                          r9->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r10H_off + additional_frame_slots),
                          r10->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r11H_off + additional_frame_slots),
                          r11->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r12H_off + additional_frame_slots),
                          r12->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r13H_off + additional_frame_slots),
                          r13->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r14H_off + additional_frame_slots),
                          r14->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg( r15H_off + additional_frame_slots),
                          r15->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm0H_off + additional_frame_slots),
                          xmm0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm1H_off + additional_frame_slots),
                          xmm1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm2H_off + additional_frame_slots),
                          xmm2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm3H_off + additional_frame_slots),
                          xmm3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm4H_off + additional_frame_slots),
                          xmm4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm5H_off + additional_frame_slots),
                          xmm5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm6H_off + additional_frame_slots),
                          xmm6->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm7H_off + additional_frame_slots),
                          xmm7->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm8H_off + additional_frame_slots),
                          xmm8->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm9H_off + additional_frame_slots),
                          xmm9->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm10H_off + additional_frame_slots),
                          xmm10->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm11H_off + additional_frame_slots),
                          xmm11->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm12H_off + additional_frame_slots),
                          xmm12->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm13H_off + additional_frame_slots),
                          xmm13->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm14H_off + additional_frame_slots),
                          xmm14->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg(xmm15H_off + additional_frame_slots),
                          xmm15->as_VMReg()->next());
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result register. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
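
// For instance, the first incoming stack argument occupies VMReg stack
// slot 0, and reg2offset_in maps it to (0 + 4) * 4 = 16, i.e.
// Address(rbp, 16): the "+ 4" jint slots are exactly the saved rbp
// (8 bytes) plus the return address (8 bytes) sitting below it.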

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers; those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack
// pointer as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Register values up to
// RegisterImpl::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.  Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the Java ABI we ought to at least get some
// advantage out of it.
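
// A worked example of the convention implemented below (illustrative,
// using the j_rarg/j_farg assignments declared in the function): for a
// Java signature (int, long, Object, double) the mapping is
//   int    -> j_rarg0   (set1; one 32-bit slot)
//   long   -> j_rarg1   (set2; its trailing T_VOID half is set_bad)
//   Object -> j_rarg2   (set2)
//   double -> j_farg0   (set2; trailing T_VOID half is set_bad)
// and the function returns 0 because no stack slots were needed.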
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live, so capture the return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus 1 because
  // we also account for the return address location since
  // we store it first rather than hold it in rax across all the shuffling

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a
    // long/double in a single slot on a 64-bit VM and it would be silly to
    // break them up, the interpreter leaves one slot empty and only stores
    // to a single slot. In this case the slot that is occupied is the
    // T_VOID slot. See, I said it was confusing.
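
    // Concretely, for the T_LONG at i == 0 in the table above (this just
    // restates what the code below does): st_off is 32 and next_off is 24,
    // so the 64-bit value is stored once at rsp+24 (the T_VOID slot), and
    // in debug builds rsp+32 is filled with known junk.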

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or less) so move only 32 bits to slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes on an i2c entry; otherwise we lose
  // the alignment we expect in all compiled code, and the register save
  // code can segv when fxsave instructions find an improperly aligned
  // stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args.  Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }

  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // push the return address so the youngest frame sees the stack exactly
  // as a call instruction would have left it (misaligned by one word)
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going thru a c2i because of a miss a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are addressed with negative offsets, so the LSW is at the LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case.
        // So we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // put Method* where a c2i would expect it should we end up there;
  // only needed because c2's resolve stubs return the Method* as a
  // result in rax
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}
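
// A worked example of the C convention above (illustrative only): for a
// native signature (JNIEnv*, jclass, jlong) on Linux the three arguments
// land in c_rarg0..c_rarg2 and stk_args stays 0; on Windows each register
// arg also reserves two slots, and the final clamp guarantees the mandatory
// 8-slot (32-byte) shadow area, so the function returns 8 there.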

// On 64-bit we will store integer-like items to the stack as
// 64-bit items (sparc abi) even though Java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits.
// So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is NULL; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it; otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
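
// The upshot of object_move (a summary, not extra behavior): the native
// callee receives a jobject that is the *address* of a stack slot holding
// the oop, just like a JNI handle, and a NULL oop flows through as a NULL
// jobject thanks to the conditional moves above.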

// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if (src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if (src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below
  // the frame pointer, which by this time is free to use
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
  }
}

static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // if map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double words first
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for (int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value is in an input register; we must flush it to the stack
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (map != NULL) {
          __ movl(Address(rsp, offset), reg);
        } else {
          __ movl(reg, Address(rsp, offset));
        }
        break;
      case T_ARRAY:
      case T_LONG:
        // handled above
        break;
      case T_OBJECT:
      default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}

// Check GC_locker::needs_gc and enter the runtime if it's true.  This
// keeps a new JNI critical region from starting until a GC has been
// forced.  Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}

// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move_ptr(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move32_64(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  move_ptr(masm, tmp, body_arg);
  move32_64(masm, tmp, length_arg);
  __ bind(done);
}
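
// From the native side this means (assuming the critical-native calling
// convention that passes each array as a length/body pair): a jint[]
// parameter arrives as C arguments (jint length, jint* body), with
// length == 0 and body == NULL when the Java array reference was null.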

// Different signatures may require very different orders for the move
// to avoid clobbering other arguments.  There's no simple way to
// order them safely.  Compute a safe order for issuing stores and
// break any cycles in those stores.  This code is fairly general but
// it's not necessary on the other platforms so we keep it in the
// platform dependent code instead of moving it into a shared file.
// (See bugs 7013347 & 7145024.)
// Note that this code is specific to LP64.
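
// A tiny instance of the problem (hypothetical registers, for illustration):
// if the required moves are rdi -> rsi and rsi -> rdi, issuing either store
// first clobbers the other's source. ComputeMoveOrder detects the cycle and
// breaks it with the temp: rsi -> tmp, rdi -> rsi, tmp -> rdi.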
1429 class ComputeMoveOrder: public StackObj {
1430 class MoveOperation: public ResourceObj {
1431 friend class ComputeMoveOrder;
1432 private:
1433 VMRegPair _src;
1434 VMRegPair _dst;
1435 int _src_index;
1436 int _dst_index;
1437 bool _processed;
1438 MoveOperation* _next;
1439 MoveOperation* _prev;
1441 static int get_id(VMRegPair r) {
1442 return r.first()->value();
1443 }
1445 public:
1446 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1447 _src(src)
1448 , _src_index(src_index)
1449 , _dst(dst)
1450 , _dst_index(dst_index)
1451 , _next(NULL)
1452 , _prev(NULL)
1453 , _processed(false) {
1454 }
1456 VMRegPair src() const { return _src; }
1457 int src_id() const { return get_id(src()); }
1458 int src_index() const { return _src_index; }
1459 VMRegPair dst() const { return _dst; }
1460 void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
1461 int dst_index() const { return _dst_index; }
1462 int dst_id() const { return get_id(dst()); }
1463 MoveOperation* next() const { return _next; }
1464 MoveOperation* prev() const { return _prev; }
1465 void set_processed() { _processed = true; }
1466 bool is_processed() const { return _processed; }
1468 // insert
1469 void break_cycle(VMRegPair temp_register) {
1470 // create a new store following the last store
1471 // to move from the temp_register to the original
1472 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1474 // break the cycle of links and insert new_store at the end
1475 // break the reverse link.
1476 MoveOperation* p = prev();
1477 assert(p->next() == this, "must be");
1478 _prev = NULL;
1479 p->_next = new_store;
1480 new_store->_prev = p;
1482 // change the original store to save its value in the temp.
1483 set_dst(-1, temp_register);
1484 }
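// Worked example (hypothetical registers a, b, c and temp t): the moves
// a->b, b->c, c->a link into the cycle (a->b) -> (c->a) -> (b->c) ->
// (a->b). Breaking it at (a->b) retargets that store to a->t and appends
// t->b after the last link, so the emitted order
//     a->t, c->a, b->c, t->b
// clobbers no source before it has been read.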
1486 void link(GrowableArray<MoveOperation*>& killer) {
1487 // link this store in front of the store that it depends on
1488 MoveOperation* n = killer.at_grow(src_id(), NULL);
1489 if (n != NULL) {
1490 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1491 _next = n;
1492 n->_prev = this;
1493 }
1494 }
1495 };
1497 private:
1498 GrowableArray<MoveOperation*> edges;
1500 public:
1501 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1502 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1503 // Move operations where the dest is the stack can all be
1504 // scheduled first since they can't interfere with the other moves.
1505 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1506 if (in_sig_bt[i] == T_ARRAY) {
1507 c_arg--;
1508 if (out_regs[c_arg].first()->is_stack() &&
1509 out_regs[c_arg + 1].first()->is_stack()) {
1510 arg_order.push(i);
1511 arg_order.push(c_arg);
1512 } else {
1513 if (out_regs[c_arg].first()->is_stack() ||
1514 in_regs[i].first() == out_regs[c_arg].first()) {
1515 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1516 } else {
1517 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1518 }
1519 }
1520 } else if (in_sig_bt[i] == T_VOID) {
1521 arg_order.push(i);
1522 arg_order.push(c_arg);
1523 } else {
1524 if (out_regs[c_arg].first()->is_stack() ||
1525 in_regs[i].first() == out_regs[c_arg].first()) {
1526 arg_order.push(i);
1527 arg_order.push(c_arg);
1528 } else {
1529 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1530 }
1531 }
1532 }
1533 // Break any cycles in the register moves and emit them in the
1534 // proper order.
1535 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1536 for (int i = 0; i < stores->length(); i++) {
1537 arg_order.push(stores->at(i)->src_index());
1538 arg_order.push(stores->at(i)->dst_index());
1539 }
1540 }
1542 // Collect all the move operations
1543 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1544 if (src.first() == dst.first()) return;
1545 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1546 }
1548 // Walk the edges breaking cycles between moves. The result list
1549 // can be walked in order to produce the proper set of loads
1550 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1551 // Record which moves kill which values
1552 GrowableArray<MoveOperation*> killer;
1553 for (int i = 0; i < edges.length(); i++) {
1554 MoveOperation* s = edges.at(i);
1555 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1556 killer.at_put_grow(s->dst_id(), s, NULL);
1557 }
1558 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1559 "make sure temp isn't in the registers that are killed");
1561 // create links between loads and stores
1562 for (int i = 0; i < edges.length(); i++) {
1563 edges.at(i)->link(killer);
1564 }
1566 // at this point, all the move operations are chained together
1567 // in a doubly linked list. Processing it backwards finds
1568 // the beginning of the chain, forwards finds the end. If there's
1569 // a cycle it can be broken at any point, so pick an edge and walk
1570 // backward until the list ends or we end where we started.
1571 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1572 for (int e = 0; e < edges.length(); e++) {
1573 MoveOperation* s = edges.at(e);
1574 if (!s->is_processed()) {
1575 MoveOperation* start = s;
1576 // search for the beginning of the chain or cycle
1577 while (start->prev() != NULL && start->prev() != s) {
1578 start = start->prev();
1579 }
1580 if (start->prev() == s) {
1581 start->break_cycle(temp_register);
1582 }
1583 // walk the chain forward inserting to store list
1584 while (start != NULL) {
1585 stores->append(start);
1586 start->set_processed();
1587 start = start->next();
1588 }
1589 }
1590 }
1591 return stores;
1592 }
1593 };
1595 static void verify_oop_args(MacroAssembler* masm,
1596 methodHandle method,
1597 const BasicType* sig_bt,
1598 const VMRegPair* regs) {
1599 Register temp_reg = rbx; // not part of any compiled calling seq
1600 if (VerifyOops) {
1601 for (int i = 0; i < method->size_of_parameters(); i++) {
1602 if (sig_bt[i] == T_OBJECT ||
1603 sig_bt[i] == T_ARRAY) {
1604 VMReg r = regs[i].first();
1605 assert(r->is_valid(), "bad oop arg");
1606 if (r->is_stack()) {
1607 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1608 __ verify_oop(temp_reg);
1609 } else {
1610 __ verify_oop(r->as_Register());
1611 }
1612 }
1613 }
1614 }
1615 }
1617 static void gen_special_dispatch(MacroAssembler* masm,
1618 methodHandle method,
1619 const BasicType* sig_bt,
1620 const VMRegPair* regs) {
1621 verify_oop_args(masm, method, sig_bt, regs);
1622 vmIntrinsics::ID iid = method->intrinsic_id();
1624 // Now write the args into the outgoing interpreter space
1625 bool has_receiver = false;
1626 Register receiver_reg = noreg;
1627 int member_arg_pos = -1;
1628 Register member_reg = noreg;
1629 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1630 if (ref_kind != 0) {
1631 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1632 member_reg = rbx; // known to be free at this point
1633 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1634 } else if (iid == vmIntrinsics::_invokeBasic) {
1635 has_receiver = true;
1636 } else {
1637 fatal(err_msg_res("unexpected intrinsic id %d", iid));
1638 }
1640 if (member_reg != noreg) {
1641 // Load the member_arg into register, if necessary.
1642 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1643 VMReg r = regs[member_arg_pos].first();
1644 if (r->is_stack()) {
1645 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1646 } else {
1647 // no data motion is needed
1648 member_reg = r->as_Register();
1649 }
1650 }
1652 if (has_receiver) {
1653 // Make sure the receiver is loaded into a register.
1654 assert(method->size_of_parameters() > 0, "oob");
1655 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1656 VMReg r = regs[0].first();
1657 assert(r->is_valid(), "bad receiver arg");
1658 if (r->is_stack()) {
1659 // Porting note: This assumes that compiled calling conventions always
1660 // pass the receiver oop in a register. If this is not true on some
1661 // platform, pick a temp and load the receiver from stack.
1662 fatal("receiver always in a register");
1663 receiver_reg = j_rarg0; // known to be free at this point
1664 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1665 } else {
1666 // no data motion is needed
1667 receiver_reg = r->as_Register();
1668 }
1669 }
1671 // Figure out which address we are really jumping to:
1672 MethodHandles::generate_method_handle_dispatch(masm, iid,
1673 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1674 }
1676 // ---------------------------------------------------------------------------
1677 // Generate a native wrapper for a given method. The method takes arguments
1678 // in the Java compiled code convention, marshals them to the native
1679 // convention (handlizes oops, etc), transitions to native, makes the call,
1680 // returns to java state (possibly blocking), unhandlizes any result and
1681 // returns.
1682 //
1683 // Critical native functions are a shorthand for the use of
1684 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1685 // functions. The wrapper is expected to unpack the arguments before
1686 // passing them to the callee and perform checks before and after the
1687 // native call to ensure that the GC_locker
1688 // lock_critical/unlock_critical semantics are followed. Some other
1689 // parts of JNI setup are skipped, like the tear down of the JNI handle
1690 // block and the check for pending exceptions, since it's impossible for
1691 // them to be thrown.
1692 //
1693 // They are roughly structured like this:
1694 // if (GC_locker::needs_gc())
1695 // SharedRuntime::block_for_jni_critical();
1696 // transition to thread_in_native
1697 // unpack array arguments and call native entry point
1698 // check for safepoint in progress
1699 // check if any thread suspend flags are set
1700 // call into JVM and possibly unlock the JNI critical
1701 // if a GC was suppressed while in the critical native.
1702 // transition back to thread_in_Java
1703 // return to caller
1704 //
1705 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1706 methodHandle method,
1707 int compile_id,
1708 BasicType* in_sig_bt,
1709 VMRegPair* in_regs,
1710 BasicType ret_type) {
1711 if (method->is_method_handle_intrinsic()) {
1712 vmIntrinsics::ID iid = method->intrinsic_id();
1713 intptr_t start = (intptr_t)__ pc();
1714 int vep_offset = ((intptr_t)__ pc()) - start;
1715 gen_special_dispatch(masm,
1716 method,
1717 in_sig_bt,
1718 in_regs);
1719 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1720 __ flush();
1721 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1722 return nmethod::new_native_nmethod(method,
1723 compile_id,
1724 masm->code(),
1725 vep_offset,
1726 frame_complete,
1727 stack_slots / VMRegImpl::slots_per_word,
1728 in_ByteSize(-1),
1729 in_ByteSize(-1),
1730 (OopMapSet*)NULL);
1731 }
1732 bool is_critical_native = true;
1733 address native_func = method->critical_native_function();
1734 if (native_func == NULL) {
1735 native_func = method->native_function();
1736 is_critical_native = false;
1737 }
1738 assert(native_func != NULL, "must have function");
1740 // An OopMap for lock (and class if static)
1741 OopMapSet *oop_maps = new OopMapSet();
1742 intptr_t start = (intptr_t)__ pc();
1744 // We have received a description of where all the java args are located
1745 // on entry to the wrapper. We need to convert these args to where
1746 // the jni function will expect them. To figure out where they go
1747 // we convert the java signature to a C signature by inserting
1748 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1750 const int total_in_args = method->size_of_parameters();
1751 int total_c_args = total_in_args;
1752 if (!is_critical_native) {
1753 total_c_args += 1;
1754 if (method->is_static()) {
1755 total_c_args++;
1756 }
1757 } else {
1758 for (int i = 0; i < total_in_args; i++) {
1759 if (in_sig_bt[i] == T_ARRAY) {
1760 total_c_args++;
1761 }
1762 }
1763 }
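// For example (illustrative signatures): a static native (int, long) has
// total_in_args == 3 (T_INT, T_LONG, T_VOID) and gains hidden slots for
// the JNIEnv* and the class mirror, so total_c_args == 5; a critical
// native taking a single byte[] has total_in_args == 1 and
// total_c_args == 2, since the array expands to a length/pointer pair
// and no JNIEnv* or mirror is passed.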
1765 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1766 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1767 BasicType* in_elem_bt = NULL;
1769 int argc = 0;
1770 if (!is_critical_native) {
1771 out_sig_bt[argc++] = T_ADDRESS;
1772 if (method->is_static()) {
1773 out_sig_bt[argc++] = T_OBJECT;
1774 }
1776 for (int i = 0; i < total_in_args ; i++ ) {
1777 out_sig_bt[argc++] = in_sig_bt[i];
1778 }
1779 } else {
1780 Thread* THREAD = Thread::current();
1781 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1782 SignatureStream ss(method->signature());
1783 for (int i = 0; i < total_in_args ; i++ ) {
1784 if (in_sig_bt[i] == T_ARRAY) {
1785 // Arrays are passed as int, elem* pair
1786 out_sig_bt[argc++] = T_INT;
1787 out_sig_bt[argc++] = T_ADDRESS;
1788 Symbol* atype = ss.as_symbol(CHECK_NULL);
1789 const char* at = atype->as_C_string();
1790 if (strlen(at) == 2) {
1791 assert(at[0] == '[', "must be");
1792 switch (at[1]) {
1793 case 'B': in_elem_bt[i] = T_BYTE; break;
1794 case 'C': in_elem_bt[i] = T_CHAR; break;
1795 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1796 case 'F': in_elem_bt[i] = T_FLOAT; break;
1797 case 'I': in_elem_bt[i] = T_INT; break;
1798 case 'J': in_elem_bt[i] = T_LONG; break;
1799 case 'S': in_elem_bt[i] = T_SHORT; break;
1800 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1801 default: ShouldNotReachHere();
1802 }
1803 }
1804 } else {
1805 out_sig_bt[argc++] = in_sig_bt[i];
1806 in_elem_bt[i] = T_VOID;
1807 }
1808 if (in_sig_bt[i] != T_VOID) {
1809 assert(in_sig_bt[i] == ss.type(), "must match");
1810 ss.next();
1811 }
1812 }
1813 }
1815 // Now figure out where the args must be stored and how much stack space
1816 // they require.
1817 int out_arg_slots;
1818 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1820 // Compute framesize for the wrapper. We need to handlize all oops in
1821 // incoming registers
1823 // Calculate the total number of stack slots we will need.
1825 // First count the abi requirement plus all of the outgoing args
1826 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1828 // Now the space for the inbound oop handle area
1829 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1830 if (is_critical_native) {
1831 // Critical natives may have to call out so they need a save area
1832 // for register arguments.
1833 int double_slots = 0;
1834 int single_slots = 0;
1835 for ( int i = 0; i < total_in_args; i++) {
1836 if (in_regs[i].first()->is_Register()) {
1837 const Register reg = in_regs[i].first()->as_Register();
1838 switch (in_sig_bt[i]) {
1839 case T_BOOLEAN:
1840 case T_BYTE:
1841 case T_SHORT:
1842 case T_CHAR:
1843 case T_INT: single_slots++; break;
1844 case T_ARRAY: // specific to LP64 (7145024)
1845 case T_LONG: double_slots++; break;
1846 default: ShouldNotReachHere();
1847 }
1848 } else if (in_regs[i].first()->is_XMMRegister()) {
1849 switch (in_sig_bt[i]) {
1850 case T_FLOAT: single_slots++; break;
1851 case T_DOUBLE: double_slots++; break;
1852 default: ShouldNotReachHere();
1853 }
1854 } else if (in_regs[i].first()->is_FloatRegister()) {
1855 ShouldNotReachHere();
1856 }
1857 }
1858 total_save_slots = double_slots * 2 + single_slots;
1859 // align the save area
1860 if (double_slots != 0) {
1861 stack_slots = round_to(stack_slots, 2);
1862 }
1863 }
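// Worked example (hypothetical signature): if (int, long, float, double)
// all arrive in registers, the loop above counts single_slots == 2 (the
// int and the float) and double_slots == 2 (the long and the double),
// giving total_save_slots == 2*2 + 2 == 6 and forcing the 2-slot
// alignment of stack_slots.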
1865 int oop_handle_offset = stack_slots;
1866 stack_slots += total_save_slots;
1868 // Now any space we need for handlizing a klass if this is a static method
1870 int klass_slot_offset = 0;
1871 int klass_offset = -1;
1872 int lock_slot_offset = 0;
1873 bool is_static = false;
1875 if (method->is_static()) {
1876 klass_slot_offset = stack_slots;
1877 stack_slots += VMRegImpl::slots_per_word;
1878 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1879 is_static = true;
1880 }
1882 // Plus a lock if needed
1884 if (method->is_synchronized()) {
1885 lock_slot_offset = stack_slots;
1886 stack_slots += VMRegImpl::slots_per_word;
1887 }
1889 // Now a place (+2) to save return values or temp during shuffling
1890 // + 4 for return address (which we own) and saved rbp
1891 stack_slots += 6;
1893 // Ok, the space we have allocated will look like:
1894 //
1895 //
1896 // FP-> | |
1897 // |---------------------|
1898 // | 2 slots for moves |
1899 // |---------------------|
1900 // | lock box (if sync) |
1901 // |---------------------| <- lock_slot_offset
1902 // | klass (if static) |
1903 // |---------------------| <- klass_slot_offset
1904 // | oopHandle area |
1905 // |---------------------| <- oop_handle_offset (6 java arg registers)
1906 // | outbound memory |
1907 // | based arguments |
1908 // | |
1909 // |---------------------|
1910 // | |
1911 // SP-> | out_preserved_slots |
1912 //
1913 //
1916 // Now compute actual number of stack words we need rounding to make
1917 // stack properly aligned.
1918 stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1920 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
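// E.g., with 16-byte stack alignment and 4-byte slots,
// StackAlignmentInSlots == 4, so 45 raw slots round up to 48 and
// stack_size == 192 bytes.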
1922 // First thing make an ic check to see if we should even be here
1924 // We are free to use all registers as temps without saving them and
1925 // restoring them except rbp. rbp is the only callee save register
1926 // as far as the interpreter and the compiler(s) are concerned.
1929 const Register ic_reg = rax;
1930 const Register receiver = j_rarg0;
1932 Label hit;
1933 Label exception_pending;
1935 assert_different_registers(ic_reg, receiver, rscratch1);
1936 __ verify_oop(receiver);
1937 __ load_klass(rscratch1, receiver);
1938 __ cmpq(ic_reg, rscratch1);
1939 __ jcc(Assembler::equal, hit);
1941 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1943 // Verified entry point must be aligned
1944 __ align(8);
1946 __ bind(hit);
1948 int vep_offset = ((intptr_t)__ pc()) - start;
1950 // The instruction at the verified entry point must be 5 bytes or longer
1951 // because it can be patched on the fly by make_non_entrant. The stack bang
1952 // instruction fits that requirement.
1954 // Generate stack overflow check
1956 if (UseStackBanging) {
1957 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1958 } else {
1959 // need a 5 byte instruction to allow MT safe patching to non-entrant
1960 __ fat_nop();
1961 }
1963 // Generate a new frame for the wrapper.
1964 __ enter();
1965 // -2 because return address is already present and so is saved rbp
1966 __ subptr(rsp, stack_size - 2*wordSize);
1968 // Frame is now completed as far as size and linkage.
1969 int frame_complete = ((intptr_t)__ pc()) - start;
1971 #ifdef ASSERT
1972 {
1973 Label L;
1974 __ mov(rax, rsp);
1975 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
1976 __ cmpptr(rax, rsp);
1977 __ jcc(Assembler::equal, L);
1978 __ stop("improperly aligned stack");
1979 __ bind(L);
1980 }
1981 #endif /* ASSERT */
1984 // We use r14 as the oop handle for the receiver/klass
1985 // It is callee save so it survives the call to native
1987 const Register oop_handle_reg = r14;
1989 if (is_critical_native) {
1990 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
1991 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1992 }
1994 //
1995 // We immediately shuffle the arguments so that any vm call we have to
1996 // make from here on out (sync slow path, jvmti, etc.) we will have
1997 // captured the oops from our caller and have a valid oopMap for
1998 // them.
2000 // -----------------
2001 // The Grand Shuffle
2003 // The Java calling convention is either equal (linux) or denser (win64) than the
2004 // c calling convention. However, because of the jni_env argument, the c calling
2005 // convention always has at least one more (and two for static) arguments than Java.
2006 // Therefore if we move the args from java -> c backwards then we will never have
2007 // a register->register conflict and we don't have to build a dependency graph
2008 // and figure out how to break any cycles.
2009 //
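// (Illustration, assuming the usual linux x86_64 assignments: with the
// JNIEnv* taking c_rarg0, java arg k in j_rarg<k> maps to c_rarg<k+1>,
// which for k == 0..4 is the same physical register, so most of these
// backward moves degenerate to no-ops.)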
2011 // Record esp-based slot for receiver on stack for non-static methods
2012 int receiver_offset = -1;
2014 // This is a trick. We double the stack slots so we can claim
2015 // the oops in the caller's frame. Since we are sure to have
2016 // more args than the caller, doubling is enough to make
2017 // sure we can capture all the incoming oop args from the
2018 // caller.
2019 //
2020 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2022 // Mark location of rbp (someday)
2023 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2025 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2026 // All inbound args are referenced based on rbp and all outbound args via rsp.
2029 #ifdef ASSERT
2030 bool reg_destroyed[RegisterImpl::number_of_registers];
2031 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2032 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2033 reg_destroyed[r] = false;
2034 }
2035 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2036 freg_destroyed[f] = false;
2037 }
2039 #endif /* ASSERT */
2041 // This may iterate in two different directions depending on the
2042 // kind of native it is. The reason is that for regular JNI natives
2043 // the incoming and outgoing registers are offset upwards and for
2044 // critical natives they are offset down.
2045 GrowableArray<int> arg_order(2 * total_in_args);
2046 VMRegPair tmp_vmreg;
2047 tmp_vmreg.set1(rbx->as_VMReg());
2049 if (!is_critical_native) {
2050 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2051 arg_order.push(i);
2052 arg_order.push(c_arg);
2053 }
2054 } else {
2055 // Compute a valid move order, using tmp_vmreg to break any cycles
2056 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2057 }
2059 int temploc = -1;
2060 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2061 int i = arg_order.at(ai);
2062 int c_arg = arg_order.at(ai + 1);
2063 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2064 if (c_arg == -1) {
2065 assert(is_critical_native, "should only be required for critical natives");
2066 // This arg needs to be moved to a temporary
2067 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2068 in_regs[i] = tmp_vmreg;
2069 temploc = i;
2070 continue;
2071 } else if (i == -1) {
2072 assert(is_critical_native, "should only be required for critical natives");
2073 // Read from the temporary location
2074 assert(temploc != -1, "must be valid");
2075 i = temploc;
2076 temploc = -1;
2077 }
2078 #ifdef ASSERT
2079 if (in_regs[i].first()->is_Register()) {
2080 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2081 } else if (in_regs[i].first()->is_XMMRegister()) {
2082 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2083 }
2084 if (out_regs[c_arg].first()->is_Register()) {
2085 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2086 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2087 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2088 }
2089 #endif /* ASSERT */
2090 switch (in_sig_bt[i]) {
2091 case T_ARRAY:
2092 if (is_critical_native) {
2093 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2094 c_arg++;
2095 #ifdef ASSERT
2096 if (out_regs[c_arg].first()->is_Register()) {
2097 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2098 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2099 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2100 }
2101 #endif
2102 break;
2103 }
2104 case T_OBJECT:
2105 assert(!is_critical_native, "no oop arguments");
2106 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2107 ((i == 0) && (!is_static)),
2108 &receiver_offset);
2109 break;
2110 case T_VOID:
2111 break;
2113 case T_FLOAT:
2114 float_move(masm, in_regs[i], out_regs[c_arg]);
2115 break;
2117 case T_DOUBLE:
2118 assert( i + 1 < total_in_args &&
2119 in_sig_bt[i + 1] == T_VOID &&
2120 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2121 double_move(masm, in_regs[i], out_regs[c_arg]);
2122 break;
2124 case T_LONG :
2125 long_move(masm, in_regs[i], out_regs[c_arg]);
2126 break;
2128 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2130 default:
2131 move32_64(masm, in_regs[i], out_regs[c_arg]);
2132 }
2133 }
2135 // point c_arg at the first arg that is already loaded in case we
2136 // need to spill before we call out
2137 int c_arg = total_c_args - total_in_args;
2139 // Pre-load a static method's oop into r14. Used both by locking code and
2140 // the normal JNI call code.
2141 if (method->is_static() && !is_critical_native) {
2143 // load oop into a register
2144 __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
2146 // Now handlize the static class mirror; it's known not-null.
2147 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2148 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2150 // Now get the handle
2151 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2152 // store the klass handle as second argument
2153 __ movptr(c_rarg1, oop_handle_reg);
2154 // and protect the arg if we must spill
2155 c_arg--;
2156 }
2158 // Change state to native (we save the return address in the thread, since it might not
2159 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2160 // points into the right code segment. It does not have to be the correct return pc.
2161 // We use the same pc/oopMap repeatedly when we call out
2163 intptr_t the_pc = (intptr_t) __ pc();
2164 oop_maps->add_gc_map(the_pc - start, map);
2166 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2169 // We have all of the arguments set up at this point. We must not touch any of the
2170 // argument registers at this point (what if we save/restore them and there are no oops?).
2172 {
2173 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2174 // protect the args we've loaded
2175 save_args(masm, total_c_args, c_arg, out_regs);
2176 __ mov_metadata(c_rarg1, method());
2177 __ call_VM_leaf(
2178 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2179 r15_thread, c_rarg1);
2180 restore_args(masm, total_c_args, c_arg, out_regs);
2181 }
2183 // RedefineClasses() tracing support for obsolete method entry
2184 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2185 // protect the args we've loaded
2186 save_args(masm, total_c_args, c_arg, out_regs);
2187 __ mov_metadata(c_rarg1, method());
2188 __ call_VM_leaf(
2189 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2190 r15_thread, c_rarg1);
2191 restore_args(masm, total_c_args, c_arg, out_regs);
2192 }
2194 // Lock a synchronized method
2196 // Register definitions used by locking and unlocking
2198 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2199 const Register obj_reg = rbx; // Will contain the oop
2200 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2201 const Register old_hdr = r13; // value of old header at unlock time
2203 Label slow_path_lock;
2204 Label lock_done;
2206 if (method->is_synchronized()) {
2207 assert(!is_critical_native, "unhandled");
2210 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2212 // Get the handle (the 2nd argument)
2213 __ mov(oop_handle_reg, c_rarg1);
2215 // Get address of the box
2217 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2219 // Load the oop from the handle
2220 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2222 if (UseBiasedLocking) {
2223 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2224 }
2226 // Load immediate 1 into swap_reg %rax
2227 __ movl(swap_reg, 1);
2229 // Load (object->mark() | 1) into swap_reg %rax
2230 __ orptr(swap_reg, Address(obj_reg, 0));
2232 // Save (object->mark() | 1) into BasicLock's displaced header
2233 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2235 if (os::is_MP()) {
2236 __ lock();
2237 }
2239 // src -> dest iff dest == rax else rax <- dest
2240 __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2241 __ jcc(Assembler::equal, lock_done);
2243 // Hmm should this move to the slow path code area???
2245 // Test if the oopMark is an obvious stack pointer, i.e.,
2246 // 1) (mark & 3) == 0, and
2247 // 2) rsp <= mark < mark + os::pagesize()
2248 // These 3 tests can be done by evaluating the following
2249 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2250 // assuming both stack pointer and pagesize have their
2251 // least significant 2 bits clear.
2252 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
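// Arithmetic sketch (assuming a 4096-byte page): 3 - 4096 ==
// 0x...f003, so the mask below keeps only bits 0-1 and bits 12 and up
// of (mark - rsp). The result is zero exactly when the low two bits of
// the mark are clear and 0 <= mark - rsp < 4096, i.e. the displaced
// mark points into our own stack page.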
2254 __ subptr(swap_reg, rsp);
2255 __ andptr(swap_reg, 3 - os::vm_page_size());
2257 // Save the test result, for recursive case, the result is zero
2258 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2259 __ jcc(Assembler::notEqual, slow_path_lock);
2261 // Slow path will re-enter here
2263 __ bind(lock_done);
2264 }
2267 // Finally just about ready to make the JNI call
2270 // get JNIEnv* which is first argument to native
2271 if (!is_critical_native) {
2272 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2273 }
2275 // Now set thread in native
2276 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2278 __ call(RuntimeAddress(native_func));
2280 // Either restore the MXCSR register after returning from the JNI Call
2281 // or verify that it wasn't changed.
2282 if (RestoreMXCSROnJNICalls) {
2283 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
2285 }
2286 else if (CheckJNICalls) {
2287 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
2288 }
2291 // Unpack native results.
2292 switch (ret_type) {
2293 case T_BOOLEAN: __ c2bool(rax); break;
2294 case T_CHAR : __ movzwl(rax, rax); break;
2295 case T_BYTE : __ sign_extend_byte (rax); break;
2296 case T_SHORT : __ sign_extend_short(rax); break;
2297 case T_INT : /* nothing to do */ break;
2298 case T_DOUBLE :
2299 case T_FLOAT :
2300 // Result is in xmm0; we'll save as needed
2301 break;
2302 case T_ARRAY: // Really a handle
2303 case T_OBJECT: // Really a handle
2304 break; // can't de-handlize until after safepoint check
2305 case T_VOID: break;
2306 case T_LONG: break;
2307 default : ShouldNotReachHere();
2308 }
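// Note on T_BOOLEAN above (behavior of c2bool, to the best of our
// understanding): native code may return any non-zero byte for
// JNI_TRUE, so the low byte of rax is normalized to exactly 0 or 1
// before Java code can observe the value.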
2310 // Switch thread to "native transition" state before reading the synchronization state.
2311 // This additional state is necessary because reading and testing the synchronization
2312 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2313 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2314 // VM thread changes sync state to synchronizing and suspends threads for GC.
2315 // Thread A is resumed to finish this native method, but doesn't block here since it
2316 // didn't see any synchronization in progress, and escapes.
2317 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2319 if (os::is_MP()) {
2320 if (UseMembar) {
2321 // Force this write out before the read below
2322 __ membar(Assembler::Membar_mask_bits(
2323 Assembler::LoadLoad | Assembler::LoadStore |
2324 Assembler::StoreLoad | Assembler::StoreStore));
2325 } else {
2326 // Write serialization page so VM thread can do a pseudo remote membar.
2327 // We use the current thread pointer to calculate a thread specific
2328 // offset to write to within the page. This minimizes bus traffic
2329 // due to cache line collision.
2330 __ serialize_memory(r15_thread, rcx);
2331 }
2332 }
2334 Label after_transition;
2336 // check for safepoint operation in progress and/or pending suspend requests
2337 {
2338 Label Continue;
2340 __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2341 SafepointSynchronize::_not_synchronized);
2343 Label L;
2344 __ jcc(Assembler::notEqual, L);
2345 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2346 __ jcc(Assembler::equal, Continue);
2347 __ bind(L);
2349 // Don't use call_VM as it will see a possible pending exception and forward it
2350 // and never return here preventing us from clearing _last_native_pc down below.
2351 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2352 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2353 // by hand.
2354 //
2355 save_native_result(masm, ret_type, stack_slots);
2356 __ mov(c_rarg0, r15_thread);
2357 __ mov(r12, rsp); // remember sp
2358 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2359 __ andptr(rsp, -16); // align stack as required by ABI
2360 if (!is_critical_native) {
2361 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2362 } else {
2363 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2364 }
2365 __ mov(rsp, r12); // restore sp
2366 __ reinit_heapbase();
2367 // Restore any method result value
2368 restore_native_result(masm, ret_type, stack_slots);
2370 if (is_critical_native) {
2371 // The call above performed the transition to thread_in_Java so
2372 // skip the transition logic below.
2373 __ jmpb(after_transition);
2374 }
2376 __ bind(Continue);
2377 }
2379 // change thread state
2380 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2381 __ bind(after_transition);
2383 Label reguard;
2384 Label reguard_done;
2385 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2386 __ jcc(Assembler::equal, reguard);
2387 __ bind(reguard_done);
2389 // The native result, if any, is live
2391 // Unlock
2392 Label unlock_done;
2393 Label slow_path_unlock;
2394 if (method->is_synchronized()) {
2396 // Get locked oop from the handle we passed to jni
2397 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2399 Label done;
2401 if (UseBiasedLocking) {
2402 __ biased_locking_exit(obj_reg, old_hdr, done);
2403 }
2405 // Simple recursive lock?
2407 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2408 __ jcc(Assembler::equal, done);
2410 // Must save rax if it is live now because cmpxchg must use it
2411 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2412 save_native_result(masm, ret_type, stack_slots);
2413 }
2416 // get address of the stack lock
2417 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2418 // get old displaced header
2419 __ movptr(old_hdr, Address(rax, 0));
2421 // Atomic swap old header if oop still contains the stack lock
2422 if (os::is_MP()) {
2423 __ lock();
2424 }
2425 __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2426 __ jcc(Assembler::notEqual, slow_path_unlock);
2428 // slow path re-enters here
2429 __ bind(unlock_done);
2430 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2431 restore_native_result(masm, ret_type, stack_slots);
2432 }
2434 __ bind(done);
2436 }
2437 {
2438 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2439 save_native_result(masm, ret_type, stack_slots);
2440 __ mov_metadata(c_rarg1, method());
2441 __ call_VM_leaf(
2442 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2443 r15_thread, c_rarg1);
2444 restore_native_result(masm, ret_type, stack_slots);
2445 }
2447 __ reset_last_Java_frame(false, true);
2449 // Unpack oop result
2450 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2451 Label L;
2452 __ testptr(rax, rax);
2453 __ jcc(Assembler::zero, L);
2454 __ movptr(rax, Address(rax, 0));
2455 __ bind(L);
2456 __ verify_oop(rax);
2457 }
2459 if (!is_critical_native) {
2460 // reset handle block
2461 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2462 __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2463 }
2465 // pop our frame
2467 __ leave();
2469 if (!is_critical_native) {
2470 // Any exception pending?
2471 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2472 __ jcc(Assembler::notEqual, exception_pending);
2473 }
2475 // Return
2477 __ ret(0);
2479 // Unexpected paths are out of line and go here
2481 if (!is_critical_native) {
2482 // forward the exception
2483 __ bind(exception_pending);
2485 // and forward the exception
2486 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2487 }
2489 // Slow path locking & unlocking
2490 if (method->is_synchronized()) {
2492 // BEGIN Slow path lock
2493 __ bind(slow_path_lock);
2495 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2496 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2498 // protect the args we've loaded
2499 save_args(masm, total_c_args, c_arg, out_regs);
2501 __ mov(c_rarg0, obj_reg);
2502 __ mov(c_rarg1, lock_reg);
2503 __ mov(c_rarg2, r15_thread);
2505 // Not a leaf but we have last_Java_frame setup as we want
2506 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2507 restore_args(masm, total_c_args, c_arg, out_regs);
2509 #ifdef ASSERT
2510 { Label L;
2511 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2512 __ jcc(Assembler::equal, L);
2513 __ stop("no pending exception allowed on exit from monitorenter");
2514 __ bind(L);
2515 }
2516 #endif
2517 __ jmp(lock_done);
2519 // END Slow path lock
2521 // BEGIN Slow path unlock
2522 __ bind(slow_path_unlock);
2524 // If we haven't already saved the native result we must save it now as xmm registers
2525 // are still exposed.
2527 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2528 save_native_result(masm, ret_type, stack_slots);
2529 }
2531 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2533 __ mov(c_rarg0, obj_reg);
2534 __ mov(r12, rsp); // remember sp
2535 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2536 __ andptr(rsp, -16); // align stack as required by ABI
2538 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2539 // NOTE that obj_reg == rbx currently
2540 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2541 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2543 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2544 __ mov(rsp, r12); // restore sp
2545 __ reinit_heapbase();
2546 #ifdef ASSERT
2547 {
2548 Label L;
2549 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2550 __ jcc(Assembler::equal, L);
2551 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2552 __ bind(L);
2553 }
2554 #endif /* ASSERT */
2556 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2558 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2559 restore_native_result(masm, ret_type, stack_slots);
2560 }
2561 __ jmp(unlock_done);
2563 // END Slow path unlock
2565 } // synchronized
2567 // SLOW PATH Reguard the stack if needed
2569 __ bind(reguard);
2570 save_native_result(masm, ret_type, stack_slots);
2571 __ mov(r12, rsp); // remember sp
2572 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2573 __ andptr(rsp, -16); // align stack as required by ABI
2574 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2575 __ mov(rsp, r12); // restore sp
2576 __ reinit_heapbase();
2577 restore_native_result(masm, ret_type, stack_slots);
2578 // and continue
2579 __ jmp(reguard_done);
2583 __ flush();
2585 nmethod *nm = nmethod::new_native_nmethod(method,
2586 compile_id,
2587 masm->code(),
2588 vep_offset,
2589 frame_complete,
2590 stack_slots / VMRegImpl::slots_per_word,
2591 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2592 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2593 oop_maps);
2595 if (is_critical_native) {
2596 nm->set_lazy_critical_native(true);
2597 }
2599 return nm;
2601 }
2603 #ifdef HAVE_DTRACE_H
2604 // ---------------------------------------------------------------------------
2605 // Generate a dtrace nmethod for a given signature. The method takes arguments
2606 // in the Java compiled code convention, marshals them to the native
2607 // abi and then leaves nops at the position you would expect to call a native
2608 // function. When the probe is enabled the nops are replaced with a trap
2609 // instruction that dtrace inserts and the trace will cause a notification
2610 // to dtrace.
2611 //
2612 // The probes are only able to take primitive types and java/lang/String as
2613 // arguments. No other java types are allowed. Strings are converted to utf8
2614 // strings so that from dtrace's point of view java strings are converted to C
2615 // strings. There is an arbitrary fixed limit on the total space that a method
2616 // can use for converting the strings (256 chars per string in the signature),
2617 // so any java string larger than this is truncated.
2619 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2620 static bool offsets_initialized = false;
2623 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
2624 methodHandle method) {
2627 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2628 // be single threaded in this method.
2629 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2631 if (!offsets_initialized) {
2632 fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize;
2633 fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize;
2634 fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize;
2635 fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize;
2636 fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize;
2637 fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize;
2639 fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize;
2640 fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize;
2641 fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize;
2642 fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize;
2643 fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize;
2644 fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize;
2645 fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize;
2646 fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize;
2648 offsets_initialized = true;
2649 }
2650 // Fill in the signature array, for the calling-convention call.
2651 int total_args_passed = method->size_of_parameters();
2653 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2654 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2656 // The signature we are going to use for the trap that dtrace will see:
2657 // java/lang/String is converted. We drop "this" and any other object
2658 // is converted to NULL. (A one-slot java/lang/Long object reference
2659 // is converted to a two-slot long, which is why we double the allocation).
2660 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2661 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2663 int i=0;
2664 int total_strings = 0;
2665 int first_arg_to_pass = 0;
2666 int total_c_args = 0;
2668 // Skip the receiver as dtrace doesn't want to see it
2669 if( !method->is_static() ) {
2670 in_sig_bt[i++] = T_OBJECT;
2671 first_arg_to_pass = 1;
2672 }
2674 // We need to convert the java args to where a native (non-jni) function
2675 // would expect them. To figure out where they go we convert the java
2676 // signature to a C signature.
2678 SignatureStream ss(method->signature());
2679 for ( ; !ss.at_return_type(); ss.next()) {
2680 BasicType bt = ss.type();
2681 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2682 out_sig_bt[total_c_args++] = bt;
2683 if( bt == T_OBJECT) {
2684 Symbol* s = ss.as_symbol_or_null(); // symbol is created
2685 if (s == vmSymbols::java_lang_String()) {
2686 total_strings++;
2687 out_sig_bt[total_c_args-1] = T_ADDRESS;
2688 } else if (s == vmSymbols::java_lang_Boolean() ||
2689 s == vmSymbols::java_lang_Character() ||
2690 s == vmSymbols::java_lang_Byte() ||
2691 s == vmSymbols::java_lang_Short() ||
2692 s == vmSymbols::java_lang_Integer() ||
2693 s == vmSymbols::java_lang_Float()) {
2694 out_sig_bt[total_c_args-1] = T_INT;
2695 } else if (s == vmSymbols::java_lang_Long() ||
2696 s == vmSymbols::java_lang_Double()) {
2697 out_sig_bt[total_c_args-1] = T_LONG;
2698 out_sig_bt[total_c_args++] = T_VOID;
2699 }
2700 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2701 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2702 // We convert double to long
2703 out_sig_bt[total_c_args-1] = T_LONG;
2704 out_sig_bt[total_c_args++] = T_VOID;
2705 } else if ( bt == T_FLOAT) {
2706 // We convert float to int
2707 out_sig_bt[total_c_args-1] = T_INT;
2708 }
2709 }
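// For example (hypothetical method): static void probe(String s, double d)
// yields in_sig_bt == { T_OBJECT, T_DOUBLE, T_VOID } and
// out_sig_bt == { T_ADDRESS, T_LONG, T_VOID }: the String travels as a
// utf8 pointer and the double as a long.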
2711 assert(i==total_args_passed, "validly parsed signature");
2713 // Now get the compiled-Java layout as input arguments
2714 int comp_args_on_stack;
2715 comp_args_on_stack = SharedRuntime::java_calling_convention(
2716 in_sig_bt, in_regs, total_args_passed, false);
2718 // Now figure out where the args must be stored and how much stack space
2719 // they require (neglecting out_preserve_stack_slots but space for storing
2720 // the 1st six register arguments). It's weird; see int_stk_helper.
2722 int out_arg_slots;
2723 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2725 // Calculate the total number of stack slots we will need.
2727 // First count the abi requirement plus all of the outgoing args
2728 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2730 // Now space for the string(s) we must convert
2731 int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2732 for (i = 0; i < total_strings ; i++) {
2733 string_locs[i] = stack_slots;
2734 stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2735 }
2737 // Plus the temps we might need to juggle register args
2738 // regs take two slots each
2739 stack_slots += (Argument::n_int_register_parameters_c +
2740 Argument::n_float_register_parameters_c) * 2;
2743 // + 4 for return address (which we own) and saved rbp,
2745 stack_slots += 4;
2747 // Ok, the space we have allocated will look like:
2748 //
2749 //
2750 // FP-> | |
2751 // |---------------------|
2752 // | string[n] |
2753 // |---------------------| <- string_locs[n]
2754 // | string[n-1] |
2755 // |---------------------| <- string_locs[n-1]
2756 // | ... |
2757 // | ... |
2758 // |---------------------| <- string_locs[1]
2759 // | string[0] |
2760 // |---------------------| <- string_locs[0]
2761 // | outbound memory |
2762 // | based arguments |
2763 // | |
2764 // |---------------------|
2765 // | |
2766 // SP-> | out_preserved_slots |
2767 //
2768 //
2770 // Now compute actual number of stack words we need rounding to make
2771 // stack properly aligned.
2772 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2774 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2776 intptr_t start = (intptr_t)__ pc();
2778 // First thing make an ic check to see if we should even be here
2780 // We are free to use all registers as temps without saving them and
2781 // restoring them except rbp. rbp is the only callee save register
2782 // as far as the interpreter and the compiler(s) are concerned.
2784 const Register ic_reg = rax;
2785 const Register receiver = rcx;
2786 Label hit;
2787 Label exception_pending;
2790 __ verify_oop(receiver);
2791 __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2792 __ jcc(Assembler::equal, hit);
2794 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2796 // Verified entry must be aligned for code patching,
2797 // and the first 5 bytes must be in the same cache line;
2798 // if we align at 8 then we will be sure 5 bytes are in the same line
2799 __ align(8);
2801 __ bind(hit);
2803 int vep_offset = ((intptr_t)__ pc()) - start;
2806 // The instruction at the verified entry point must be 5 bytes or longer
2807 // because it can be patched on the fly by make_non_entrant. The stack bang
2808 // instruction fits that requirement.
2810 // Generate stack overflow check
2812 if (UseStackBanging) {
2813 if (stack_size <= StackShadowPages*os::vm_page_size()) {
2814 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2815 } else {
2816 __ movl(rax, stack_size);
2817 __ bang_stack_size(rax, rbx);
2818 }
2819 } else {
2820 // need a 5 byte instruction to allow MT safe patching to non-entrant
2821 __ fat_nop();
2822 }
2824 assert(((uintptr_t)__ pc() - start - vep_offset) >= 5,
2825 "valid size for make_non_entrant");
2827 // Generate a new frame for the wrapper.
2828 __ enter();
2830 // -2 because return address is already present and so is saved rbp,
2831 if (stack_size - 2*wordSize != 0) {
2832 __ subq(rsp, stack_size - 2*wordSize);
2833 }
2835 // Frame is now completed as far as size and linkage.
2837 int frame_complete = ((intptr_t)__ pc()) - start;
2839 int c_arg, j_arg;
2841 // State of input register args
2843 bool live[ConcreteRegisterImpl::number_of_registers];
2845 live[j_rarg0->as_VMReg()->value()] = false;
2846 live[j_rarg1->as_VMReg()->value()] = false;
2847 live[j_rarg2->as_VMReg()->value()] = false;
2848 live[j_rarg3->as_VMReg()->value()] = false;
2849 live[j_rarg4->as_VMReg()->value()] = false;
2850 live[j_rarg5->as_VMReg()->value()] = false;
2852 live[j_farg0->as_VMReg()->value()] = false;
2853 live[j_farg1->as_VMReg()->value()] = false;
2854 live[j_farg2->as_VMReg()->value()] = false;
2855 live[j_farg3->as_VMReg()->value()] = false;
2856 live[j_farg4->as_VMReg()->value()] = false;
2857 live[j_farg5->as_VMReg()->value()] = false;
2858 live[j_farg6->as_VMReg()->value()] = false;
2859 live[j_farg7->as_VMReg()->value()] = false;
2862 bool rax_is_zero = false;
2864 // All args (except strings) destined for the stack are moved first
2865 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2866 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2867 VMRegPair src = in_regs[j_arg];
2868 VMRegPair dst = out_regs[c_arg];
2870 // Get the real reg value or a dummy (rsp)
2872 int src_reg = src.first()->is_reg() ?
2873 src.first()->value() :
2874 rsp->as_VMReg()->value();
2876 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
2877 (in_sig_bt[j_arg] == T_OBJECT &&
2878 out_sig_bt[c_arg] != T_INT &&
2879 out_sig_bt[c_arg] != T_ADDRESS &&
2880 out_sig_bt[c_arg] != T_LONG);
2882 live[src_reg] = !useless;
2884 if (dst.first()->is_stack()) {
2886 // Even though a string arg in a register is still live after this loop,
2887 // it will be dead after the string conversion loop (next), so we take
2888 // advantage of that now for simpler code to manage live.
2890 live[src_reg] = false;
2891 switch (in_sig_bt[j_arg]) {
2893 case T_ARRAY:
2894 case T_OBJECT:
2895 {
2896 Address stack_dst(rsp, reg2offset_out(dst.first()));
2898 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2899 // need to unbox a one-word value
2900 Register in_reg = rax;
2901 if ( src.first()->is_reg() ) {
2902 in_reg = src.first()->as_Register();
2903 } else {
2904 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
2905 rax_is_zero = false;
2906 }
2907 Label skipUnbox;
2908 __ movptr(Address(rsp, reg2offset_out(dst.first())),
2909 (int32_t)NULL_WORD);
2910 __ testq(in_reg, in_reg);
2911 __ jcc(Assembler::zero, skipUnbox);
2913 BasicType bt = out_sig_bt[c_arg];
2914 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2915 Address src1(in_reg, box_offset);
2916 if ( bt == T_LONG ) {
2917 __ movq(in_reg, src1);
2918 __ movq(stack_dst, in_reg);
2919 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2920 ++c_arg; // skip over T_VOID to keep the loop indices in sync
2921 } else {
2922 __ movl(in_reg, src1);
2923 __ movl(stack_dst, in_reg);
2924 }
2926 __ bind(skipUnbox);
2927 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
2928 // Convert the arg to NULL
2929 if (!rax_is_zero) {
2930 __ xorq(rax, rax);
2931 rax_is_zero = true;
2932 }
2933 __ movq(stack_dst, rax);
2934 }
2935 }
2936 break;
2938 case T_VOID:
2939 break;
2941 case T_FLOAT:
2942 // This does the right thing since we know it is destined for the
2943 // stack
2944 float_move(masm, src, dst);
2945 break;
2947 case T_DOUBLE:
2948 // This does the right thing since we know it is destined for the
2949 // stack
2950 double_move(masm, src, dst);
2951 break;
2953 case T_LONG :
2954 long_move(masm, src, dst);
2955 break;
2957 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2959 default:
2960 move32_64(masm, src, dst);
2961 }
2962 }
2964 }
2966 // If we have any strings we must store any register based arg to the stack
2967 // This includes any still live xmm registers too.
2969 int sid = 0;
2971 if (total_strings > 0 ) {
2972 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2973 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2974 VMRegPair src = in_regs[j_arg];
2975 VMRegPair dst = out_regs[c_arg];
2977 if (src.first()->is_reg()) {
2978 Address src_tmp(rbp, fp_offset[src.first()->value()]);
2980 // string oops were left untouched by the previous loop even if the
2981 // eventual (converted) arg is destined for the stack, so park them
2982 // away now (except for the first)
2984 if (out_sig_bt[c_arg] == T_ADDRESS) {
2985 Address utf8_addr = Address(
2986 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
2987 if (sid != 1) {
2988 // The first string arg won't be killed until after the utf8
2989 // conversion
2990 __ movq(utf8_addr, src.first()->as_Register());
2991 }
2992 } else if (dst.first()->is_reg()) {
2993 if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) {
2995 // Convert the xmm register to an int and store it in the reserved
2996 // location for the eventual c register arg
2997 XMMRegister f = src.first()->as_XMMRegister();
2998 if (in_sig_bt[j_arg] == T_FLOAT) {
2999 __ movflt(src_tmp, f);
3000 } else {
3001 __ movdbl(src_tmp, f);
3002 }
3003 } else {
3004 // If the arg is an oop type we don't support, don't bother to store
3005 // it; remember, strings were handled above.
3006 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3007 (in_sig_bt[j_arg] == T_OBJECT &&
3008 out_sig_bt[c_arg] != T_INT &&
3009 out_sig_bt[c_arg] != T_LONG);
3011 if (!useless) {
3012 __ movq(src_tmp, src.first()->as_Register());
3013 }
3014 }
3015 }
3016 }
3017 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3018 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3019 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3020 }
3021 }
3023 // Now that the volatile registers are safe, convert all the strings
3024 sid = 0;
3026 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3027 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3028 if (out_sig_bt[c_arg] == T_ADDRESS) {
3029 // It's a string
3030 Address utf8_addr = Address(
3031 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3032 // The first string we find might still be in the original java arg
3033 // register
3035 VMReg src = in_regs[j_arg].first();
3037 // We will need to eventually save the final argument to the trap
3038 // in the non-volatile location dedicated to src. This is the offset
3039 // from fp we will use.
3040 int src_off = src->is_reg() ?
3041 fp_offset[src->value()] : reg2offset_in(src);
3043 // This is where the argument will eventually reside
3044 VMRegPair dst = out_regs[c_arg];
3046 if (src->is_reg()) {
3047 if (sid == 1) {
3048 __ movq(c_rarg0, src->as_Register());
3049 } else {
3050 __ movq(c_rarg0, utf8_addr);
3051 }
3052 } else {
3053 // arg is still in the original location
3054 __ movq(c_rarg0, Address(rbp, reg2offset_in(src)));
3055 }
3056 Label done, convert;
3058 // see if the oop is NULL
3059 __ testq(c_rarg0, c_rarg0);
3060 __ jcc(Assembler::notEqual, convert);
3062 if (dst.first()->is_reg()) {
3063 // Save the ptr to the utf8 string in the original src loc or the tmp
3064 // dedicated to it
3065 __ movq(Address(rbp, src_off), c_rarg0);
3066 } else {
3067 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0);
3068 }
3069 __ jmp(done);
3071 __ bind(convert);
3073 __ lea(c_rarg1, utf8_addr);
3074 if (dst.first()->is_reg()) {
3075 __ movq(Address(rbp, src_off), c_rarg1);
3076 } else {
3077 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1);
3078 }
3079 // And do the conversion
3080 __ call(RuntimeAddress(
3081 CAST_FROM_FN_PTR(address, SharedRuntime::get_utf)));
3083 __ bind(done);
3084 }
3085 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3086 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3087 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3088 }
3089 }
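// For reference, the conversion target assumed by the call above is the
// dtrace helper SharedRuntime::get_utf (sharedRuntime.cpp). A hedged sketch
// of the register contract this stub sets up, not a verbatim declaration:
//
//   // c_rarg0: java.lang.String oop (the NULL case was branched around)
//   // c_rarg1: utf8 buffer reserved on this frame at string_locs[sid]
//   void SharedRuntime::get_utf(oopDesc* src, address dst);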
3090 // The get_utf call killed all the c_arg registers
3091 live[c_rarg0->as_VMReg()->value()] = false;
3092 live[c_rarg1->as_VMReg()->value()] = false;
3093 live[c_rarg2->as_VMReg()->value()] = false;
3094 live[c_rarg3->as_VMReg()->value()] = false;
3095 live[c_rarg4->as_VMReg()->value()] = false;
3096 live[c_rarg5->as_VMReg()->value()] = false;
3098 live[c_farg0->as_VMReg()->value()] = false;
3099 live[c_farg1->as_VMReg()->value()] = false;
3100 live[c_farg2->as_VMReg()->value()] = false;
3101 live[c_farg3->as_VMReg()->value()] = false;
3102 live[c_farg4->as_VMReg()->value()] = false;
3103 live[c_farg5->as_VMReg()->value()] = false;
3104 live[c_farg6->as_VMReg()->value()] = false;
3105 live[c_farg7->as_VMReg()->value()] = false;
3106 }
3108 // Now we can finally move the register args to their desired locations
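// The shuffle below behaves like a scheduled parallel move: live[] tracks
// which VMRegs still hold an unconsumed java argument, and fp_offset[] names
// each register's spill home in this frame. A sketch of the per-destination
// invariant (commentary only, not extra generated code):
//
//   if (live[dst] && src != dst)  spill dst to Address(rbp, fp_offset[dst]);
//   move src into dst (from its register or its rbp-relative home);
//   live[src] = false;   // the input has been consumed
//   live[dst] = false;   // dst now holds an outbound C argument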
3110 rax_is_zero = false;
3112 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3113 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3115 VMRegPair src = in_regs[j_arg];
3116 VMRegPair dst = out_regs[c_arg];
3118 // Only need to look for args destined for the integer registers (since we
3119 // convert float/double args to look like int/long outbound)
3120 if (dst.first()->is_reg()) {
3121 Register r = dst.first()->as_Register();
3123 // Check if the java arg is unsupported and therefore useless
3124 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3125 (in_sig_bt[j_arg] == T_OBJECT &&
3126 out_sig_bt[c_arg] != T_INT &&
3127 out_sig_bt[c_arg] != T_ADDRESS &&
3128 out_sig_bt[c_arg] != T_LONG);
3131 // If we're going to kill an existing arg save it first
3132 if (live[dst.first()->value()]) {
3133 // you can't kill yourself
3134 if (src.first() != dst.first()) {
3135 __ movq(Address(rbp, fp_offset[dst.first()->value()]), r);
3136 }
3137 }
3138 if (src.first()->is_reg()) {
3139 if (live[src.first()->value()] ) {
3140 if (in_sig_bt[j_arg] == T_FLOAT) {
3141 __ movdl(r, src.first()->as_XMMRegister());
3142 } else if (in_sig_bt[j_arg] == T_DOUBLE) {
3143 __ movdq(r, src.first()->as_XMMRegister());
3144 } else if (r != src.first()->as_Register()) {
3145 if (!useless) {
3146 __ movq(r, src.first()->as_Register());
3147 }
3148 }
3149 } else {
3150 // If the arg is an oop type we don't support, don't bother to store
3151 // it
3152 if (!useless) {
3153 if (in_sig_bt[j_arg] == T_DOUBLE ||
3154 in_sig_bt[j_arg] == T_LONG ||
3155 in_sig_bt[j_arg] == T_OBJECT ) {
3156 __ movq(r, Address(rbp, fp_offset[src.first()->value()]));
3157 } else {
3158 __ movl(r, Address(rbp, fp_offset[src.first()->value()]));
3159 }
3160 }
3161 }
3162 live[src.first()->value()] = false;
3163 } else if (!useless) {
3164 // a full-sized move even for an int should be ok
3165 __ movq(r, Address(rbp, reg2offset_in(src.first())));
3166 }
3168 // At this point r has the original java arg in the final location
3169 // (assuming it wasn't useless). If the java arg was an oop
3170 // we have a bit more to do
3172 if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) {
3173 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3174 // need to unbox a one-word value
3175 Label skip;
3176 __ testq(r, r);
3177 __ jcc(Assembler::equal, skip);
3178 BasicType bt = out_sig_bt[c_arg];
3179 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3180 Address src1(r, box_offset);
3181 if ( bt == T_LONG ) {
3182 __ movq(r, src1);
3183 } else {
3184 __ movl(r, src1);
3185 }
3186 __ bind(skip);
3188 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
3189 // Convert the arg to NULL
3190 __ xorq(r, r);
3191 }
3192 }
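// The unboxing above is the moral equivalent of this hedged C-level sketch
// (movl instead of movq when the outbound type is T_INT):
//
//   if (r != NULL)  r = *(jlong*)((address)r + box_offset);
//
// where box_offset locates the box object's 'value' field, as reported by
// java_lang_boxing_object::value_offset_in_bytes(bt).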
3194 // dst can no longer be holding an input value
3195 live[dst.first()->value()] = false;
3196 }
3197 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3198 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3199 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3200 }
3201 }
3204 // OK, now we are done. We need to place the nop that dtrace wants in order
3205 // to patch in the trap
3206 int patch_offset = ((intptr_t)__ pc()) - start;
3208 __ nop();
3211 // Return
3213 __ leave();
3214 __ ret(0);
3216 __ flush();
3218 nmethod *nm = nmethod::new_dtrace_nmethod(
3219 method, masm->code(), vep_offset, patch_offset, frame_complete,
3220 stack_slots / VMRegImpl::slots_per_word);
3221 return nm;
3223 }
3225 #endif // HAVE_DTRACE_H
3227 // this function returns the adjustment size (in number of words) to a c2i
3228 // adapter activation for use during deoptimization
3229 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3230 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3231 }
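// For example (a sketch, assuming Interpreter::stackElementWords == 1 on
// amd64): a callee with 2 parameters and 5 locals needs its caller's frame
// extended by (5 - 2) * 1 = 3 words to make room for the extra locals.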
3234 uint SharedRuntime::out_preserve_stack_slots() {
3235 return 0;
3236 }
3239 //------------------------------generate_deopt_blob----------------------------
3240 void SharedRuntime::generate_deopt_blob() {
3241 // Allocate space for the code
3242 ResourceMark rm;
3243 // Setup code generation tools
3244 CodeBuffer buffer("deopt_blob", 2048, 1024);
3245 MacroAssembler* masm = new MacroAssembler(&buffer);
3246 int frame_size_in_words;
3247 OopMap* map = NULL;
3248 OopMapSet *oop_maps = new OopMapSet();
3250 // -------------
3251 // This code enters when returning to a de-optimized nmethod. A return
3252 // address has been pushed on the stack, and return values are in
3253 // registers.
3254 // If we are doing a normal deopt then we were called from the patched
3255 // nmethod from the point we returned to the nmethod. So the return
3256 // address on the stack is wrong by NativeCall::instruction_size
3257 // We will adjust the value so it looks like we have the original return
3258 // address on the stack (like when we eagerly deoptimized).
3259 // In the case of an exception pending when deoptimizing, we enter
3260 // with a return address on the stack that points after the call we patched
3261 // into the exception handler. We have the following register state from,
3262 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3263 // rax: exception oop
3264 // rbx: exception handler
3265 // rdx: throwing pc
3266 // So in this case we simply jam rdx into the useless return address and
3267 // the stack looks just like we want.
3268 //
3269 // At this point we need to de-opt. We save the argument return
3270 // registers. We call the first C routine, fetch_unroll_info(). This
3271 // routine captures the return values and returns a structure which
3272 // describes the current frame size and the sizes of all replacement frames.
3273 // The current frame is compiled code and may contain many inlined
3274 // functions, each with their own JVM state. We pop the current frame, then
3275 // push all the new frames. Then we call the C routine unpack_frames() to
3276 // populate these frames. Finally unpack_frames() returns us the new target
3277 // address. Notice that callee-save registers are BLOWN here; they have
3278 // already been captured in the vframeArray at the time the return PC was
3279 // patched.
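// In outline, the cooperation with the runtime looks like this (a sketch;
// the authoritative declarations live in deoptimization.hpp):
//
//   UnrollBlock* info = Deoptimization::fetch_unroll_info(thread);
//   // pop the deoptee, then push info->number_of_frames() skeletal
//   // interpreter frames sized by frame_sizes[], targeted at frame_pcs[]
//   Deoptimization::unpack_frames(thread, exec_mode);  // fill them in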
3280 address start = __ pc();
3281 Label cont;
3283 // Prolog for the non-exception case!
3285 // Save everything in sight.
3286 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3288 // Normal deoptimization. Save exec mode for unpack_frames.
3289 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3290 __ jmp(cont);
3292 int reexecute_offset = __ pc() - start;
3294 // Reexecute case
3295 // the return address is the pc that describes what bci to re-execute at
3297 // No need to update map as each call to save_live_registers will produce identical oopmap
3298 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3300 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3301 __ jmp(cont);
3303 int exception_offset = __ pc() - start;
3305 // Prolog for exception case
3307 // all registers are dead at this entry point, except for rax and
3308 // rdx, which contain the exception oop and exception pc
3309 // respectively. Set them in TLS and fall thru to the
3310 // unpack_with_exception_in_tls entry point.
3312 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3313 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3315 int exception_in_tls_offset = __ pc() - start;
3317 // new implementation because exception oop is now passed in JavaThread
3319 // Prolog for exception case
3320 // All registers must be preserved because they might be used by LinearScan
3321 // Exception oop and throwing PC are passed in JavaThread
3322 // tos: stack at point of call to method that threw the exception (i.e. only
3323 // args are on the stack, no return address)
3325 // make room on stack for the return address
3326 // It will be patched later with the throwing pc. The correct value is not
3327 // available now because loading it from memory would destroy registers.
3328 __ push(0);
3330 // Save everything in sight.
3331 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3333 // Now it is safe to overwrite any register
3335 // Deopt during an exception. Save exec mode for unpack_frames.
3336 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3338 // load throwing pc from JavaThread and patch it as the return address
3339 // of the current frame. Then clear the field in JavaThread
3341 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3342 __ movptr(Address(rbp, wordSize), rdx);
3343 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3345 #ifdef ASSERT
3346 // verify that there is really an exception oop in JavaThread
3347 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3348 __ verify_oop(rax);
3350 // verify that there is no pending exception
3351 Label no_pending_exception;
3352 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3353 __ testptr(rax, rax);
3354 __ jcc(Assembler::zero, no_pending_exception);
3355 __ stop("must not have pending exception here");
3356 __ bind(no_pending_exception);
3357 #endif
3359 __ bind(cont);
3361 // Call C code. Need thread and this frame, but NOT official VM entry
3362 // crud. We cannot block on this call, no GC can happen.
3363 //
3364 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3366 // fetch_unroll_info needs to call last_java_frame().
3368 __ set_last_Java_frame(noreg, noreg, NULL);
3369 #ifdef ASSERT
3370 { Label L;
3371 __ cmpptr(Address(r15_thread,
3372 JavaThread::last_Java_fp_offset()),
3373 (int32_t)0);
3374 __ jcc(Assembler::equal, L);
3375 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3376 __ bind(L);
3377 }
3378 #endif // ASSERT
3379 __ mov(c_rarg0, r15_thread);
3380 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3382 // Need to have an oopmap that tells fetch_unroll_info where to
3383 // find any register it might need.
3384 oop_maps->add_gc_map(__ pc() - start, map);
3386 __ reset_last_Java_frame(false, false);
3388 // Load UnrollBlock* into rdi
3389 __ mov(rdi, rax);
3391 Label noException;
3392 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3393 __ jcc(Assembler::notEqual, noException);
3394 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3395 // QQQ this is useless; it was set to NULL above
3396 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3397 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3398 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3400 __ verify_oop(rax);
3402 // Overwrite the result registers with the exception results.
3403 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3404 // I think this is useless
3405 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3407 __ bind(noException);
3409 // Only register save data is on the stack.
3410 // Now restore the result registers. Everything else is either dead
3411 // or captured in the vframeArray.
3412 RegisterSaver::restore_result_registers(masm);
3414 // All of the register save area has been popped off the stack. Only the
3415 // return address remains.
3417 // Pop all the frames we must move/replace.
3418 //
3419 // Frame picture (youngest to oldest)
3420 // 1: self-frame (no frame link)
3421 // 2: deopting frame (no frame link)
3422 // 3: caller of deopting frame (could be compiled/interpreted).
3423 //
3424 // Note: by leaving the return address of the self-frame on the stack
3425 // and using the size of frame 2 to adjust the stack,
3426 // the return to frame 3 will still be on the stack when we are done.
3428 // Pop deoptimized frame
3429 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3430 __ addptr(rsp, rcx);
3432 // rsp should be pointing at the return address to the caller (3)
3434 // Stack bang to make sure there's enough room for these interpreter frames.
3435 if (UseStackBanging) {
3436 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3437 __ bang_stack_size(rbx, rcx);
3438 }
3440 // Load address of array of frame pcs into rcx
3441 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3443 // Trash the old pc
3444 __ addptr(rsp, wordSize);
3446 // Load address of array of frame sizes into rsi
3447 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3449 // Load counter into rdx
3450 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3452 // Pick up the initial fp we should save
3453 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3455 // Now adjust the caller's stack to make up for the extra locals
3456 // but record the original sp so that we can save it in the skeletal
3457 // interpreter frame; the stack walking of interpreter_sender will then
3458 // get the unextended sp value and not the "real" sp value.
3460 const Register sender_sp = r8;
3462 __ mov(sender_sp, rsp);
3463 __ movl(rbx, Address(rdi,
3464 Deoptimization::UnrollBlock::
3465 caller_adjustment_offset_in_bytes()));
3466 __ subptr(rsp, rbx);
3468 // Push interpreter frames in a loop
3469 Label loop;
3470 __ bind(loop);
3471 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3472 #ifdef CC_INTERP
3473 __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
3474 #ifdef ASSERT
3475 __ push(0xDEADDEAD); // Make a recognizable pattern
3476 __ push(0xDEADDEAD);
3477 #else /* ASSERT */
3478 __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
3479 #endif /* ASSERT */
3480 #else
3481 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3482 #endif // CC_INTERP
3483 __ pushptr(Address(rcx, 0)); // Save return address
3484 __ enter(); // Save old & set new ebp
3485 __ subptr(rsp, rbx); // Prolog
3486 #ifdef CC_INTERP
3487 __ movptr(Address(rbp,
3488 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3489 sender_sp); // Make it walkable
3490 #else /* CC_INTERP */
3491 // This value is corrected by layout_activation_impl
3492 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3493 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3494 #endif /* CC_INTERP */
3495 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3496 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3497 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3498 __ decrementl(rdx); // Decrement counter
3499 __ jcc(Assembler::notZero, loop);
3500 __ pushptr(Address(rcx, 0)); // Save final return address
3502 // Re-push self-frame
3503 __ enter(); // Save old & set new ebp
3505 // Allocate a full sized register save area.
3506 // Return address and rbp are in place, so we allocate two less words.
3507 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3509 // Restore frame locals after moving the frame
3510 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3511 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3513 // Call C code. Need thread but NOT official VM entry
3514 // crud. We cannot block on this call, no GC can happen. Call should
3515 // restore return values to their stack-slots with the new SP.
3516 //
3517 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3519 // Use rbp because the frames look interpreted now
3520 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3521 // Don't need the precise return PC here, just precise enough to point into this code blob.
3522 address the_pc = __ pc();
3523 __ set_last_Java_frame(noreg, rbp, the_pc);
3525 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3526 __ mov(c_rarg0, r15_thread);
3527 __ movl(c_rarg1, r14); // second arg: exec_mode
3528 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3529 // Revert SP alignment after call since we're going to do some SP relative addressing below
3530 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3532 // Set an oopmap for the call site
3533 // Use the same PC we used for the last java frame
3534 oop_maps->add_gc_map(the_pc - start,
3535 new OopMap( frame_size_in_words, 0 ));
3537 // Clear fp AND pc
3538 __ reset_last_Java_frame(true, true);
3540 // Collect return values
3541 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3542 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3543 // I think this is useless (throwing pc?)
3544 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3546 // Pop self-frame.
3547 __ leave(); // Epilog
3549 // Jump to interpreter
3550 __ ret(0);
3552 // Make sure all code is generated
3553 masm->flush();
3555 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3556 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3557 }
3559 #ifdef COMPILER2
3560 //------------------------------generate_uncommon_trap_blob--------------------
3561 void SharedRuntime::generate_uncommon_trap_blob() {
3562 // Allocate space for the code
3563 ResourceMark rm;
3564 // Setup code generation tools
3565 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3566 MacroAssembler* masm = new MacroAssembler(&buffer);
3568 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
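// (framesize counts 32-bit stack slots, so requiring framesize % 4 == 0
// keeps rsp on the 16-byte boundary the ABI demands.)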
3570 address start = __ pc();
3572 // Push self-frame. We get here with a return address on the
3573 // stack, so rsp is 8-byte aligned until we allocate our frame.
3574 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3576 // No callee saved registers. rbp is assumed implicitly saved
3577 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3579 // compiler left unloaded_class_index in j_rarg0; move it to where the
3580 // runtime expects it.
3581 __ movl(c_rarg1, j_rarg0);
3583 __ set_last_Java_frame(noreg, noreg, NULL);
3585 // Call C code. Need thread but NOT official VM entry
3586 // crud. We cannot block on this call, no GC can happen. Call should
3587 // capture callee-saved registers as well as return values.
3588 // Thread is in rdi already.
3589 //
3590 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3592 __ mov(c_rarg0, r15_thread);
3593 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3595 // Set an oopmap for the call site
3596 OopMapSet* oop_maps = new OopMapSet();
3597 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3599 // location of rbp is known implicitly by the frame sender code
3601 oop_maps->add_gc_map(__ pc() - start, map);
3603 __ reset_last_Java_frame(false, false);
3605 // Load UnrollBlock* into rdi
3606 __ mov(rdi, rax);
3608 // Pop all the frames we must move/replace.
3609 //
3610 // Frame picture (youngest to oldest)
3611 // 1: self-frame (no frame link)
3612 // 2: deopting frame (no frame link)
3613 // 3: caller of deopting frame (could be compiled/interpreted).
3615 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3616 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3618 // Pop deoptimized frame (int)
3619 __ movl(rcx, Address(rdi,
3620 Deoptimization::UnrollBlock::
3621 size_of_deoptimized_frame_offset_in_bytes()));
3622 __ addptr(rsp, rcx);
3624 // rsp should be pointing at the return address to the caller (3)
3626 // Stack bang to make sure there's enough room for these interpreter frames.
3627 if (UseStackBanging) {
3628 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3629 __ bang_stack_size(rbx, rcx);
3630 }
3632 // Load address of array of frame pcs into rcx (address*)
3633 __ movptr(rcx,
3634 Address(rdi,
3635 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3637 // Trash the return pc
3638 __ addptr(rsp, wordSize);
3640 // Load address of array of frame sizes into rsi (intptr_t*)
3641 __ movptr(rsi, Address(rdi,
3642 Deoptimization::UnrollBlock::
3643 frame_sizes_offset_in_bytes()));
3645 // Counter
3646 __ movl(rdx, Address(rdi,
3647 Deoptimization::UnrollBlock::
3648 number_of_frames_offset_in_bytes())); // (int)
3650 // Pick up the initial fp we should save
3651 __ movptr(rbp,
3652 Address(rdi,
3653 Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3655 // Now adjust the caller's stack to make up for the extra locals but
3656 // record the original sp so that we can save it in the skeletal
3657 // interpreter frame; the stack walking of interpreter_sender
3658 // will get the unextended sp value and not the "real" sp value.
3660 const Register sender_sp = r8;
3662 __ mov(sender_sp, rsp);
3663 __ movl(rbx, Address(rdi,
3664 Deoptimization::UnrollBlock::
3665 caller_adjustment_offset_in_bytes())); // (int)
3666 __ subptr(rsp, rbx);
3668 // Push interpreter frames in a loop
3669 Label loop;
3670 __ bind(loop);
3671 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3672 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3673 __ pushptr(Address(rcx, 0)); // Save return address
3674 __ enter(); // Save old & set new rbp
3675 __ subptr(rsp, rbx); // Prolog
3676 #ifdef CC_INTERP
3677 __ movptr(Address(rbp,
3678 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3679 sender_sp); // Make it walkable
3680 #else // CC_INTERP
3681 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3682 sender_sp); // Make it walkable
3683 // This value is corrected by layout_activation_impl
3684 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3685 #endif // CC_INTERP
3686 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3687 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3688 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3689 __ decrementl(rdx); // Decrement counter
3690 __ jcc(Assembler::notZero, loop);
3691 __ pushptr(Address(rcx, 0)); // Save final return address
3693 // Re-push self-frame
3694 __ enter(); // Save old & set new rbp
3695 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3696 // Prolog
3698 // Use rbp because the frames look interpreted now
3699 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3700 // Don't need the precise return PC here, just precise enough to point into this code blob.
3701 address the_pc = __ pc();
3702 __ set_last_Java_frame(noreg, rbp, the_pc);
3704 // Call C code. Need thread but NOT official VM entry
3705 // crud. We cannot block on this call, no GC can happen. Call should
3706 // restore return values to their stack-slots with the new SP.
3707 // Thread is in rdi already.
3708 //
3709 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3711 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3712 __ mov(c_rarg0, r15_thread);
3713 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3714 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3716 // Set an oopmap for the call site
3717 // Use the same PC we used for the last java frame
3718 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3720 // Clear fp AND pc
3721 __ reset_last_Java_frame(true, true);
3723 // Pop self-frame.
3724 __ leave(); // Epilog
3726 // Jump to interpreter
3727 __ ret(0);
3729 // Make sure all code is generated
3730 masm->flush();
3732 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3733 SimpleRuntimeFrame::framesize >> 1);
3734 }
3735 #endif // COMPILER2
3738 //------------------------------generate_handler_blob------
3739 //
3740 // Generate a special Compile2Runtime blob that saves all registers,
3741 // and setup oopmap.
3742 //
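// A representative call site (a hedged sketch based on the shared runtime's
// stub generation; the exact spelling may differ in this revision):
//
//   generate_handler_blob(CAST_FROM_FN_PTR(address,
//       SafepointSynchronize::handle_polling_page_exception),
//       false /* cause_return: polling fault, not a poll at a return */);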
3743 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
3744 assert(StubRoutines::forward_exception_entry() != NULL,
3745 "must be generated before");
3747 ResourceMark rm;
3748 OopMapSet *oop_maps = new OopMapSet();
3749 OopMap* map;
3751 // Allocate space for the code. Setup code generation tools.
3752 CodeBuffer buffer("handler_blob", 2048, 1024);
3753 MacroAssembler* masm = new MacroAssembler(&buffer);
3755 address start = __ pc();
3756 address call_pc = NULL;
3757 int frame_size_in_words;
3759 // Make room for return address (or push it again)
3760 if (!cause_return) {
3761 __ push(rbx);
3762 }
3764 // Save registers, fpu state, and flags
3765 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3767 // The following is basically a call_VM. However, we need the precise
3768 // address of the call in order to generate an oopmap. Hence, we do all the
3769 // work ourselves.
3771 __ set_last_Java_frame(noreg, noreg, NULL);
3773 // The return address must always be correct so that the frame constructor
3774 // never sees an invalid pc.
3776 if (!cause_return) {
3777 // overwrite the dummy value we pushed on entry
3778 __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3779 __ movptr(Address(rbp, wordSize), c_rarg0);
3780 }
3782 // Do the call
3783 __ mov(c_rarg0, r15_thread);
3784 __ call(RuntimeAddress(call_ptr));
3786 // Set an oopmap for the call site. This oopmap will map all
3787 // oop-registers and debug-info registers as callee-saved. This
3788 // will allow deoptimization at this safepoint to find all possible
3789 // debug-info recordings, as well as let GC find all oops.
3791 oop_maps->add_gc_map( __ pc() - start, map);
3793 Label noException;
3795 __ reset_last_Java_frame(false, false);
3797 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3798 __ jcc(Assembler::equal, noException);
3800 // Exception pending
3802 RegisterSaver::restore_live_registers(masm);
3804 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3806 // No exception case
3807 __ bind(noException);
3809 // Normal exit, restore registers and exit.
3810 RegisterSaver::restore_live_registers(masm);
3812 __ ret(0);
3814 // Make sure all code is generated
3815 masm->flush();
3817 // Fill-out other meta info
3818 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3819 }
3821 //
3822 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3823 //
3824 // Generate a stub that calls into vm to find out the proper destination
3825 // of a java call. All the argument registers are live at this point
3826 // but since this is generic code we don't know what they are and the caller
3827 // must do any gc of the args.
3828 //
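// A representative call site (a hedged sketch based on
// SharedRuntime::generate_stubs in sharedRuntime.cpp):
//
//   _resolve_static_call_blob =
//       generate_resolve_blob(CAST_FROM_FN_PTR(address,
//           SharedRuntime::resolve_static_call_C), "resolve_static_call");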
3829 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3830 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3832 // allocate space for the code
3833 ResourceMark rm;
3835 CodeBuffer buffer(name, 1000, 512);
3836 MacroAssembler* masm = new MacroAssembler(&buffer);
3838 int frame_size_in_words;
3840 OopMapSet *oop_maps = new OopMapSet();
3841 OopMap* map = NULL;
3843 int start = __ offset();
3845 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3847 int frame_complete = __ offset();
3849 __ set_last_Java_frame(noreg, noreg, NULL);
3851 __ mov(c_rarg0, r15_thread);
3853 __ call(RuntimeAddress(destination));
3856 // Set an oopmap for the call site.
3857 // We need this not only for callee-saved registers, but also for volatile
3858 // registers that the compiler might be keeping live across a safepoint.
3860 oop_maps->add_gc_map( __ offset() - start, map);
3862 // rax contains the address we are going to jump to, assuming no exception got installed
3864 // clear last_Java_sp
3865 __ reset_last_Java_frame(false, false);
3866 // check for pending exceptions
3867 Label pending;
3868 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3869 __ jcc(Assembler::notEqual, pending);
3871 // get the returned Method*
3872 __ get_vm_result_2(rbx, r15_thread);
3873 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3875 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3877 RegisterSaver::restore_live_registers(masm);
3879 // We are back to the original state on entry and ready to go.
3881 __ jmp(rax);
3883 // Pending exception after the safepoint
3885 __ bind(pending);
3887 RegisterSaver::restore_live_registers(masm);
3889 // exception pending => remove activation and forward to exception handler
3891 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3893 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3894 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3896 // -------------
3897 // make sure all code is generated
3898 masm->flush();
3900 // return the blob
3901 // frame_size_words or bytes??
3902 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3903 }
3906 #ifdef COMPILER2
3907 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3908 //
3909 //------------------------------generate_exception_blob---------------------------
3910 // creates the exception blob at the end
3911 // This code is jumped to from a compiled method via the exception blob.
3912 // (see emit_exception_handler in the x86_64.ad file)
3913 //
3914 // Given an exception pc at a call we call into the runtime for the
3915 // handler in this method. This handler might merely restore state
3916 // (i.e. callee-save registers), unwind the frame, and jump to the
3917 // exception handler for the nmethod if there is no Java-level handler
3918 // for the nmethod.
3919 //
3920 // This code is entered with a jmp.
3921 //
3922 // Arguments:
3923 // rax: exception oop
3924 // rdx: exception pc
3925 //
3926 // Results:
3927 // rax: exception oop
3928 // rdx: exception pc in caller or ???
3929 // destination: exception handler of caller
3930 //
3931 // Note: the exception pc MUST be at a call (precise debug information)
3932 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3933 //
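// In outline the blob reduces to (a sketch built around the signature used
// below, not additional generated code):
//
//   address handler = OptoRuntime::handle_exception_C(thread);
//   // handler: either a Java-level catch in this nmethod, or the nmethod's
//   // unwind handler that propagates the exception to the caller
//   jmp(handler);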
3935 void OptoRuntime::generate_exception_blob() {
3936 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3937 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3938 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3940 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3942 // Allocate space for the code
3943 ResourceMark rm;
3944 // Setup code generation tools
3945 CodeBuffer buffer("exception_blob", 2048, 1024);
3946 MacroAssembler* masm = new MacroAssembler(&buffer);
3949 address start = __ pc();
3951 // Exception pc is 'return address' for stack walker
3952 __ push(rdx);
3953 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3955 // Save callee-saved registers. See x86_64.ad.
3957 // rbp is an implicitly saved callee saved register (i.e. the calling
3958 // convention will save/restore it in prolog/epilog). Other than that
3959 // there are no callee save registers now that adapter frames are gone.
3961 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3963 // Store exception in Thread object. We cannot pass any arguments to the
3964 // handle_exception call, since we do not want to make any assumption
3965 // about the size of the frame where the exception happened in.
3966 // c_rarg0 is either rdi (Linux) or rcx (Windows).
3967 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
3968 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3970 // This call does all the hard work. It checks if an exception handler
3971 // exists in the method.
3972 // If so, it returns the handler address.
3973 // If not, it prepares for stack-unwinding, restoring the callee-save
3974 // registers of the frame being removed.
3975 //
3976 // address OptoRuntime::handle_exception_C(JavaThread* thread)
3978 // At a method handle call, the stack may not be properly aligned
3979 // when returning with an exception.
3980 address the_pc = __ pc();
3981 __ set_last_Java_frame(noreg, noreg, the_pc);
3982 __ mov(c_rarg0, r15_thread);
3983 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
3984 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
3986 // Set an oopmap for the call site. This oopmap will only be used if we
3987 // are unwinding the stack. Hence, all locations will be dead.
3988 // Callee-saved registers will be the same as the frame above (i.e.,
3989 // handle_exception_stub), since they were restored when we got the
3990 // exception.
3992 OopMapSet* oop_maps = new OopMapSet();
3994 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3996 __ reset_last_Java_frame(false, true);
3998 // Restore callee-saved registers
4000 // rbp is an implicitly saved callee saved register (i.e. the calling
4001 // convention will save/restore it in prolog/epilog). Other than that
4002 // there are no callee save registers now that adapter frames are gone.
4004 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4006 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4007 __ pop(rdx); // No need for exception pc anymore
4009 // rax: exception handler
4011 // Restore SP from BP if the exception PC is a MethodHandle call site.
4012 __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
4013 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
4015 // We have a handler in rax (could be deopt blob).
4016 __ mov(r8, rax);
4018 // Get the exception oop
4019 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4020 // Get the exception pc in case we are deoptimized
4021 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4022 #ifdef ASSERT
4023 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4024 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4025 #endif
4026 // Clear the exception oop so GC no longer processes it as a root.
4027 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4029 // rax: exception oop
4030 // r8: exception handler
4031 // rdx: exception pc
4032 // Jump to handler
4034 __ jmp(r8);
4036 // Make sure all code is generated
4037 masm->flush();
4039 // Set exception blob
4040 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4041 }
4042 #endif // COMPILER2