/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
class SimpleRuntimeFrame {

 public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
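
// Worked example (an illustrative sketch, assuming a non-Windows build where
// frame::arg_reg_save_area_bytes == 0): rbp_off = 0, rbp_off2 = 1,
// return_off = 2, return_off2 = 3, framesize = 4. That is four jint slots,
// i.e. two 64-bit words: the saved rbp and the return address, exactly the
// frame an enter instruction leaves behind. On Windows, where the ABI
// reserves a 32-byte argument register save area, every offset shifts up
// by 32/BytesPerInt = 8 slots.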
class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    DEF_XMM_OFFS(8),
    DEF_XMM_OFFS(9),
    DEF_XMM_OFFS(10),
    DEF_XMM_OFFS(11),
    DEF_XMM_OFFS(12),
    DEF_XMM_OFFS(13),
    DEF_XMM_OFFS(14),
    DEF_XMM_OFFS(15),
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
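
// Layout sanity check (a sketch, assuming frame::arg_reg_save_area_bytes == 0
// as on Linux): fpu_state_off = 0, so xmm_off = 160/4 = 40 and
// xmm0_offset_in_bytes() = 160. This matches the fxsave memory image, where
// bytes 160..415 hold XMM0-XMM15 at 16 bytes each, so each xmm<N>_off lands
// on the start of XMM<N> inside the saved fxsave area.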
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int vect_words = 0;
#ifdef COMPILER2
  if (save_vectors) {
    assert(UseAVX > 0, "256bit vectors are supported only with AVX");
    assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
    // Save upper half of YMM registers
    vect_words = 16 * 16 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif

  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  if (vect_words > 0) {
    assert(vect_words*wordSize == 256, "");
    __ subptr(rsp, 256); // Save upper half of YMM registers
    __ vextractf128h(Address(rsp,   0), xmm0);
    __ vextractf128h(Address(rsp,  16), xmm1);
    __ vextractf128h(Address(rsp,  32), xmm2);
    __ vextractf128h(Address(rsp,  48), xmm3);
    __ vextractf128h(Address(rsp,  64), xmm4);
    __ vextractf128h(Address(rsp,  80), xmm5);
    __ vextractf128h(Address(rsp,  96), xmm6);
    __ vextractf128h(Address(rsp, 112), xmm7);
    __ vextractf128h(Address(rsp, 128), xmm8);
    __ vextractf128h(Address(rsp, 144), xmm9);
    __ vextractf128h(Address(rsp, 160), xmm10);
    __ vextractf128h(Address(rsp, 176), xmm11);
    __ vextractf128h(Address(rsp, 192), xmm12);
    __ vextractf128h(Address(rsp, 208), xmm13);
    __ vextractf128h(Address(rsp, 224), xmm14);
    __ vextractf128h(Address(rsp, 240), xmm15);
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)

  map->set_callee_saved(STACK_OFFSET( rax_off  ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off  ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off  ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off  ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap,
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off  ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off  ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off   ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off   ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off  ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off  ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off  ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off  ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off  ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off  ), r15->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm0_off ), xmm0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm1_off ), xmm1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm2_off ), xmm2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm3_off ), xmm3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm4_off ), xmm4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm5_off ), xmm5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm6_off ), xmm6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm7_off ), xmm7->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm8_off ), xmm8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( xmm9_off ), xmm9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm10_off ), xmm10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm11_off ), xmm11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm12_off ), xmm12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm13_off ), xmm13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm14_off ), xmm14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm15_off ), xmm15->as_VMReg());

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off  ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off  ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off  ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off  ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off  ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off  ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off   ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off   ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off  ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off  ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off  ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off  ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off  ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off  ), r15->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm0H_off ), xmm0->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm1H_off ), xmm1->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm2H_off ), xmm2->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm3H_off ), xmm3->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm4H_off ), xmm4->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm5H_off ), xmm5->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm6H_off ), xmm6->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm7H_off ), xmm7->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm8H_off ), xmm8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( xmm9H_off ), xmm9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm10H_off ), xmm10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm11H_off ), xmm11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm12H_off ), xmm12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm13H_off ), xmm13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm14H_off ), xmm14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm15H_off ), xmm15->as_VMReg()->next());
  }

  return map;
}
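
// Frame size worked example (illustrative, assuming a non-Windows build,
// no vectors saved, additional_frame_words == 0, and the 512-byte fxsave
// area, i.e. FPUStateSizeInWords == 512/wordSize): the layout enum yields
// reg_save_size = 168 slots = 672 bytes, which is already 16-byte aligned,
// so frame_size_in_words == 84 and the oopmap is built over 168 slots.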
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
#ifdef COMPILER2
  if (restore_vectors) {
    // Restore upper half of YMM registers.
    assert(UseAVX > 0, "256bit vectors are supported only with AVX");
    assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
    __ vinsertf128h(xmm0,  Address(rsp,   0));
    __ vinsertf128h(xmm1,  Address(rsp,  16));
    __ vinsertf128h(xmm2,  Address(rsp,  32));
    __ vinsertf128h(xmm3,  Address(rsp,  48));
    __ vinsertf128h(xmm4,  Address(rsp,  64));
    __ vinsertf128h(xmm5,  Address(rsp,  80));
    __ vinsertf128h(xmm6,  Address(rsp,  96));
    __ vinsertf128h(xmm7,  Address(rsp, 112));
    __ vinsertf128h(xmm8,  Address(rsp, 128));
    __ vinsertf128h(xmm9,  Address(rsp, 144));
    __ vinsertf128h(xmm10, Address(rsp, 160));
    __ vinsertf128h(xmm11, Address(rsp, 176));
    __ vinsertf128h(xmm12, Address(rsp, 192));
    __ vinsertf128h(xmm13, Address(rsp, 208));
    __ vinsertf128h(xmm14, Address(rsp, 224));
    __ vinsertf128h(xmm15, Address(rsp, 240));
    __ addptr(rsp, 256);
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result registers. Only used by deoptimization. By
  // now any callee save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}
// Is the vector's size (in bytes) bigger than the size saved by default?
// 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
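
// Worked example (illustrative): an incoming argument in stack slot 0 maps
// to reg2offset_in == (0 + 4) * 4 == 16 bytes above rbp. The four biased
// slots are the saved rbp (2 slots) and the return address (2 slots), so
// slot 0 is the first word of the caller's outgoing argument area.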
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Register
// values up to RegisterImpl::number_of_registers are the 64-bit
// integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}
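
// Worked example (illustrative): for a signature (long, int, double) the
// caller passes sig_bt = { T_LONG, T_VOID, T_INT, T_DOUBLE, T_VOID }. The
// loop assigns the long to j_rarg0 (set2), skips the T_VOID half, puts the
// int in j_rarg1 (set1), the double in j_farg0 (set2), and skips the final
// half. No stack slots are used, so the function returns round_to(0, 2) == 0.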
// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}
static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus 1 because
  // we also account for the return address location since
  // we store it first rather than hold it in rax across all the shuffling.

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said
    // it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller) so move only 32 bits to the slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}
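
// Sizing sketch for the adapter above (illustrative): with 4 interpreter
// argument slots and Interpreter::stackElementSize == 8, extraspace is
// 4*8 + 8 (return address) = 40 bytes, rounded up to 48 to keep rsp
// 16-byte aligned before the args are written out.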
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}
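
// Note on range_check (descriptive): control reaches L_ok only when
// code_start < pc_reg < code_end. Both out-of-range cases land on L_fail,
// which simply falls through, so a caller can chain several range checks
// and place its failure handling after the last one if none branched to L_ok.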
static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args as
  // we must align the stack to 16 bytes on an i2c entry else we
  // lose alignment we expect in all compiled code and register
  // save code can segv when fxsave instructions find improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args. Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }


  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address, misaligning the stack so that the youngest
  // frame sees the same layout a call instruction would have produced.
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going thru a c2i because of a miss a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put Method* where a c2i would expect it should we end up there;
  // only needed because c2 resolve stubs return Method* as a result in rax.
  __ mov(rax, rbx);
  __ jmp(r11);
}
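
// Stack shuffle sketch (illustrative): for total_args_passed == 3 the
// interpreter slots sit at saved_sp + 24, + 16 and + 8 (ld_off counts down),
// while compiled stack args land at rsp + st_off just above the pushed
// return address. A long/double is read from next_off (ld_off - 8), the
// occupied T_VOID slot, matching the layout gen_c2i_adapter stores.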
// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}
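
// Worked example (illustrative): for a native signature (int, float) the
// non-Windows path hands out c_rarg0 (rdi) and c_farg0 (xmm0) independently.
// On _WIN64 the two counters advance together because argument positions are
// shared across register classes: the int takes c_rarg0 (rcx) and also bumps
// fp_args, so the float lands in c_farg1 (xmm1), with 4 shadow slots
// (32 bytes) always reserved on the stack for the callee.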
// On 64-bit we will store integer-like items to the stack as
// 64-bit items (SPARC ABI style), even though Java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits.
// So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if oop is NULL; if so we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
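
// Handle semantics sketch (descriptive): after object_move the native callee
// receives either NULL (the oop was NULL) or the address of a stack slot
// that holds the oop. The slot itself is recorded in the oopmap, so GC can
// update the oop in place while the handle the callee holds stays valid.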
// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}
// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}
// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling convention assures us that each VMregpair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
  }
}
static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double words first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value is in an input register; we must flush it to the stack.
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (map != NULL) {
          __ movl(Address(rsp, offset), reg);
        } else {
          __ movl(reg, Address(rsp, offset));
        }
        break;
      case T_ARRAY:
      case T_LONG:
        // handled above
        break;
      case T_OBJECT:
      default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null, otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  __ block_comment("unpack_array_argument {");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move_ptr(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move32_64(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  move_ptr(masm, tmp, body_arg);
  move32_64(masm, tmp, length_arg);
  __ bind(done);

  __ block_comment("} unpack_array_argument");
}
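
// Unpacking sketch (illustrative): for a critical-native jint[] argument the
// body pointer passed down is arr + arrayOopDesc::base_offset_in_bytes(T_INT)
// and the length is the jint read at arrayOopDesc::length_offset_in_bytes();
// a NULL array degenerates to (body == 0, length == 0), which the code above
// produces by zeroing tmp_reg and moving it into both out args.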
1468 // Different signatures may require very different orders for the move
1469 // to avoid clobbering other arguments. There's no simple way to
1470 // order them safely. Compute a safe order for issuing stores and
1471 // break any cycles in those stores. This code is fairly general but
1472 // it's not necessary on the other platforms so we keep it in the
1473 // platform dependent code instead of moving it into a shared file.
1474 // (See bugs 7013347 & 7145024.)
1475 // Note that this code is specific to LP64.
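// Illustrative example (not generated code): if the shuffle needs both
// rdi -> rsi and rsi -> rdi, the two stores form a cycle. break_cycle()
// rewrites it using the temp register, yielding the safe sequence
//   rsi -> tmp; rdi -> rsi; tmp -> rdi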
1476 class ComputeMoveOrder: public StackObj {
1477 class MoveOperation: public ResourceObj {
1478 friend class ComputeMoveOrder;
1479 private:
1480 VMRegPair _src;
1481 VMRegPair _dst;
1482 int _src_index;
1483 int _dst_index;
1484 bool _processed;
1485 MoveOperation* _next;
1486 MoveOperation* _prev;
1488 static int get_id(VMRegPair r) {
1489 return r.first()->value();
1490 }
1492 public:
1493 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1494 _src(src)
1495 , _src_index(src_index)
1496 , _dst(dst)
1497 , _dst_index(dst_index)
1498 , _next(NULL)
1499 , _prev(NULL)
1500 , _processed(false) {
1501 }
1503 VMRegPair src() const { return _src; }
1504 int src_id() const { return get_id(src()); }
1505 int src_index() const { return _src_index; }
1506 VMRegPair dst() const { return _dst; }
1507 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1508 int dst_index() const { return _dst_index; }
1509 int dst_id() const { return get_id(dst()); }
1510 MoveOperation* next() const { return _next; }
1511 MoveOperation* prev() const { return _prev; }
1512 void set_processed() { _processed = true; }
1513 bool is_processed() const { return _processed; }
1515 // insert
1516 void break_cycle(VMRegPair temp_register) {
1517 // create a new store following the last store
1518 // to move from the temp_register to the original
1519 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1521 // break the cycle of links and insert new_store at the end
1522 // break the reverse link.
1523 MoveOperation* p = prev();
1524 assert(p->next() == this, "must be");
1525 _prev = NULL;
1526 p->_next = new_store;
1527 new_store->_prev = p;
1529 // change the original store to save its value in the temp.
1530 set_dst(-1, temp_register);
1531 }
1533 void link(GrowableArray<MoveOperation*>& killer) {
1534 // link this store in front of the store that it depends on
1535 MoveOperation* n = killer.at_grow(src_id(), NULL);
1536 if (n != NULL) {
1537 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1538 _next = n;
1539 n->_prev = this;
1540 }
1541 }
1542 };
1544 private:
1545 GrowableArray<MoveOperation*> edges;
1547 public:
1548 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1549 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1550 // Move operations where the dest is the stack can all be
1551 // scheduled first since they can't interfere with the other moves.
1552 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1553 if (in_sig_bt[i] == T_ARRAY) {
1554 c_arg--;
1555 if (out_regs[c_arg].first()->is_stack() &&
1556 out_regs[c_arg + 1].first()->is_stack()) {
1557 arg_order.push(i);
1558 arg_order.push(c_arg);
1559 } else {
1560 if (out_regs[c_arg].first()->is_stack() ||
1561 in_regs[i].first() == out_regs[c_arg].first()) {
1562 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1563 } else {
1564 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1565 }
1566 }
1567 } else if (in_sig_bt[i] == T_VOID) {
1568 arg_order.push(i);
1569 arg_order.push(c_arg);
1570 } else {
1571 if (out_regs[c_arg].first()->is_stack() ||
1572 in_regs[i].first() == out_regs[c_arg].first()) {
1573 arg_order.push(i);
1574 arg_order.push(c_arg);
1575 } else {
1576 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1577 }
1578 }
1579 }
1580 // Break any cycles in the register moves and emit them in the
1581 // proper order.
1582 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1583 for (int i = 0; i < stores->length(); i++) {
1584 arg_order.push(stores->at(i)->src_index());
1585 arg_order.push(stores->at(i)->dst_index());
1586 }
1587 }
1589 // Collect all the move operations
1590 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1591 if (src.first() == dst.first()) return;
1592 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1593 }
1595 // Walk the edges breaking cycles between moves. The result list
1596 // can be walked in order to produce the proper set of loads
1597 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1598 // Record which moves kill which values
1599 GrowableArray<MoveOperation*> killer;
1600 for (int i = 0; i < edges.length(); i++) {
1601 MoveOperation* s = edges.at(i);
1602 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1603 killer.at_put_grow(s->dst_id(), s, NULL);
1604 }
1605 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1606 "make sure temp isn't in the registers that are killed");
1608 // create links between loads and stores
1609 for (int i = 0; i < edges.length(); i++) {
1610 edges.at(i)->link(killer);
1611 }
1613 // at this point, all the move operations are chained together
1614 // in a doubly linked list. Processing it backwards finds
1615 // the beginning of the chain, forwards finds the end. If there's
1616 // a cycle it can be broken at any point, so pick an edge and walk
1617 // backward until the list ends or we end where we started.
1618 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1619 for (int e = 0; e < edges.length(); e++) {
1620 MoveOperation* s = edges.at(e);
1621 if (!s->is_processed()) {
1622 MoveOperation* start = s;
1623 // search for the beginning of the chain or cycle
1624 while (start->prev() != NULL && start->prev() != s) {
1625 start = start->prev();
1626 }
1627 if (start->prev() == s) {
1628 start->break_cycle(temp_register);
1629 }
1630 // walk the chain forward inserting to store list
1631 while (start != NULL) {
1632 stores->append(start);
1633 start->set_processed();
1634 start = start->next();
1635 }
1636 }
1637 }
1638 return stores;
1639 }
1640 };
1642 static void verify_oop_args(MacroAssembler* masm,
1643 methodHandle method,
1644 const BasicType* sig_bt,
1645 const VMRegPair* regs) {
1646 Register temp_reg = rbx; // not part of any compiled calling seq
1647 if (VerifyOops) {
1648 for (int i = 0; i < method->size_of_parameters(); i++) {
1649 if (sig_bt[i] == T_OBJECT ||
1650 sig_bt[i] == T_ARRAY) {
1651 VMReg r = regs[i].first();
1652 assert(r->is_valid(), "bad oop arg");
1653 if (r->is_stack()) {
1654 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1655 __ verify_oop(temp_reg);
1656 } else {
1657 __ verify_oop(r->as_Register());
1658 }
1659 }
1660 }
1661 }
1662 }
1664 static void gen_special_dispatch(MacroAssembler* masm,
1665 methodHandle method,
1666 const BasicType* sig_bt,
1667 const VMRegPair* regs) {
1668 verify_oop_args(masm, method, sig_bt, regs);
1669 vmIntrinsics::ID iid = method->intrinsic_id();
1671 // Now write the args into the outgoing interpreter space
1672 bool has_receiver = false;
1673 Register receiver_reg = noreg;
1674 int member_arg_pos = -1;
1675 Register member_reg = noreg;
1676 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1677 if (ref_kind != 0) {
1678 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1679 member_reg = rbx; // known to be free at this point
1680 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1681 } else if (iid == vmIntrinsics::_invokeBasic) {
1682 has_receiver = true;
1683 } else {
1684 fatal(err_msg_res("unexpected intrinsic id %d", iid));
1685 }
1687 if (member_reg != noreg) {
1688 // Load the member_arg into register, if necessary.
1689 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1690 VMReg r = regs[member_arg_pos].first();
1691 if (r->is_stack()) {
1692 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1693 } else {
1694 // no data motion is needed
1695 member_reg = r->as_Register();
1696 }
1697 }
1699 if (has_receiver) {
1700 // Make sure the receiver is loaded into a register.
1701 assert(method->size_of_parameters() > 0, "oob");
1702 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1703 VMReg r = regs[0].first();
1704 assert(r->is_valid(), "bad receiver arg");
1705 if (r->is_stack()) {
1706 // Porting note: This assumes that compiled calling conventions always
1707 // pass the receiver oop in a register. If this is not true on some
1708 // platform, pick a temp and load the receiver from stack.
1709 fatal("receiver always in a register");
1710 receiver_reg = j_rarg0; // known to be free at this point
1711 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1712 } else {
1713 // no data motion is needed
1714 receiver_reg = r->as_Register();
1715 }
1716 }
1718 // Figure out which address we are really jumping to:
1719 MethodHandles::generate_method_handle_dispatch(masm, iid,
1720 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1721 }
1723 // ---------------------------------------------------------------------------
1724 // Generate a native wrapper for a given method. The method takes arguments
1725 // in the Java compiled code convention, marshals them to the native
1726 // convention (handlizes oops, etc), transitions to native, makes the call,
1727 // returns to java state (possibly blocking), unhandlizes any result and
1728 // returns.
1729 //
1730 // Critical native functions are a shorthand for the use of
1731 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1732 // functions. The wrapper is expected to unpack the arguments before
1733 // passing them to the callee and perform checks before and after the
1734 // native call to ensure that the GC_locker
1735 // lock_critical/unlock_critical semantics are followed. Some other
1736 // parts of JNI setup are skipped, like the tear down of the JNI handle
1737 // block and the check for pending exceptions, since it's impossible for
1738 // them to be thrown.
1739 //
1740 // They are roughly structured like this:
1741 // if (GC_locker::needs_gc())
1742 // SharedRuntime::block_for_jni_critical();
1743 // transition to thread_in_native
1744 // unpack array arguments and call native entry point
1745 // check for safepoint in progress
1746 // check if any thread suspend flags are set
1747 // call into JVM and possibly unlock the JNI critical
1748 // if a GC was suppressed while in the critical native.
1749 // transition back to thread_in_Java
1750 // return to caller
1751 //
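// Illustrative example (assumed JavaCritical_ entry point naming): for
//   static native int sum(byte[] a);
// the critical native is declared on the C side roughly as
//   jint JavaCritical_pkg_Cls_sum(jint a_len, jbyte* a_body);
// i.e. no JNIEnv* and no jclass, with each array expanded to a
// (length, body) pair.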
1752 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1753 methodHandle method,
1754 int compile_id,
1755 BasicType* in_sig_bt,
1756 VMRegPair* in_regs,
1757 BasicType ret_type) {
1758 if (method->is_method_handle_intrinsic()) {
1759 vmIntrinsics::ID iid = method->intrinsic_id();
1760 intptr_t start = (intptr_t)__ pc();
1761 int vep_offset = ((intptr_t)__ pc()) - start;
1762 gen_special_dispatch(masm,
1763 method,
1764 in_sig_bt,
1765 in_regs);
1766 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1767 __ flush();
1768 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1769 return nmethod::new_native_nmethod(method,
1770 compile_id,
1771 masm->code(),
1772 vep_offset,
1773 frame_complete,
1774 stack_slots / VMRegImpl::slots_per_word,
1775 in_ByteSize(-1),
1776 in_ByteSize(-1),
1777 (OopMapSet*)NULL);
1778 }
1779 bool is_critical_native = true;
1780 address native_func = method->critical_native_function();
1781 if (native_func == NULL) {
1782 native_func = method->native_function();
1783 is_critical_native = false;
1784 }
1785 assert(native_func != NULL, "must have function");
1787 // An OopMap for lock (and class if static)
1788 OopMapSet *oop_maps = new OopMapSet();
1789 intptr_t start = (intptr_t)__ pc();
1791 // We have received a description of where all the java args are located
1792 // on entry to the wrapper. We need to convert these args to where
1793 // the jni function will expect them. To figure out where they go
1794 // we convert the java signature to a C signature by inserting
1795 // the hidden arguments as arg[0] and possibly arg[1] (static method)
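// Example (illustrative only): for an instance method
//   native long f(int x);
// the Java args are (receiver, x) and the C args become
// (JNIEnv*, jobject receiver, jint x), so total_c_args == total_in_args + 1
// (one more again for the jclass of a static method).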
1797 const int total_in_args = method->size_of_parameters();
1798 int total_c_args = total_in_args;
1799 if (!is_critical_native) {
1800 total_c_args += 1;
1801 if (method->is_static()) {
1802 total_c_args++;
1803 }
1804 } else {
1805 for (int i = 0; i < total_in_args; i++) {
1806 if (in_sig_bt[i] == T_ARRAY) {
1807 total_c_args++;
1808 }
1809 }
1810 }
1812 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1813 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1814 BasicType* in_elem_bt = NULL;
1816 int argc = 0;
1817 if (!is_critical_native) {
1818 out_sig_bt[argc++] = T_ADDRESS;
1819 if (method->is_static()) {
1820 out_sig_bt[argc++] = T_OBJECT;
1821 }
1823 for (int i = 0; i < total_in_args ; i++ ) {
1824 out_sig_bt[argc++] = in_sig_bt[i];
1825 }
1826 } else {
1827 Thread* THREAD = Thread::current();
1828 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1829 SignatureStream ss(method->signature());
1830 for (int i = 0; i < total_in_args ; i++ ) {
1831 if (in_sig_bt[i] == T_ARRAY) {
1832 // Arrays are passed as int, elem* pair
1833 out_sig_bt[argc++] = T_INT;
1834 out_sig_bt[argc++] = T_ADDRESS;
1835 Symbol* atype = ss.as_symbol(CHECK_NULL);
1836 const char* at = atype->as_C_string();
1837 if (strlen(at) == 2) {
1838 assert(at[0] == '[', "must be");
1839 switch (at[1]) {
1840 case 'B': in_elem_bt[i] = T_BYTE; break;
1841 case 'C': in_elem_bt[i] = T_CHAR; break;
1842 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1843 case 'F': in_elem_bt[i] = T_FLOAT; break;
1844 case 'I': in_elem_bt[i] = T_INT; break;
1845 case 'J': in_elem_bt[i] = T_LONG; break;
1846 case 'S': in_elem_bt[i] = T_SHORT; break;
1847 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1848 default: ShouldNotReachHere();
1849 }
1850 }
1851 } else {
1852 out_sig_bt[argc++] = in_sig_bt[i];
1853 in_elem_bt[i] = T_VOID;
1854 }
1855 if (in_sig_bt[i] != T_VOID) {
1856 assert(in_sig_bt[i] == ss.type(), "must match");
1857 ss.next();
1858 }
1859 }
1860 }
1862 // Now figure out where the args must be stored and how much stack space
1863 // they require.
1864 int out_arg_slots;
1865 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1867 // Compute framesize for the wrapper. We need to handlize all oops in
1868 // incoming registers
1870 // Calculate the total number of stack slots we will need.
1872 // First count the abi requirement plus all of the outgoing args
1873 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1875 // Now the space for the inbound oop handle area
1876 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1877 if (is_critical_native) {
1878 // Critical natives may have to call out so they need a save area
1879 // for register arguments.
1880 int double_slots = 0;
1881 int single_slots = 0;
1882 for ( int i = 0; i < total_in_args; i++) {
1883 if (in_regs[i].first()->is_Register()) {
1884 const Register reg = in_regs[i].first()->as_Register();
1885 switch (in_sig_bt[i]) {
1886 case T_BOOLEAN:
1887 case T_BYTE:
1888 case T_SHORT:
1889 case T_CHAR:
1890 case T_INT: single_slots++; break;
1891 case T_ARRAY: // specific to LP64 (7145024)
1892 case T_LONG: double_slots++; break;
1893 default: ShouldNotReachHere();
1894 }
1895 } else if (in_regs[i].first()->is_XMMRegister()) {
1896 switch (in_sig_bt[i]) {
1897 case T_FLOAT: single_slots++; break;
1898 case T_DOUBLE: double_slots++; break;
1899 default: ShouldNotReachHere();
1900 }
1901 } else if (in_regs[i].first()->is_FloatRegister()) {
1902 ShouldNotReachHere();
1903 }
1904 }
1905 total_save_slots = double_slots * 2 + single_slots;
1906 // align the save area
1907 if (double_slots != 0) {
1908 stack_slots = round_to(stack_slots, 2);
1909 }
1910 }
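// Example (illustrative only): a critical native taking (int, long, byte[])
// entirely in registers counts single_slots == 1 and double_slots == 2,
// so total_save_slots == 2 * 2 + 1 == 5.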
1912 int oop_handle_offset = stack_slots;
1913 stack_slots += total_save_slots;
1915 // Now any space we need for handlizing a klass if this is a static method
1917 int klass_slot_offset = 0;
1918 int klass_offset = -1;
1919 int lock_slot_offset = 0;
1920 bool is_static = false;
1922 if (method->is_static()) {
1923 klass_slot_offset = stack_slots;
1924 stack_slots += VMRegImpl::slots_per_word;
1925 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1926 is_static = true;
1927 }
1929 // Plus a lock if needed
1931 if (method->is_synchronized()) {
1932 lock_slot_offset = stack_slots;
1933 stack_slots += VMRegImpl::slots_per_word;
1934 }
1936 // Now a place (+2) to save return values or temp during shuffling
1937 // + 4 for return address (which we own) and saved rbp
1938 stack_slots += 6;
1940 // Ok The space we have allocated will look like:
1941 //
1942 //
1943 // FP-> | |
1944 // |---------------------|
1945 // | 2 slots for moves |
1946 // |---------------------|
1947 // | lock box (if sync) |
1948 // |---------------------| <- lock_slot_offset
1949 // | klass (if static) |
1950 // |---------------------| <- klass_slot_offset
1951 // | oopHandle area |
1952 // |---------------------| <- oop_handle_offset (6 java arg registers)
1953 // | outbound memory |
1954 // | based arguments |
1955 // | |
1956 // |---------------------|
1957 // | |
1958 // SP-> | out_preserved_slots |
1959 //
1960 //
1963 // Now compute the actual number of stack words we need, rounding to keep
1964 // the stack properly aligned.
1965 stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1967 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
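// Example (illustrative only): with StackAlignmentInBytes == 16 (4 slots),
// a raw total of 38 slots rounds up to 40 slots == 160 bytes, so rsp
// stays 16-byte aligned once the frame is set up.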
1969 // First thing make an ic check to see if we should even be here
1971 // We are free to use all registers as temps without saving them and
1972 // restoring them except rbp. rbp is the only callee save register
1973 // as far as the interpreter and the compiler(s) are concerned.
1976 const Register ic_reg = rax;
1977 const Register receiver = j_rarg0;
1979 Label hit;
1980 Label exception_pending;
1982 assert_different_registers(ic_reg, receiver, rscratch1);
1983 __ verify_oop(receiver);
1984 __ load_klass(rscratch1, receiver);
1985 __ cmpq(ic_reg, rscratch1);
1986 __ jcc(Assembler::equal, hit);
1988 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1990 // Verified entry point must be aligned
1991 __ align(8);
1993 __ bind(hit);
1995 int vep_offset = ((intptr_t)__ pc()) - start;
1997 // The instruction at the verified entry point must be 5 bytes or longer
1998 // because it can be patched on the fly by make_non_entrant. The stack bang
1999 // instruction fits that requirement.
2001 // Generate stack overflow check
2003 if (UseStackBanging) {
2004 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2005 } else {
2006 // need a 5 byte instruction to allow MT safe patching to non-entrant
2007 __ fat_nop();
2008 }
2010 // Generate a new frame for the wrapper.
2011 __ enter();
2012 // -2 because return address is already present and so is saved rbp
2013 __ subptr(rsp, stack_size - 2*wordSize);
2015 // Frame is now completed as far as size and linkage.
2016 int frame_complete = ((intptr_t)__ pc()) - start;
2018 if (UseRTMLocking) {
2019 // Abort RTM transaction before calling JNI
2020 // because critical section will be large and will be
2021 // aborted anyway. Also nmethod could be deoptimized.
2022 __ xabort(0);
2023 }
2025 #ifdef ASSERT
2026 {
2027 Label L;
2028 __ mov(rax, rsp);
2029 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2030 __ cmpptr(rax, rsp);
2031 __ jcc(Assembler::equal, L);
2032 __ stop("improperly aligned stack");
2033 __ bind(L);
2034 }
2035 #endif /* ASSERT */
2038 // We use r14 as the oop handle for the receiver/klass
2039 // It is callee save so it survives the call to native
2041 const Register oop_handle_reg = r14;
2043 if (is_critical_native) {
2044 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2045 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2046 }
2048 //
2049 // We immediately shuffle the arguments so that for any vm call we have
2050 // to make from here on out (sync slow path, jvmti, etc.) we will have
2051 // captured the oops from our caller and have a valid oopMap for
2052 // them.
2054 // -----------------
2055 // The Grand Shuffle
2057 // The Java calling convention is either equal to (linux) or denser than (win64) the
2058 // c calling convention. However, because of the jni_env argument the c calling
2059 // convention always has at least one more (and two for static) arguments than Java.
2060 // Therefore if we move the args from java -> c backwards then we will never have
2061 // a register->register conflict and we don't have to build a dependency graph
2062 // and figure out how to break any cycles.
2063 //
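// Example (illustrative only, Linux ABI): a static native with three int
// args has them in rsi, rdx, rcx but needs them in rdx, rcx, r8 (rdi and
// rsi are taken by JNIEnv* and jclass). Copying backwards
//   rcx -> r8; rdx -> rcx; rsi -> rdx
// reads every source before its register is overwritten.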
2065 // Record esp-based slot for receiver on stack for non-static methods
2066 int receiver_offset = -1;
2068 // This is a trick. We double the stack slots so we can claim
2069 // the oops in the caller's frame. Since we are sure to have
2070 // more args than the caller, doubling is enough to make
2071 // sure we can capture all the incoming oop args from the
2072 // caller.
2073 //
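// Example (illustrative only): with stack_slots == 40 the map is sized
// for 80 slots, leaving room to record incoming oop args that still sit
// in the caller's frame.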
2074 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2076 // Mark location of rbp (someday)
2077 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2079 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2080 // All inbound args are referenced based on rbp and all outbound args via rsp.
2083 #ifdef ASSERT
2084 bool reg_destroyed[RegisterImpl::number_of_registers];
2085 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2086 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2087 reg_destroyed[r] = false;
2088 }
2089 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2090 freg_destroyed[f] = false;
2091 }
2093 #endif /* ASSERT */
2095 // This may iterate in two different directions depending on the
2096 // kind of native it is. The reason is that for regular JNI natives
2097 // the incoming and outgoing registers are offset upwards and for
2098 // critical natives they are offset down.
2099 GrowableArray<int> arg_order(2 * total_in_args);
2100 VMRegPair tmp_vmreg;
2101 tmp_vmreg.set1(rbx->as_VMReg());
2103 if (!is_critical_native) {
2104 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2105 arg_order.push(i);
2106 arg_order.push(c_arg);
2107 }
2108 } else {
2109 // Compute a valid move order, using tmp_vmreg to break any cycles
2110 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2111 }
2113 int temploc = -1;
2114 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2115 int i = arg_order.at(ai);
2116 int c_arg = arg_order.at(ai + 1);
2117 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2118 if (c_arg == -1) {
2119 assert(is_critical_native, "should only be required for critical natives");
2120 // This arg needs to be moved to a temporary
2121 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2122 in_regs[i] = tmp_vmreg;
2123 temploc = i;
2124 continue;
2125 } else if (i == -1) {
2126 assert(is_critical_native, "should only be required for critical natives");
2127 // Read from the temporary location
2128 assert(temploc != -1, "must be valid");
2129 i = temploc;
2130 temploc = -1;
2131 }
2132 #ifdef ASSERT
2133 if (in_regs[i].first()->is_Register()) {
2134 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2135 } else if (in_regs[i].first()->is_XMMRegister()) {
2136 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2137 }
2138 if (out_regs[c_arg].first()->is_Register()) {
2139 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2140 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2141 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2142 }
2143 #endif /* ASSERT */
2144 switch (in_sig_bt[i]) {
2145 case T_ARRAY:
2146 if (is_critical_native) {
2147 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2148 c_arg++;
2149 #ifdef ASSERT
2150 if (out_regs[c_arg].first()->is_Register()) {
2151 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2152 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2153 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2154 }
2155 #endif
2156 break;
2157 }
2158 case T_OBJECT:
2159 assert(!is_critical_native, "no oop arguments");
2160 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2161 ((i == 0) && (!is_static)),
2162 &receiver_offset);
2163 break;
2164 case T_VOID:
2165 break;
2167 case T_FLOAT:
2168 float_move(masm, in_regs[i], out_regs[c_arg]);
2169 break;
2171 case T_DOUBLE:
2172 assert( i + 1 < total_in_args &&
2173 in_sig_bt[i + 1] == T_VOID &&
2174 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2175 double_move(masm, in_regs[i], out_regs[c_arg]);
2176 break;
2178 case T_LONG :
2179 long_move(masm, in_regs[i], out_regs[c_arg]);
2180 break;
2182 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2184 default:
2185 move32_64(masm, in_regs[i], out_regs[c_arg]);
2186 }
2187 }
2189 int c_arg;
2191 // Pre-load a static method's oop into r14. Used both by locking code and
2192 // the normal JNI call code.
2193 if (!is_critical_native) {
2194 // point c_arg at the first arg that is already loaded in case we
2195 // need to spill before we call out
2196 c_arg = total_c_args - total_in_args;
2198 if (method->is_static()) {
2200 // load oop into a register
2201 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2203 // Now handlize the static class mirror; it's known not-null.
2204 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2205 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2207 // Now get the handle
2208 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2209 // store the klass handle as second argument
2210 __ movptr(c_rarg1, oop_handle_reg);
2211 // and protect the arg if we must spill
2212 c_arg--;
2213 }
2214 } else {
2215 // For JNI critical methods we need to save all registers in save_args.
2216 c_arg = 0;
2217 }
2219 // Change state to native (we save the return address in the thread, since it might not
2220 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2221 // points into the right code segment. It does not have to be the correct return pc.
2222 // We use the same pc/oopMap repeatedly when we call out
2224 intptr_t the_pc = (intptr_t) __ pc();
2225 oop_maps->add_gc_map(the_pc - start, map);
2227 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2230 // We have all of the arguments set up at this point. We must not touch any
2231 // argument registers at this point (what if we save/restore them? there are no oops in them).
2233 {
2234 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2235 // protect the args we've loaded
2236 save_args(masm, total_c_args, c_arg, out_regs);
2237 __ mov_metadata(c_rarg1, method());
2238 __ call_VM_leaf(
2239 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2240 r15_thread, c_rarg1);
2241 restore_args(masm, total_c_args, c_arg, out_regs);
2242 }
2244 // RedefineClasses() tracing support for obsolete method entry
2245 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2246 // protect the args we've loaded
2247 save_args(masm, total_c_args, c_arg, out_regs);
2248 __ mov_metadata(c_rarg1, method());
2249 __ call_VM_leaf(
2250 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2251 r15_thread, c_rarg1);
2252 restore_args(masm, total_c_args, c_arg, out_regs);
2253 }
2255 // Lock a synchronized method
2257 // Register definitions used by locking and unlocking
2259 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2260 const Register obj_reg = rbx; // Will contain the oop
2261 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2262 const Register old_hdr = r13; // value of old header at unlock time
2264 Label slow_path_lock;
2265 Label lock_done;
2267 if (method->is_synchronized()) {
2268 assert(!is_critical_native, "unhandled");
2271 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2273 // Get the handle (the 2nd argument)
2274 __ mov(oop_handle_reg, c_rarg1);
2276 // Get address of the box
2278 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2280 // Load the oop from the handle
2281 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2283 if (UseBiasedLocking) {
2284 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2285 }
2287 // Load immediate 1 into swap_reg %rax
2288 __ movl(swap_reg, 1);
2290 // Load (object->mark() | 1) into swap_reg %rax
2291 __ orptr(swap_reg, Address(obj_reg, 0));
2293 // Save (object->mark() | 1) into BasicLock's displaced header
2294 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2296 if (os::is_MP()) {
2297 __ lock();
2298 }
2300 // src -> dest iff dest == rax else rax <- dest
2301 __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2302 __ jcc(Assembler::equal, lock_done);
2304 // Hmm should this move to the slow path code area???
2306 // Test if the oopMark is an obvious stack pointer, i.e.,
2307 // 1) (mark & 3) == 0, and
2308 // 2) rsp <= mark < rsp + os::pagesize()
2309 // These two tests can be done by evaluating the following
2310 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2311 // assuming both stack pointer and pagesize have their
2312 // least significant 2 bits clear.
2313 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
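// Example (illustrative only): with a 4K page, 3 - os::vm_page_size() is
// 0x...fffff003, so the mask keeps bits 0-1 and bits 12 and up; the AND
// is zero exactly when mark - rsp is in [0, 4096) and (mark & 3) == 0.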
2315 __ subptr(swap_reg, rsp);
2316 __ andptr(swap_reg, 3 - os::vm_page_size());
2318 // Save the test result; for the recursive case, the result is zero
2319 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2320 __ jcc(Assembler::notEqual, slow_path_lock);
2322 // Slow path will re-enter here
2324 __ bind(lock_done);
2325 }
2328 // Finally just about ready to make the JNI call
2331 // get JNIEnv* which is first argument to native
2332 if (!is_critical_native) {
2333 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2334 }
2336 // Now set thread in native
2337 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2339 __ call(RuntimeAddress(native_func));
2341 // Verify or restore cpu control state after JNI call
2342 __ restore_cpu_control_state_after_jni();
2344 // Unpack native results.
2345 switch (ret_type) {
2346 case T_BOOLEAN: __ c2bool(rax); break;
2347 case T_CHAR : __ movzwl(rax, rax); break;
2348 case T_BYTE : __ sign_extend_byte (rax); break;
2349 case T_SHORT : __ sign_extend_short(rax); break;
2350 case T_INT : /* nothing to do */ break;
2351 case T_DOUBLE :
2352 case T_FLOAT :
2353 // Result is in xmm0 we'll save as needed
2354 break;
2355 case T_ARRAY: // Really a handle
2356 case T_OBJECT: // Really a handle
2357 break; // can't de-handlize until after safepoint check
2358 case T_VOID: break;
2359 case T_LONG: break;
2360 default : ShouldNotReachHere();
2361 }
2363 // Switch thread to "native transition" state before reading the synchronization state.
2364 // This additional state is necessary because reading and testing the synchronization
2365 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2366 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2367 // VM thread changes sync state to synchronizing and suspends threads for GC.
2368 // Thread A is resumed to finish this native method, but doesn't block here since it
2369 // didn't see any synchronization in progress, and escapes.
2370 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2372 if (os::is_MP()) {
2373 if (UseMembar) {
2374 // Force this write out before the read below
2375 __ membar(Assembler::Membar_mask_bits(
2376 Assembler::LoadLoad | Assembler::LoadStore |
2377 Assembler::StoreLoad | Assembler::StoreStore));
2378 } else {
2379 // Write serialization page so VM thread can do a pseudo remote membar.
2380 // We use the current thread pointer to calculate a thread specific
2381 // offset to write to within the page. This minimizes bus traffic
2382 // due to cache line collision.
2383 __ serialize_memory(r15_thread, rcx);
2384 }
2385 }
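// Background note (illustrative, not generated code): the serialization
// page trick lets the VM thread mprotect that page; a native-trans thread
// writing to it then traps, which serializes its memory state without
// every thread paying for a full fence on each transition.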
2387 Label after_transition;
2389 // check for safepoint operation in progress and/or pending suspend requests
2390 {
2391 Label Continue;
2393 __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2394 SafepointSynchronize::_not_synchronized);
2396 Label L;
2397 __ jcc(Assembler::notEqual, L);
2398 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2399 __ jcc(Assembler::equal, Continue);
2400 __ bind(L);
2402 // Don't use call_VM as it will see a possible pending exception and forward it
2403 // and never return here preventing us from clearing _last_native_pc down below.
2404 // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2405 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2406 // by hand.
2407 //
2408 save_native_result(masm, ret_type, stack_slots);
2409 __ mov(c_rarg0, r15_thread);
2410 __ mov(r12, rsp); // remember sp
2411 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2412 __ andptr(rsp, -16); // align stack as required by ABI
2413 if (!is_critical_native) {
2414 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2415 } else {
2416 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2417 }
2418 __ mov(rsp, r12); // restore sp
2419 __ reinit_heapbase();
2420 // Restore any method result value
2421 restore_native_result(masm, ret_type, stack_slots);
2423 if (is_critical_native) {
2424 // The call above performed the transition to thread_in_Java so
2425 // skip the transition logic below.
2426 __ jmpb(after_transition);
2427 }
2429 __ bind(Continue);
2430 }
2432 // change thread state
2433 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2434 __ bind(after_transition);
2436 Label reguard;
2437 Label reguard_done;
2438 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2439 __ jcc(Assembler::equal, reguard);
2440 __ bind(reguard_done);
2442 // native result if any is live
2444 // Unlock
2445 Label unlock_done;
2446 Label slow_path_unlock;
2447 if (method->is_synchronized()) {
2449 // Get locked oop from the handle we passed to jni
2450 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2452 Label done;
2454 if (UseBiasedLocking) {
2455 __ biased_locking_exit(obj_reg, old_hdr, done);
2456 }
2458 // Simple recursive lock?
2460 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2461 __ jcc(Assembler::equal, done);
2463 // Must save rax if it is live now because cmpxchg must use it
2464 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2465 save_native_result(masm, ret_type, stack_slots);
2466 }
2469 // get address of the stack lock
2470 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2471 // get old displaced header
2472 __ movptr(old_hdr, Address(rax, 0));
2474 // Atomic swap old header if oop still contains the stack lock
2475 if (os::is_MP()) {
2476 __ lock();
2477 }
2478 __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2479 __ jcc(Assembler::notEqual, slow_path_unlock);
2481 // slow path re-enters here
2482 __ bind(unlock_done);
2483 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2484 restore_native_result(masm, ret_type, stack_slots);
2485 }
2487 __ bind(done);
2489 }
2490 {
2491 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2492 save_native_result(masm, ret_type, stack_slots);
2493 __ mov_metadata(c_rarg1, method());
2494 __ call_VM_leaf(
2495 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2496 r15_thread, c_rarg1);
2497 restore_native_result(masm, ret_type, stack_slots);
2498 }
2500 __ reset_last_Java_frame(false, true);
2502 // Unpack oop result
2503 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2504 Label L;
2505 __ testptr(rax, rax);
2506 __ jcc(Assembler::zero, L);
2507 __ movptr(rax, Address(rax, 0));
2508 __ bind(L);
2509 __ verify_oop(rax);
2510 }
2512 if (!is_critical_native) {
2513 // reset handle block
2514 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2515 __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2516 }
2518 // pop our frame
2520 __ leave();
2522 if (!is_critical_native) {
2523 // Any exception pending?
2524 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2525 __ jcc(Assembler::notEqual, exception_pending);
2526 }
2528 // Return
2530 __ ret(0);
2532 // Unexpected paths are out of line and go here
2534 if (!is_critical_native) {
2535 // forward the exception
2536 __ bind(exception_pending);
2538 // and forward the exception
2539 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2540 }
2542 // Slow path locking & unlocking
2543 if (method->is_synchronized()) {
2545 // BEGIN Slow path lock
2546 __ bind(slow_path_lock);
2548 // has last_Java_frame setup. No exceptions so do a vanilla call, not call_VM
2549 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2551 // protect the args we've loaded
2552 save_args(masm, total_c_args, c_arg, out_regs);
2554 __ mov(c_rarg0, obj_reg);
2555 __ mov(c_rarg1, lock_reg);
2556 __ mov(c_rarg2, r15_thread);
2558 // Not a leaf but we have last_Java_frame setup as we want
2559 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2560 restore_args(masm, total_c_args, c_arg, out_regs);
2562 #ifdef ASSERT
2563 { Label L;
2564 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2565 __ jcc(Assembler::equal, L);
2566 __ stop("no pending exception allowed on exit from monitorenter");
2567 __ bind(L);
2568 }
2569 #endif
2570 __ jmp(lock_done);
2572 // END Slow path lock
2574 // BEGIN Slow path unlock
2575 __ bind(slow_path_unlock);
2577 // If we haven't already saved the native result we must save it now as xmm registers
2578 // are still exposed.
2580 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2581 save_native_result(masm, ret_type, stack_slots);
2582 }
2584 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2586 __ mov(c_rarg0, obj_reg);
2587 __ mov(r12, rsp); // remember sp
2588 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2589 __ andptr(rsp, -16); // align stack as required by ABI
2591 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2592 // NOTE that obj_reg == rbx currently
2593 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2594 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2596 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2597 __ mov(rsp, r12); // restore sp
2598 __ reinit_heapbase();
2599 #ifdef ASSERT
2600 {
2601 Label L;
2602 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2603 __ jcc(Assembler::equal, L);
2604 __ stop("no pending exception allowed on exit from complete_monitor_unlocking_C");
2605 __ bind(L);
2606 }
2607 #endif /* ASSERT */
2609 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2611 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2612 restore_native_result(masm, ret_type, stack_slots);
2613 }
2614 __ jmp(unlock_done);
2616 // END Slow path unlock
2618 } // synchronized
2620 // SLOW PATH Reguard the stack if needed
2622 __ bind(reguard);
2623 save_native_result(masm, ret_type, stack_slots);
2624 __ mov(r12, rsp); // remember sp
2625 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2626 __ andptr(rsp, -16); // align stack as required by ABI
2627 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2628 __ mov(rsp, r12); // restore sp
2629 __ reinit_heapbase();
2630 restore_native_result(masm, ret_type, stack_slots);
2631 // and continue
2632 __ jmp(reguard_done);
2636 __ flush();
2638 nmethod *nm = nmethod::new_native_nmethod(method,
2639 compile_id,
2640 masm->code(),
2641 vep_offset,
2642 frame_complete,
2643 stack_slots / VMRegImpl::slots_per_word,
2644 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2645 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2646 oop_maps);
2648 if (is_critical_native) {
2649 nm->set_lazy_critical_native(true);
2650 }
2652 return nm;
2654 }
2656 #ifdef HAVE_DTRACE_H
2657 // ---------------------------------------------------------------------------
2658 // Generate a dtrace nmethod for a given signature. The method takes arguments
2659 // in the Java compiled code convention, marshals them to the native
2660 // abi and then leaves nops at the position you would expect to call a native
2661 // function. When the probe is enabled the nops are replaced with a trap
2662 // instruction that dtrace inserts and the trace will cause a notification
2663 // to dtrace.
2664 //
2665 // The probes are only able to take primitive types and java/lang/String as
2666 // arguments. No other java types are allowed. Strings are converted to utf8
2667 // strings so that from dtrace's point of view java strings are converted to C
2668 // strings. There is an arbitrary fixed limit on the total space that a method
2669 // can use for converting the strings. (256 chars per string in the signature).
2670 // So any java string larger than this is truncated.
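// Example (illustrative only): with a 256-char budget per string, a
// 300-char java.lang.String argument reaches the probe with its utf8
// form cut off at the budget.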
2672 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2673 static bool offsets_initialized = false;
2676 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
2677 methodHandle method) {
2680 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2681 // be single threaded in this method.
2682 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2684 if (!offsets_initialized) {
2685 fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize;
2686 fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize;
2687 fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize;
2688 fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize;
2689 fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize;
2690 fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize;
2692 fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize;
2693 fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize;
2694 fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize;
2695 fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize;
2696 fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize;
2697 fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize;
2698 fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize;
2699 fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize;
2701 offsets_initialized = true;
2702 }
2703 // Fill in the signature array, for the calling-convention call.
2704 int total_args_passed = method->size_of_parameters();
2706 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2707 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2709 // The signature we are going to use for the trap that dtrace will see
2710 // java/lang/String is converted. We drop "this", and any other object
2711 // is converted to NULL. (A one-slot java/lang/Long object reference
2712 // is converted to a two-slot long, which is why we double the allocation).
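// Example (illustrative only): static args (String s, Integer i, double d)
// produce out_sig_bt == { T_ADDRESS, T_INT, T_LONG, T_VOID }: the String
// becomes a utf8 pointer, the Integer unboxes to an int, and the double
// is passed as a two-slot long.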
2713 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2714 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2716 int i=0;
2717 int total_strings = 0;
2718 int first_arg_to_pass = 0;
2719 int total_c_args = 0;
2721 // Skip the receiver as dtrace doesn't want to see it
2722 if( !method->is_static() ) {
2723 in_sig_bt[i++] = T_OBJECT;
2724 first_arg_to_pass = 1;
2725 }
2727 // We need to convert the java args to where a native (non-jni) function
2728 // would expect them. To figure out where they go we convert the java
2729 // signature to a C signature.
2731 SignatureStream ss(method->signature());
2732 for ( ; !ss.at_return_type(); ss.next()) {
2733 BasicType bt = ss.type();
2734 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2735 out_sig_bt[total_c_args++] = bt;
2736 if( bt == T_OBJECT) {
2737 Symbol* s = ss.as_symbol_or_null(); // symbol is created
2738 if (s == vmSymbols::java_lang_String()) {
2739 total_strings++;
2740 out_sig_bt[total_c_args-1] = T_ADDRESS;
2741 } else if (s == vmSymbols::java_lang_Boolean() ||
2742 s == vmSymbols::java_lang_Character() ||
2743 s == vmSymbols::java_lang_Byte() ||
2744 s == vmSymbols::java_lang_Short() ||
2745 s == vmSymbols::java_lang_Integer() ||
2746 s == vmSymbols::java_lang_Float()) {
2747 out_sig_bt[total_c_args-1] = T_INT;
2748 } else if (s == vmSymbols::java_lang_Long() ||
2749 s == vmSymbols::java_lang_Double()) {
2750 out_sig_bt[total_c_args-1] = T_LONG;
2751 out_sig_bt[total_c_args++] = T_VOID;
2752 }
2753 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2754 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2755 // We convert double to long
2756 out_sig_bt[total_c_args-1] = T_LONG;
2757 out_sig_bt[total_c_args++] = T_VOID;
2758 } else if ( bt == T_FLOAT) {
2759 // We convert float to int
2760 out_sig_bt[total_c_args-1] = T_INT;
2761 }
2762 }
2764 assert(i==total_args_passed, "validly parsed signature");
2766 // Now get the compiled-Java layout as input arguments
2767 int comp_args_on_stack;
2768 comp_args_on_stack = SharedRuntime::java_calling_convention(
2769 in_sig_bt, in_regs, total_args_passed, false);
2771 // Now figure out where the args must be stored and how much stack space
2772 // they require (neglecting out_preserve_stack_slots but space for storing
2773 // the 1st six register arguments). It's weird; see int_stk_helper.
2775 int out_arg_slots;
2776 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2778 // Calculate the total number of stack slots we will need.
2780 // First count the abi requirement plus all of the outgoing args
2781 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2783 // Now space for the string(s) we must convert
2784 int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2785 for (i = 0; i < total_strings ; i++) {
2786 string_locs[i] = stack_slots;
2787 stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2788 }
2790 // Plus the temps we might need to juggle register args
2791 // regs take two slots each
2792 stack_slots += (Argument::n_int_register_parameters_c +
2793 Argument::n_float_register_parameters_c) * 2;
2796 // + 4 for return address (which we own) and saved rbp
2798 stack_slots += 4;
2800 // Ok The space we have allocated will look like:
2801 //
2802 //
2803 // FP-> | |
2804 // |---------------------|
2805 // | string[n] |
2806 // |---------------------| <- string_locs[n]
2807 // | string[n-1] |
2808 // |---------------------| <- string_locs[n-1]
2809 // | ... |
2810 // | ... |
2811 // |---------------------| <- string_locs[1]
2812 // | string[0] |
2813 // |---------------------| <- string_locs[0]
2814 // | outbound memory |
2815 // | based arguments |
2816 // | |
2817 // |---------------------|
2818 // | |
2819 // SP-> | out_preserved_slots |
2820 //
2821 //
2823 // Now compute the actual number of stack words we need, rounding to keep
2824 // the stack properly aligned.
2825 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2827 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2829 intptr_t start = (intptr_t)__ pc();
2831 // First thing make an ic check to see if we should even be here
2833 // We are free to use all registers as temps without saving them and
2834 // restoring them except rbp. rbp is the only callee save register
2835 // as far as the interpreter and the compiler(s) are concerned.
2837 const Register ic_reg = rax;
2838 const Register receiver = rcx;
2839 Label hit;
2840 Label exception_pending;
2843 __ verify_oop(receiver);
2844 __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2845 __ jcc(Assembler::equal, hit);
2847 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2849 // Verified entry must be aligned for code patching,
2850 // and the first 5 bytes must be in the same cache line;
2851 // if we align at 8 then we will be sure the 5 bytes are in the same line
2852 __ align(8);
2854 __ bind(hit);
2856 int vep_offset = ((intptr_t)__ pc()) - start;
2859 // The instruction at the verified entry point must be 5 bytes or longer
2860 // because it can be patched on the fly by make_non_entrant. The stack bang
2861 // instruction fits that requirement.
2863 // Generate stack overflow check
2865 if (UseStackBanging) {
2866 if (stack_size <= StackShadowPages*os::vm_page_size()) {
2867 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2868 } else {
2869 __ movl(rax, stack_size);
2870 __ bang_stack_size(rax, rbx);
2871 }
2872 } else {
2873 // need a 5 byte instruction to allow MT safe patching to non-entrant
2874 __ fat_nop();
2875 }
2877 assert(((uintptr_t)__ pc() - start - vep_offset) >= 5,
2878 "valid size for make_non_entrant");
2880 // Generate a new frame for the wrapper.
2881 __ enter();
2883 // -2*wordSize because return address is already present and so is saved rbp
2884 if (stack_size - 2*wordSize != 0) {
2885 __ subq(rsp, stack_size - 2*wordSize);
2886 }
2888 // Frame is now completed as far as size and linkage.
2890 int frame_complete = ((intptr_t)__ pc()) - start;
2892 int c_arg, j_arg;
2894 // State of input register args
2896 bool live[ConcreteRegisterImpl::number_of_registers];
2898 live[j_rarg0->as_VMReg()->value()] = false;
2899 live[j_rarg1->as_VMReg()->value()] = false;
2900 live[j_rarg2->as_VMReg()->value()] = false;
2901 live[j_rarg3->as_VMReg()->value()] = false;
2902 live[j_rarg4->as_VMReg()->value()] = false;
2903 live[j_rarg5->as_VMReg()->value()] = false;
2905 live[j_farg0->as_VMReg()->value()] = false;
2906 live[j_farg1->as_VMReg()->value()] = false;
2907 live[j_farg2->as_VMReg()->value()] = false;
2908 live[j_farg3->as_VMReg()->value()] = false;
2909 live[j_farg4->as_VMReg()->value()] = false;
2910 live[j_farg5->as_VMReg()->value()] = false;
2911 live[j_farg6->as_VMReg()->value()] = false;
2912 live[j_farg7->as_VMReg()->value()] = false;
2915 bool rax_is_zero = false;
2917 // All args (except strings) destined for the stack are moved first
2918 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2919 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2920 VMRegPair src = in_regs[j_arg];
2921 VMRegPair dst = out_regs[c_arg];
2923 // Get the real reg value or a dummy (rsp)
2925 int src_reg = src.first()->is_reg() ?
2926 src.first()->value() :
2927 rsp->as_VMReg()->value();
2929 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
2930 (in_sig_bt[j_arg] == T_OBJECT &&
2931 out_sig_bt[c_arg] != T_INT &&
2932 out_sig_bt[c_arg] != T_ADDRESS &&
2933 out_sig_bt[c_arg] != T_LONG);
2935 live[src_reg] = !useless;
2937 if (dst.first()->is_stack()) {
2939 // Even though a string arg in a register is still live after this loop,
2940 // it will be dead after the string conversion loop (next), so we take
2941 // advantage of that now for simpler code to manage liveness.
2943 live[src_reg] = false;
2944 switch (in_sig_bt[j_arg]) {
2946 case T_ARRAY:
2947 case T_OBJECT:
2948 {
2949 Address stack_dst(rsp, reg2offset_out(dst.first()));
2951 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2952 // need to unbox a one-word value
2953 Register in_reg = rax;
2954 if ( src.first()->is_reg() ) {
2955 in_reg = src.first()->as_Register();
2956 } else {
2957 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
2958 rax_is_zero = false;
2959 }
2960 Label skipUnbox;
2961 __ movptr(Address(rsp, reg2offset_out(dst.first())),
2962 (int32_t)NULL_WORD);
2963 __ testq(in_reg, in_reg);
2964 __ jcc(Assembler::zero, skipUnbox);
2966 BasicType bt = out_sig_bt[c_arg];
2967 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2968 Address src1(in_reg, box_offset);
2969 if ( bt == T_LONG ) {
2970 __ movq(in_reg, src1);
2971 __ movq(stack_dst, in_reg);
2972 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2973 ++c_arg; // skip over T_VOID to keep the loop indices in sync
2974 } else {
2975 __ movl(in_reg, src1);
2976 __ movl(stack_dst, in_reg);
2977 }
2979 __ bind(skipUnbox);
2980 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
2981 // Convert the arg to NULL
2982 if (!rax_is_zero) {
2983 __ xorq(rax, rax);
2984 rax_is_zero = true;
2985 }
2986 __ movq(stack_dst, rax);
2987 }
2988 }
2989 break;
2991 case T_VOID:
2992 break;
2994 case T_FLOAT:
2995 // This does the right thing since we know it is destined for the
2996 // stack
2997 float_move(masm, src, dst);
2998 break;
3000 case T_DOUBLE:
3001 // This does the right thing since we know it is destined for the
3002 // stack
3003 double_move(masm, src, dst);
3004 break;
3006 case T_LONG :
3007 long_move(masm, src, dst);
3008 break;
3010 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
3012 default:
3013 move32_64(masm, src, dst);
3014 }
3015 }
3017 }
3019 // If we have any strings we must store any register based arg to the stack
3020 // This includes any still live xmm registers too.
3022 int sid = 0;
3024 if (total_strings > 0 ) {
3025 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3026 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3027 VMRegPair src = in_regs[j_arg];
3028 VMRegPair dst = out_regs[c_arg];
3030 if (src.first()->is_reg()) {
3031 Address src_tmp(rbp, fp_offset[src.first()->value()]);
3033 // string oops were left untouched by the previous loop even if the
3034 // eventual (converted) arg is destined for the stack, so park them
3035 // away now (except for the first)
3037 if (out_sig_bt[c_arg] == T_ADDRESS) {
3038 Address utf8_addr = Address(
3039 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3040 if (sid != 1) {
3041 // The first string arg won't be killed until after the utf8
3042 // conversion
3043 __ movq(utf8_addr, src.first()->as_Register());
3044 }
3045 } else if (dst.first()->is_reg()) {
3046 if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) {
3048 // Convert the xmm register to an int and store it in the reserved
3049 // location for the eventual c register arg
3050 XMMRegister f = src.first()->as_XMMRegister();
3051 if (in_sig_bt[j_arg] == T_FLOAT) {
3052 __ movflt(src_tmp, f);
3053 } else {
3054 __ movdbl(src_tmp, f);
3055 }
3056 } else {
3057 // If the arg is an oop type we don't support, don't bother to store
3058 // it; remember, strings were handled above.
3059 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3060 (in_sig_bt[j_arg] == T_OBJECT &&
3061 out_sig_bt[c_arg] != T_INT &&
3062 out_sig_bt[c_arg] != T_LONG);
3064 if (!useless) {
3065 __ movq(src_tmp, src.first()->as_Register());
3066 }
3067 }
3068 }
3069 }
3070 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3071 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3072 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3073 }
3074 }
3076 // Now that the volatile registers are safe, convert all the strings
3077 sid = 0;
3079 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3080 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3081 if (out_sig_bt[c_arg] == T_ADDRESS) {
3082 // It's a string
3083 Address utf8_addr = Address(
3084 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3085 // The first string we find might still be in the original java arg
3086 // register
3088 VMReg src = in_regs[j_arg].first();
3090 // We will need to eventually save the final argument to the trap
3091 // in the non-volatile location dedicated to src. This is the offset
3092 // from fp we will use.
3093 int src_off = src->is_reg() ?
3094 fp_offset[src->value()] : reg2offset_in(src);
3096 // This is where the argument will eventually reside
3097 VMRegPair dst = out_regs[c_arg];
3099 if (src->is_reg()) {
3100 if (sid == 1) {
3101 __ movq(c_rarg0, src->as_Register());
3102 } else {
3103 __ movq(c_rarg0, utf8_addr);
3104 }
3105 } else {
3106 // arg is still in the original location
3107 __ movq(c_rarg0, Address(rbp, reg2offset_in(src)));
3108 }
3109 Label done, convert;
3111 // see if the oop is NULL
3112 __ testq(c_rarg0, c_rarg0);
3113 __ jcc(Assembler::notEqual, convert);
3115 if (dst.first()->is_reg()) {
3116 // Save the ptr to the utf string in the original src loc or the tmp
3117 // dedicated to it
3118 __ movq(Address(rbp, src_off), c_rarg0);
3119 } else {
3120 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0);
3121 }
3122 __ jmp(done);
3124 __ bind(convert);
3126 __ lea(c_rarg1, utf8_addr);
3127 if (dst.first()->is_reg()) {
3128 __ movq(Address(rbp, src_off), c_rarg1);
3129 } else {
3130 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1);
3131 }
3132 // And do the conversion
3133 __ call(RuntimeAddress(
3134 CAST_FROM_FN_PTR(address, SharedRuntime::get_utf)));
3136 __ bind(done);
3137 }
3138 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3139 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3140 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3141 }
3142 }
3143 // The get_utf call killed all the c_arg registers
3144 live[c_rarg0->as_VMReg()->value()] = false;
3145 live[c_rarg1->as_VMReg()->value()] = false;
3146 live[c_rarg2->as_VMReg()->value()] = false;
3147 live[c_rarg3->as_VMReg()->value()] = false;
3148 live[c_rarg4->as_VMReg()->value()] = false;
3149 live[c_rarg5->as_VMReg()->value()] = false;
3151 live[c_farg0->as_VMReg()->value()] = false;
3152 live[c_farg1->as_VMReg()->value()] = false;
3153 live[c_farg2->as_VMReg()->value()] = false;
3154 live[c_farg3->as_VMReg()->value()] = false;
3155 live[c_farg4->as_VMReg()->value()] = false;
3156 live[c_farg5->as_VMReg()->value()] = false;
3157 live[c_farg6->as_VMReg()->value()] = false;
3158 live[c_farg7->as_VMReg()->value()] = false;
3159 }
3161 // Now we can finally move the register args to their desired locations
3163 rax_is_zero = false;
3165 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3166 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3168 VMRegPair src = in_regs[j_arg];
3169 VMRegPair dst = out_regs[c_arg];
3171 // Only need to look for args destined for the integer registers (since we
3172 // convert float/double args to look like int/long outbound)
3173 if (dst.first()->is_reg()) {
3174 Register r = dst.first()->as_Register();
3176 // Check if the java arg is unsupported and therefore useless
3177 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3178 (in_sig_bt[j_arg] == T_OBJECT &&
3179 out_sig_bt[c_arg] != T_INT &&
3180 out_sig_bt[c_arg] != T_ADDRESS &&
3181 out_sig_bt[c_arg] != T_LONG);
3184 // If we're going to kill an existing arg, save it first
3185 if (live[dst.first()->value()]) {
3186 // you can't kill yourself
3187 if (src.first() != dst.first()) {
3188 __ movq(Address(rbp, fp_offset[dst.first()->value()]), r);
3189 }
3190 }
3191 if (src.first()->is_reg()) {
3192 if (live[src.first()->value()] ) {
3193 if (in_sig_bt[j_arg] == T_FLOAT) {
3194 __ movdl(r, src.first()->as_XMMRegister());
3195 } else if (in_sig_bt[j_arg] == T_DOUBLE) {
3196 __ movdq(r, src.first()->as_XMMRegister());
3197 } else if (r != src.first()->as_Register()) {
3198 if (!useless) {
3199 __ movq(r, src.first()->as_Register());
3200 }
3201 }
3202 } else {
3203 // If the arg is an oop type we don't support, don't bother to store
3204 // it
3205 if (!useless) {
3206 if (in_sig_bt[j_arg] == T_DOUBLE ||
3207 in_sig_bt[j_arg] == T_LONG ||
3208 in_sig_bt[j_arg] == T_OBJECT ) {
3209 __ movq(r, Address(rbp, fp_offset[src.first()->value()]));
3210 } else {
3211 __ movl(r, Address(rbp, fp_offset[src.first()->value()]));
3212 }
3213 }
3214 }
3215 live[src.first()->value()] = false;
3216 } else if (!useless) {
3217 // full sized move even for int should be ok
3218 __ movq(r, Address(rbp, reg2offset_in(src.first())));
3219 }
3221 // At this point r has the original java arg in the final location
3222 // (assuming it wasn't useless). If the java arg was an oop
3223 // we have a bit more to do
3225 if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) {
3226 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3227 // need to unbox a one-word value
3228 Label skip;
3229 __ testq(r, r);
3230 __ jcc(Assembler::equal, skip);
3231 BasicType bt = out_sig_bt[c_arg];
3232 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3233 Address src1(r, box_offset);
3234 if ( bt == T_LONG ) {
3235 __ movq(r, src1);
3236 } else {
3237 __ movl(r, src1);
3238 }
3239 __ bind(skip);
3241 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
3242 // Convert the arg to NULL
3243 __ xorq(r, r);
3244 }
3245 }
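// Unboxing note (above): value_offset_in_bytes(bt) is the offset of the
// box object's primitive 'value' field (e.g. java.lang.Long.value), so the
// load amounts to r = *(jlong*)(box + box_offset); a NULL box skips the
// load and r keeps the 0 it already holds.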
3247 // dst can no longer be holding an input value
3248 live[dst.first()->value()] = false;
3249 }
3250 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3251 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3252 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3253 }
3254 }
3257 // Ok now we are done. Need to place the nop that dtrace wants in order to
3258 // patch in the trap
3259 int patch_offset = ((intptr_t)__ pc()) - start;
3261 __ nop();
3264 // Return
3266 __ leave();
3267 __ ret(0);
3269 __ flush();
3271 nmethod *nm = nmethod::new_dtrace_nmethod(
3272 method, masm->code(), vep_offset, patch_offset, frame_complete,
3273 stack_slots / VMRegImpl::slots_per_word);
3274 return nm;
3276 }
3278 #endif // HAVE_DTRACE_H
3280 // this function returns the adjustment size (in number of words) to a c2i adapter
3281 // activation for use during deoptimization
3282 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3283 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3284 }
3287 uint SharedRuntime::out_preserve_stack_slots() {
3288 return 0;
3289 }
3291 //------------------------------generate_deopt_blob----------------------------
3292 void SharedRuntime::generate_deopt_blob() {
3293 // Allocate space for the code
3294 ResourceMark rm;
3295 // Setup code generation tools
3296 CodeBuffer buffer("deopt_blob", 2048, 1024);
3297 MacroAssembler* masm = new MacroAssembler(&buffer);
3298 int frame_size_in_words;
3299 OopMap* map = NULL;
3300 OopMapSet *oop_maps = new OopMapSet();
3302 // -------------
3303 // This code enters when returning to a de-optimized nmethod. A return
3304 // address has been pushed on the stack, and return values are in
3305 // registers.
3306 // If we are doing a normal deopt then we were called from the patched
3307 // nmethod from the point we returned to the nmethod. So the return
3308 // address on the stack is wrong by NativeCall::instruction_size
3309 // We will adjust the value so it looks like we have the original return
3310 // address on the stack (like when we eagerly deoptimized).
3311 // In the case of an exception pending when deoptimizing, we enter
3312 // with a return address on the stack that points after the call we patched
3313 // into the exception handler. We have the following register state from,
3314 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3315 // rax: exception oop
3316 // rbx: exception handler
3317 // rdx: throwing pc
3318 // So in this case we simply jam rdx into the useless return address and
3319 // the stack looks just like we want.
3320 //
3321 // At this point we need to de-opt. We save the argument return
3322 // registers. We call the first C routine, fetch_unroll_info(). This
3323 // routine captures the return values and returns a structure which
3324 // describes the current frame size and the sizes of all replacement frames.
3325 // The current frame is compiled code and may contain many inlined
3326 // functions, each with their own JVM state. We pop the current frame, then
3327 // push all the new frames. Then we call the C routine unpack_frames() to
3328 // populate these frames. Finally unpack_frames() returns us the new target
3329 // address. Notice that callee-save registers are BLOWN here; they have
3330 // already been captured in the vframeArray at the time the return PC was
3331 // patched.
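// In outline, the control flow this blob emits (a sketch, not extra code):
//   save_live_registers();                    // capture return values
//   info = fetch_unroll_info(thread);         // sizes of replacement frames
//   pop the deoptimized frame;                // rsp -> caller's return address
//   for each replacement frame:
//     push pc; push rbp; allocate locals;     // build a skeletal frame
//   unpack_frames(thread, exec_mode);         // fill in the skeletons
//   return;                                   // resume in the interpreter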
3332 address start = __ pc();
3333 Label cont;
3335 // Prolog for the non-exception case!
3337 // Save everything in sight.
3338 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3340 // Normal deoptimization. Save exec mode for unpack_frames.
3341 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3342 __ jmp(cont);
3344 int reexecute_offset = __ pc() - start;
3346 // Reexecute case
3347 // the return address is the pc that describes what bci to re-execute at
3349 // No need to update map as each call to save_live_registers will produce an identical oopmap
3350 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3352 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3353 __ jmp(cont);
3355 int exception_offset = __ pc() - start;
3357 // Prolog for exception case
3359 // all registers are dead at this entry point, except for rax, and
3360 // rdx which contain the exception oop and exception pc
3361 // respectively. Set them in TLS and fall thru to the
3362 // unpack_with_exception_in_tls entry point.
3364 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3365 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3367 int exception_in_tls_offset = __ pc() - start;
3369 // new implementation because exception oop is now passed in JavaThread
3371 // Prolog for exception case
3372 // All registers must be preserved because they might be used by LinearScan
3373 // Exception oop and throwing PC are passed in JavaThread
3374 // tos: stack at point of call to method that threw the exception (i.e. only
3375 // args are on the stack, no return address)
3377 // make room on stack for the return address
3378 // It will be patched later with the throwing pc. The correct value is not
3379 // available now because loading it from memory would destroy registers.
3380 __ push(0);
3382 // Save everything in sight.
3383 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3385 // Now it is safe to overwrite any register
3387 // Deopt during an exception. Save exec mode for unpack_frames.
3388 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3390 // load throwing pc from JavaThread and patch it as the return address
3391 // of the current frame. Then clear the field in JavaThread
3393 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3394 __ movptr(Address(rbp, wordSize), rdx);
3395 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3397 #ifdef ASSERT
3398 // verify that there is really an exception oop in JavaThread
3399 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3400 __ verify_oop(rax);
3402 // verify that there is no pending exception
3403 Label no_pending_exception;
3404 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3405 __ testptr(rax, rax);
3406 __ jcc(Assembler::zero, no_pending_exception);
3407 __ stop("must not have pending exception here");
3408 __ bind(no_pending_exception);
3409 #endif
3411 __ bind(cont);
3413 // Call C code. Need thread and this frame, but NOT official VM entry
3414 // crud. We cannot block on this call, no GC can happen.
3415 //
3416 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3418 // fetch_unroll_info needs to call last_java_frame().
3420 __ set_last_Java_frame(noreg, noreg, NULL);
3421 #ifdef ASSERT
3422 { Label L;
3423 __ cmpptr(Address(r15_thread,
3424 JavaThread::last_Java_fp_offset()),
3425 (int32_t)0);
3426 __ jcc(Assembler::equal, L);
3427 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3428 __ bind(L);
3429 }
3430 #endif // ASSERT
3431 __ mov(c_rarg0, r15_thread);
3432 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3434 // Need to have an oopmap that tells fetch_unroll_info where to
3435 // find any register it might need.
3436 oop_maps->add_gc_map(__ pc() - start, map);
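// (the map is keyed by the offset of the return address, i.e. of the
// instruction immediately following the call emitted above)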
3438 __ reset_last_Java_frame(false, false);
3440 // Load UnrollBlock* into rdi
3441 __ mov(rdi, rax);
3443 Label noException;
3444 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3445 __ jcc(Assembler::notEqual, noException);
3446 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3447 // QQQ this is useless it was NULL above
3448 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3449 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3450 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3452 __ verify_oop(rax);
3454 // Overwrite the result registers with the exception results.
3455 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3456 // I think this is useless
3457 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3459 __ bind(noException);
3461 // Only register save data is on the stack.
3462 // Now restore the result registers. Everything else is either dead
3463 // or captured in the vframeArray.
3464 RegisterSaver::restore_result_registers(masm);
3466 // All of the register save area has been popped off the stack. Only the
3467 // return address remains.
3469 // Pop all the frames we must move/replace.
3470 //
3471 // Frame picture (youngest to oldest)
3472 // 1: self-frame (no frame link)
3473 // 2: deopting frame (no frame link)
3474 // 3: caller of deopting frame (could be compiled/interpreted).
3475 //
3476 // Note: by leaving the return address of self-frame on the stack
3477 // and using the size of frame 2 to adjust the stack
3478 // when we are done, the return address to frame 3 will still be on the stack.
3480 // Pop deoptimized frame
3481 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3482 __ addptr(rsp, rcx);
3484 // rsp should be pointing at the return address to the caller (3)
3486 // Pick up the initial fp we should save
3487 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3488 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3490 #ifdef ASSERT
3491 // Compilers generate code that bangs the stack by as much as the
3492 // interpreter would need. So this stack banging should never
3493 // trigger a fault. Verify that it does not on non-product builds.
3494 if (UseStackBanging) {
3495 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3496 __ bang_stack_size(rbx, rcx);
3497 }
3498 #endif
3500 // Load address of array of frame pcs into rcx
3501 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3503 // Trash the old pc
3504 __ addptr(rsp, wordSize);
3506 // Load address of array of frame sizes into rsi
3507 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3509 // Load counter into rdx
3510 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3512 // Now adjust the caller's stack to make up for the extra locals
3513 // but record the original sp so that we can save it in the skeletal interpreter
3514 // frame and the stack walking of interpreter_sender will get the unextended sp
3515 // value and not the "real" sp value.
3517 const Register sender_sp = r8;
3519 __ mov(sender_sp, rsp);
3520 __ movl(rbx, Address(rdi,
3521 Deoptimization::UnrollBlock::
3522 caller_adjustment_offset_in_bytes()));
3523 __ subptr(rsp, rbx);
3525 // Push interpreter frames in a loop
3526 Label loop;
3527 __ bind(loop);
3528 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3529 #ifdef CC_INTERP
3530 __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
3531 #ifdef ASSERT
3532 __ push(0xDEADDEAD); // Make a recognizable pattern
3533 __ push(0xDEADDEAD);
3534 #else /* ASSERT */
3535 __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
3536 #endif /* ASSERT */
3537 #else
3538 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3539 #endif // CC_INTERP
3540 __ pushptr(Address(rcx, 0)); // Save return address
3541 __ enter(); // Save old & set new ebp
3542 __ subptr(rsp, rbx); // Prolog
3543 #ifdef CC_INTERP
3544 __ movptr(Address(rbp,
3545 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3546 sender_sp); // Make it walkable
3547 #else /* CC_INTERP */
3548 // This value is corrected by layout_activation_impl
3549 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3550 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3551 #endif /* CC_INTERP */
3552 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3553 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3554 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3555 __ decrementl(rdx); // Decrement counter
3556 __ jcc(Assembler::notZero, loop);
3557 __ pushptr(Address(rcx, 0)); // Save final return address
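// Stack picture here, a sketch (youngest at the top):
//   [final return address]            <- rsp
//   [skeletal interpreter frame] x N  each: { return pc, saved rbp, locals }
//   [caller of the deopting frame]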
3559 // Re-push self-frame
3560 __ enter(); // Save old & set new ebp
3562 // Allocate a full-sized register save area.
3563 // Return address and rbp are in place, so we allocate two fewer words.
3564 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3566 // Restore frame locals after moving the frame
3567 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3568 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3570 // Call C code. Need thread but NOT official VM entry
3571 // crud. We cannot block on this call, no GC can happen. Call should
3572 // restore return values to their stack-slots with the new SP.
3573 //
3574 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3576 // Use rbp because the frames look interpreted now
3577 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3578 // Don't need the precise return PC here, just precise enough to point into this code blob.
3579 address the_pc = __ pc();
3580 __ set_last_Java_frame(noreg, rbp, the_pc);
3582 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
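// (e.g. with StackAlignmentInBytes == 16 the mask is ~0xf, rounding rsp
// down to the next 16-byte boundary)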
3583 __ mov(c_rarg0, r15_thread);
3584 __ movl(c_rarg1, r14); // second arg: exec_mode
3585 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3586 // Revert SP alignment after call since we're going to do some SP relative addressing below
3587 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3589 // Set an oopmap for the call site
3590 // Use the same PC we used for the last java frame
3591 oop_maps->add_gc_map(the_pc - start,
3592 new OopMap( frame_size_in_words, 0 ));
3594 // Clear fp AND pc
3595 __ reset_last_Java_frame(true, true);
3597 // Collect return values
3598 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3599 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3600 // I think this is useless (throwing pc?)
3601 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3603 // Pop self-frame.
3604 __ leave(); // Epilog
3606 // Jump to interpreter
3607 __ ret(0);
3609 // Make sure all code is generated
3610 masm->flush();
3612 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3613 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3614 }
3616 #ifdef COMPILER2
3617 //------------------------------generate_uncommon_trap_blob--------------------
3618 void SharedRuntime::generate_uncommon_trap_blob() {
3619 // Allocate space for the code
3620 ResourceMark rm;
3621 // Setup code generation tools
3622 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3623 MacroAssembler* masm = new MacroAssembler(&buffer);
3625 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3627 address start = __ pc();
3629 if (UseRTMLocking) {
3630 // Abort RTM transaction before possible nmethod deoptimization.
3631 __ xabort(0);
3632 }
3634 // Push self-frame. We get here with a return address on the
3635 // stack, so rsp is 8-byte aligned until we allocate our frame.
3636 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3638 // No callee saved registers. rbp is assumed implicitly saved
3639 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3641 // compiler left unloaded_class_index in j_rarg0; move it to where the
3642 // runtime expects it.
3643 __ movl(c_rarg1, j_rarg0);
3645 __ set_last_Java_frame(noreg, noreg, NULL);
3647 // Call C code. Need thread but NOT official VM entry
3648 // crud. We cannot block on this call, no GC can happen. Call should
3649 // capture callee-saved registers as well as return values.
3650 // Thread is in rdi already.
3651 //
3652 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3654 __ mov(c_rarg0, r15_thread);
3655 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3657 // Set an oopmap for the call site
3658 OopMapSet* oop_maps = new OopMapSet();
3659 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3661 // location of rbp is known implicitly by the frame sender code
3663 oop_maps->add_gc_map(__ pc() - start, map);
3665 __ reset_last_Java_frame(false, false);
3667 // Load UnrollBlock* into rdi
3668 __ mov(rdi, rax);
3670 // Pop all the frames we must move/replace.
3671 //
3672 // Frame picture (youngest to oldest)
3673 // 1: self-frame (no frame link)
3674 // 2: deopting frame (no frame link)
3675 // 3: caller of deopting frame (could be compiled/interpreted).
3677 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3678 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
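// (framesize - 2 jint slots: everything except the one-word return address)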
3680 // Pop deoptimized frame (int)
3681 __ movl(rcx, Address(rdi,
3682 Deoptimization::UnrollBlock::
3683 size_of_deoptimized_frame_offset_in_bytes()));
3684 __ addptr(rsp, rcx);
3686 // rsp should be pointing at the return address to the caller (3)
3688 // Pick up the initial fp we should save
3689 // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
3690 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3692 #ifdef ASSERT
3693 // Compilers generate code that bangs the stack by as much as the
3694 // interpreter would need. So this stack banging should never
3695 // trigger a fault. Verify that it does not on non-product builds.
3696 if (UseStackBanging) {
3697 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3698 __ bang_stack_size(rbx, rcx);
3699 }
3700 #endif
3702 // Load address of array of frame pcs into rcx (address*)
3703 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3705 // Trash the return pc
3706 __ addptr(rsp, wordSize);
3708 // Load address of array of frame sizes into rsi (intptr_t*)
3709 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3711 // Counter
3712 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)
3714 // Now adjust the caller's stack to make up for the extra locals but
3715 // record the original sp so that we can save it in the skeletal
3716 // interpreter frame and the stack walking of interpreter_sender
3717 // will get the unextended sp value and not the "real" sp value.
3719 const Register sender_sp = r8;
3721 __ mov(sender_sp, rsp);
3722 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
3723 __ subptr(rsp, rbx);
3725 // Push interpreter frames in a loop
3726 Label loop;
3727 __ bind(loop);
3728 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3729 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3730 __ pushptr(Address(rcx, 0)); // Save return address
3731 __ enter(); // Save old & set new rbp
3732 __ subptr(rsp, rbx); // Prolog
3733 #ifdef CC_INTERP
3734 __ movptr(Address(rbp,
3735 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3736 sender_sp); // Make it walkable
3737 #else // CC_INTERP
3738 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3739 sender_sp); // Make it walkable
3740 // This value is corrected by layout_activation_impl
3741 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3742 #endif // CC_INTERP
3743 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3744 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3745 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3746 __ decrementl(rdx); // Decrement counter
3747 __ jcc(Assembler::notZero, loop);
3748 __ pushptr(Address(rcx, 0)); // Save final return address
3750 // Re-push self-frame
3751 __ enter(); // Save old & set new rbp
3752 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt); // Prolog
3755 // Use rbp because the frames look interpreted now
3756 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3757 // Don't need the precise return PC here, just precise enough to point into this code blob.
3758 address the_pc = __ pc();
3759 __ set_last_Java_frame(noreg, rbp, the_pc);
3761 // Call C code. Need thread but NOT official VM entry
3762 // crud. We cannot block on this call, no GC can happen. Call should
3763 // restore return values to their stack-slots with the new SP.
3764 // Thread is in rdi already.
3765 //
3766 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3768 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3769 __ mov(c_rarg0, r15_thread);
3770 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3771 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3773 // Set an oopmap for the call site
3774 // Use the same PC we used for the last java frame
3775 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3777 // Clear fp AND pc
3778 __ reset_last_Java_frame(true, true);
3780 // Pop self-frame.
3781 __ leave(); // Epilog
3783 // Jump to interpreter
3784 __ ret(0);
3786 // Make sure all code is generated
3787 masm->flush();
3789 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3790 SimpleRuntimeFrame::framesize >> 1);
3791 }
3792 #endif // COMPILER2
3795 //------------------------------generate_handler_blob------
3796 //
3797 // Generate a special Compile2Runtime blob that saves all registers
3798 // and sets up the oopmap.
3799 //
3800 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3801 assert(StubRoutines::forward_exception_entry() != NULL,
3802 "must be generated before");
3804 ResourceMark rm;
3805 OopMapSet *oop_maps = new OopMapSet();
3806 OopMap* map;
3808 // Allocate space for the code. Setup code generation tools.
3809 CodeBuffer buffer("handler_blob", 2048, 1024);
3810 MacroAssembler* masm = new MacroAssembler(&buffer);
3812 address start = __ pc();
3813 address call_pc = NULL;
3814 int frame_size_in_words;
3815 bool cause_return = (poll_type == POLL_AT_RETURN);
3816 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
3818 if (UseRTMLocking) {
3819 // Abort RTM transaction before calling runtime
3820 // because critical section will be large and will be
3821 // aborted anyway. Also nmethod could be deoptimized.
3822 __ xabort(0);
3823 }
3825 // Make room for return address (or push it again)
3826 if (!cause_return) {
3827 __ push(rbx);
3828 }
3830 // Save registers, fpu state, and flags
3831 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3833 // The following is basically a call_VM. However, we need the precise
3834 // address of the call in order to generate an oopmap. Hence, we do all the
3835 // work ourselves.
3837 __ set_last_Java_frame(noreg, noreg, NULL);
3839 // The return address must always be correct so that the frame constructor never
3840 // sees an invalid pc.
3842 if (!cause_return) {
3843 // overwrite the dummy value we pushed on entry
3844 __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3845 __ movptr(Address(rbp, wordSize), c_rarg0);
3846 }
3848 // Do the call
3849 __ mov(c_rarg0, r15_thread);
3850 __ call(RuntimeAddress(call_ptr));
3852 // Set an oopmap for the call site. This oopmap will map all
3853 // oop-registers and debug-info registers as callee-saved. This
3854 // will allow deoptimization at this safepoint to find all possible
3855 // debug-info recordings, as well as let GC find all oops.
3857 oop_maps->add_gc_map( __ pc() - start, map);
3859 Label noException;
3861 __ reset_last_Java_frame(false, false);
3863 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3864 __ jcc(Assembler::equal, noException);
3866 // Exception pending
3868 RegisterSaver::restore_live_registers(masm, save_vectors);
3870 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3872 // No exception case
3873 __ bind(noException);
3875 // Normal exit, restore registers and exit.
3876 RegisterSaver::restore_live_registers(masm, save_vectors);
3878 __ ret(0);
3880 // Make sure all code is generated
3881 masm->flush();
3883 // Fill-out other meta info
3884 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3885 }
3887 //
3888 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3889 //
3890 // Generate a stub that calls into vm to find out the proper destination
3891 // of a java call. All the argument registers are live at this point
3892 // but since this is generic code we don't know what they are and the caller
3893 // must do any gc of the args.
3894 //
3895 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3896 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3898 // allocate space for the code
3899 ResourceMark rm;
3901 CodeBuffer buffer(name, 1000, 512);
3902 MacroAssembler* masm = new MacroAssembler(&buffer);
3904 int frame_size_in_words;
3906 OopMapSet *oop_maps = new OopMapSet();
3907 OopMap* map = NULL;
3909 int start = __ offset();
3911 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3913 int frame_complete = __ offset();
3915 __ set_last_Java_frame(noreg, noreg, NULL);
3917 __ mov(c_rarg0, r15_thread);
3919 __ call(RuntimeAddress(destination));
3922 // Set an oopmap for the call site.
3923 // We need this not only for callee-saved registers, but also for volatile
3924 // registers that the compiler might be keeping live across a safepoint.
3926 oop_maps->add_gc_map( __ offset() - start, map);
3928 // rax contains the address we are going to jump to assuming no exception got installed
3930 // clear last_Java_sp
3931 __ reset_last_Java_frame(false, false);
3932 // check for pending exceptions
3933 Label pending;
3934 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3935 __ jcc(Assembler::notEqual, pending);
3937 // get the returned Method*
3938 __ get_vm_result_2(rbx, r15_thread);
3939 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3941 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3943 RegisterSaver::restore_live_registers(masm);
3945 // We are back to the original state on entry and ready to go.
3947 __ jmp(rax);
3949 // Pending exception after the safepoint
3951 __ bind(pending);
3953 RegisterSaver::restore_live_registers(masm);
3955 // exception pending => remove activation and forward to exception handler
3957 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3959 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3960 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3962 // -------------
3963 // make sure all code is generated
3964 masm->flush();
3966 // return the blob
3967 // frame_size_words or bytes??
3968 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3969 }
3972 //------------------------------Montgomery multiplication------------------------
3973 //
3975 #ifndef _WINDOWS
3977 #define ASM_SUBTRACT
3979 #ifdef ASM_SUBTRACT
3980 // Subtract 0:b from carry:a. Return carry.
3981 static unsigned long
3982 sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
3983 long i = 0, cnt = len;
3984 unsigned long tmp;
3985 asm volatile("clc; "
3986 "0: ; "
3987 "mov (%[b], %[i], 8), %[tmp]; "
3988 "sbb %[tmp], (%[a], %[i], 8); "
3989 "inc %[i]; dec %[cnt]; "
3990 "jne 0b; "
3991 "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
3992 : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
3993 : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
3994 : "memory");
3995 return tmp;
3996 }
3997 #else // ASM_SUBTRACT
3998 typedef int __attribute__((mode(TI))) int128;
4000 // Subtract 0:b from carry:a. Return carry.
4001 static unsigned long
4002 sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
4003 int128 tmp = 0;
4004 int i;
4005 for (i = 0; i < len; i++) {
4006 tmp += a[i];
4007 tmp -= b[i];
4008 a[i] = tmp;
4009 tmp >>= 64;
4010 assert(-1 <= tmp && tmp <= 0, "invariant");
4011 }
4012 return tmp + carry;
4013 }
4014 #endif // ! ASM_SUBTRACT
4016 // Multiply (unsigned) Long A by Long B, accumulating the double-
4017 // length result into the accumulator formed of T0, T1, and T2.
4018 #define MACC(A, B, T0, T1, T2) \
4019 do { \
4020 unsigned long hi, lo; \
4021 asm volatile("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
4022 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
4023 : "r"(A), "a"(B) : "cc"); \
4024 } while(0)
4026 // As above, but add twice the double-length result into the
4027 // accumulator.
4028 #define MACC2(A, B, T0, T1, T2) \
4029 do { \
4030 unsigned long hi, lo; \
4031 asm volatile("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4;" \
4032 "add %%rax, %2; adc %%rdx, %3; adc $0, %4" \
4033 : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2) \
4034 : "r"(A), "a"(B) : "cc"); \
4035 } while(0)
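// Semantically, both macros are (a sketch in plain C, assuming a 128-bit
// integer type):
//   unsigned __int128 p = (unsigned __int128)A * B;
//   MACC:  (T2:T1:T0) += p;       // 192-bit accumulate
//   MACC2: (T2:T1:T0) += 2 * p;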
4037 // Fast Montgomery multiplication. The derivation of the algorithm is
4038 // in A Cryptographic Library for the Motorola DSP56000,
4039 // Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.
4041 static void __attribute__((noinline))
4042 montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
4043 unsigned long m[], unsigned long inv, int len) {
4044 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4045 int i;
4047 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
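// Why the reduction works (standard Montgomery argument, word size 2^64):
// inv == -n[0]^-1 mod 2^64, so choosing m[i] = t0 * inv below makes
// t0 + m[i]*n[0] == 0 (mod 2^64); the low accumulator word cancels,
// which is exactly what the t0 == 0 assert checks.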
4049 for (i = 0; i < len; i++) {
4050 int j;
4051 for (j = 0; j < i; j++) {
4052 MACC(a[j], b[i-j], t0, t1, t2);
4053 MACC(m[j], n[i-j], t0, t1, t2);
4054 }
4055 MACC(a[i], b[0], t0, t1, t2);
4056 m[i] = t0 * inv;
4057 MACC(m[i], n[0], t0, t1, t2);
4059 assert(t0 == 0, "broken Montgomery multiply");
4061 t0 = t1; t1 = t2; t2 = 0;
4062 }
4064 for (i = len; i < 2*len; i++) {
4065 int j;
4066 for (j = i-len+1; j < len; j++) {
4067 MACC(a[j], b[i-j], t0, t1, t2);
4068 MACC(m[j], n[i-j], t0, t1, t2);
4069 }
4070 m[i-len] = t0;
4071 t0 = t1; t1 = t2; t2 = 0;
4072 }
4074 while (t0)
4075 t0 = sub(m, n, t0, len);
4076 }
4078 // Fast Montgomery squaring. This uses asymptotically 25% fewer
4079 // multiplies so it should be up to 25% faster than Montgomery
4080 // multiplication. However, its loop control is more complex and it
4081 // may actually run slower on some machines.
4083 static void __attribute__((noinline))
4084 montgomery_square(unsigned long a[], unsigned long n[],
4085 unsigned long m[], unsigned long inv, int len) {
4086 unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
4087 int i;
4089 assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");
4091 for (i = 0; i < len; i++) {
4092 int j;
4093 int end = (i+1)/2;
4094 for (j = 0; j < end; j++) {
4095 MACC2(a[j], a[i-j], t0, t1, t2);
4096 MACC(m[j], n[i-j], t0, t1, t2);
4097 }
4098 if ((i & 1) == 0) {
4099 MACC(a[j], a[j], t0, t1, t2);
4100 }
4101 for (; j < i; j++) {
4102 MACC(m[j], n[i-j], t0, t1, t2);
4103 }
4104 m[i] = t0 * inv;
4105 MACC(m[i], n[0], t0, t1, t2);
4107 assert(t0 == 0, "broken Montgomery square");
4109 t0 = t1; t1 = t2; t2 = 0;
4110 }
4112 for (i = len; i < 2*len; i++) {
4113 int start = i-len+1;
4114 int end = start + (len - start)/2;
4115 int j;
4116 for (j = start; j < end; j++) {
4117 MACC2(a[j], a[i-j], t0, t1, t2);
4118 MACC(m[j], n[i-j], t0, t1, t2);
4119 }
4120 if ((i & 1) == 0) {
4121 MACC(a[j], a[j], t0, t1, t2);
4122 }
4123 for (; j < len; j++) {
4124 MACC(m[j], n[i-j], t0, t1, t2);
4125 }
4126 m[i-len] = t0;
4127 t0 = t1; t1 = t2; t2 = 0;
4128 }
4130 while (t0)
4131 t0 = sub(m, n, t0, len);
4132 }
4134 // Swap words in a longword.
4135 static unsigned long swap(unsigned long x) {
4136 return (x << 32) | (x >> 32);
4137 }
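// e.g. swap(0x0000000100000002UL) == 0x0000000200000001UL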
4139 // Copy len longwords from s to d, word-swapping as we go. The
4140 // destination array is reversed.
4141 static void reverse_words(unsigned long *s, unsigned long *d, int len) {
4142 d += len;
4143 while(len-- > 0) {
4144 d--;
4145 *d = swap(*s);
4146 s++;
4147 }
4148 }
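// The jint arrays passed in below come most-significant word first (Java
// BigInteger magnitude order, an assumption of this sketch); reverse_words
// turns them into the little-endian unsigned long layout the multiply
// routines above expect, and back again for the result.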
4150 // The threshold at which squaring is advantageous was determined
4151 // experimentally on an i7-3930K (Sandy Bridge-E) CPU @ 3.5GHz.
4152 #define MONTGOMERY_SQUARING_THRESHOLD 64
4154 void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
4155 jint len, jlong inv,
4156 jint *m_ints) {
4157 assert(len % 2 == 0, "array length in montgomery_multiply must be even");
4158 int longwords = len/2;
4160 // Make very sure we don't use so much space that the stack might
4161 // overflow. 512 jints corresponds to a 16384-bit integer and
4162 // will use here a total of 8k bytes of stack space.
4163 int total_allocation = longwords * sizeof (unsigned long) * 4;
4164 guarantee(total_allocation <= 8192, "must be");
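// (worked out: 512 jints -> 256 longwords; 256 * 8 bytes * 4 arrays
// = 8192 bytes, exactly the bound guaranteed above)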
4165 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4167 // Local scratch arrays
4168 unsigned long
4169 *a = scratch + 0 * longwords,
4170 *b = scratch + 1 * longwords,
4171 *n = scratch + 2 * longwords,
4172 *m = scratch + 3 * longwords;
4174 reverse_words((unsigned long *)a_ints, a, longwords);
4175 reverse_words((unsigned long *)b_ints, b, longwords);
4176 reverse_words((unsigned long *)n_ints, n, longwords);
4178 ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);
4180 reverse_words(m, (unsigned long *)m_ints, longwords);
4181 }
4183 void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
4184 jint len, jlong inv,
4185 jint *m_ints) {
4186 assert(len % 2 == 0, "array length in montgomery_square must be even");
4187 int longwords = len/2;
4189 // Make very sure we don't use so much space that the stack might
4190 // overflow. 512 jints corresponds to a 16384-bit integer and
4191 // will use here a total of 6k bytes of stack space.
4192 int total_allocation = longwords * sizeof (unsigned long) * 3;
4193 guarantee(total_allocation <= 8192, "must be");
4194 unsigned long *scratch = (unsigned long *)alloca(total_allocation);
4196 // Local scratch arrays
4197 unsigned long
4198 *a = scratch + 0 * longwords,
4199 *n = scratch + 1 * longwords,
4200 *m = scratch + 2 * longwords;
4202 reverse_words((unsigned long *)a_ints, a, longwords);
4203 reverse_words((unsigned long *)n_ints, n, longwords);
4205 // montgomery_square fails to pass BigIntegerTest on Solaris amd64
4206 // on jdk7 and jdk8.
4207 #ifndef SOLARIS
4208 if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
4209 #else
4210 if (0) {
4211 #endif
4212 ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
4213 } else {
4214 ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
4215 }
4217 reverse_words(m, (unsigned long *)m_ints, longwords);
4218 }
4220 #endif // !_WINDOWS
4222 #ifdef COMPILER2
4223 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
4224 //
4225 //------------------------------generate_exception_blob---------------------------
4226 // creates the exception blob at the end
4227 // Using the exception blob, this code is jumped to from a compiled method.
4228 // (see emit_exception_handler in x86_64.ad file)
4229 //
4230 // Given an exception pc at a call we call into the runtime for the
4231 // handler in this method. This handler might merely restore state
4232 // (i.e. callee-save registers), unwind the frame, and jump to the
4233 // exception handler for the nmethod if there is no Java level handler
4234 // for the nmethod.
4235 //
4236 // This code is entered with a jmp.
4237 //
4238 // Arguments:
4239 // rax: exception oop
4240 // rdx: exception pc
4241 //
4242 // Results:
4243 // rax: exception oop
4244 // rdx: exception pc in caller or ???
4245 // destination: exception handler of caller
4246 //
4247 // Note: the exception pc MUST be at a call (precise debug information)
4248 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
4249 //
4251 void OptoRuntime::generate_exception_blob() {
4252 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
4253 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
4254 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
4256 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
4258 // Allocate space for the code
4259 ResourceMark rm;
4260 // Setup code generation tools
4261 CodeBuffer buffer("exception_blob", 2048, 1024);
4262 MacroAssembler* masm = new MacroAssembler(&buffer);
4265 address start = __ pc();
4267 // Exception pc is 'return address' for stack walker
4268 __ push(rdx);
4269 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
4271 // Save callee-saved registers. See x86_64.ad.
4273 // rbp is an implicitly saved callee saved register (i.e., the calling
4274 // convention will save/restore it in the prolog/epilog). Other than that
4275 // there are no callee save registers now that adapter frames are gone.
4277 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4279 // Store exception in Thread object. We cannot pass any arguments to the
4280 // handle_exception call, since we do not want to make any assumption
4281 // about the size of the frame where the exception happened in.
4282 // c_rarg0 is either rdi (Linux) or rcx (Windows).
4283 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
4284 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4286 // This call does all the hard work. It checks if an exception handler
4287 // exists in the method.
4288 // If so, it returns the handler address.
4289 // If not, it prepares for stack-unwinding, restoring the callee-save
4290 // registers of the frame being removed.
4291 //
4292 // address OptoRuntime::handle_exception_C(JavaThread* thread)
4294 // At a method handle call, the stack may not be properly aligned
4295 // when returning with an exception.
4296 address the_pc = __ pc();
4297 __ set_last_Java_frame(noreg, noreg, the_pc);
4298 __ mov(c_rarg0, r15_thread);
4299 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
4300 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4302 // Set an oopmap for the call site. This oopmap will only be used if we
4303 // are unwinding the stack. Hence, all locations will be dead.
4304 // Callee-saved registers will be the same as the frame above (i.e.,
4305 // handle_exception_stub), since they were restored when we got the
4306 // exception.
4308 OopMapSet* oop_maps = new OopMapSet();
4310 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4312 __ reset_last_Java_frame(false, true);
4314 // Restore callee-saved registers
4316 // rbp is an implicitly saved callee-saved register (i.e., the calling
4317 // convention will save/restore it in the prolog/epilog). Other than that
4318 // there are no callee save registers now that adapter frames are gone.
4320 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4322 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4323 __ pop(rdx); // No need for exception pc anymore
4325 // rax: exception handler
4327 // We have a handler in rax (could be deopt blob).
4328 __ mov(r8, rax);
4330 // Get the exception oop
4331 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4332 // Get the exception pc in case we are deoptimized
4333 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4334 #ifdef ASSERT
4335 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4336 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4337 #endif
4338 // Clear the exception oop so GC no longer processes it as a root.
4339 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4341 // rax: exception oop
4342 // r8: exception handler
4343 // rdx: exception pc
4344 // Jump to handler
4346 __ jmp(r8);
4348 // Make sure all code is generated
4349 masm->flush();
4351 // Set exception blob
4352 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4353 }
4354 #endif // COMPILER2