Thu, 03 Jan 2013 16:30:47 -0800
8005544: Use 256bit YMM registers in arraycopy stubs on x86
Summary: Use YMM registers in arraycopy and array_fill stubs.
Reviewed-by: roland, twisti
/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};
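
// Hedged worked example for the layout above (assumes the usual platform
// values, not verified here): on Win64 frame::arg_reg_save_area_bytes is 32,
// so rbp_off = 32/4 = slot 8 and return_off = slot 10; on System V it is 0,
// giving rbp_off = slot 0 and return_off = slot 2.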
class RegisterSaver {
  // Capture info about frame layout. Layout offsets are in jint
  // units because compiler frame slots are jints.
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + 160/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    DEF_XMM_OFFS(2),
    DEF_XMM_OFFS(3),
    DEF_XMM_OFFS(4),
    DEF_XMM_OFFS(5),
    DEF_XMM_OFFS(6),
    DEF_XMM_OFFS(7),
    DEF_XMM_OFFS(8),
    DEF_XMM_OFFS(9),
    DEF_XMM_OFFS(10),
    DEF_XMM_OFFS(11),
    DEF_XMM_OFFS(12),
    DEF_XMM_OFFS(13),
    DEF_XMM_OFFS(14),
    DEF_XMM_OFFS(15),
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };
 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};
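
// A minimal usage sketch (illustrative only, not taken from this file):
// runtime blobs typically bracket a VM call with this pair, e.g.
//
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
//   // ... call into the VM, record `map` at the call site ...
//   RegisterSaver::restore_live_registers(masm);
//
// Pass save_vectors/restore_vectors = true only around safepoints where C2
// may have live 256-bit YMM values (see SharedRuntime::is_wide_vector below).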
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int vect_words = 0;
#ifdef COMPILER2
  if (save_vectors) {
    assert(UseAVX > 0, "256bit vectors are supported only with AVX");
    assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
    // Save upper half of YMM registers
    vect_words = 16 * 16 / wordSize;
    additional_frame_words += vect_words;
  }
#else
  assert(!save_vectors, "vectors are generated only by C2");
#endif
  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words*wordSize +
                                     reg_save_size*BytesPerInt, 16);
  // OopMap frame size is in compiler stack slots (jints), not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes
  if (vect_words > 0) {
    assert(vect_words*wordSize == 256, "");
    __ subptr(rsp, 256); // Save upper half of YMM registers
    __ vextractf128h(Address(rsp,   0), xmm0);
    __ vextractf128h(Address(rsp,  16), xmm1);
    __ vextractf128h(Address(rsp,  32), xmm2);
    __ vextractf128h(Address(rsp,  48), xmm3);
    __ vextractf128h(Address(rsp,  64), xmm4);
    __ vextractf128h(Address(rsp,  80), xmm5);
    __ vextractf128h(Address(rsp,  96), xmm6);
    __ vextractf128h(Address(rsp, 112), xmm7);
    __ vextractf128h(Address(rsp, 128), xmm8);
    __ vextractf128h(Address(rsp, 144), xmm9);
    __ vextractf128h(Address(rsp, 160), xmm10);
    __ vextractf128h(Address(rsp, 176), xmm11);
    __ vextractf128h(Address(rsp, 192), xmm12);
    __ vextractf128h(Address(rsp, 208), xmm13);
    __ vextractf128h(Address(rsp, 224), xmm14);
    __ vextractf128h(Address(rsp, 240), xmm15);
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  // Set an oopmap for the call site. This oopmap will map all
  // oop-registers and debug-info registers as callee-saved. This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap,
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm0_off ), xmm0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm1_off ), xmm1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm2_off ), xmm2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm3_off ), xmm3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm4_off ), xmm4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm5_off ), xmm5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm6_off ), xmm6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm7_off ), xmm7->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm8_off ), xmm8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm9_off ), xmm9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm10_off), xmm10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm11_off), xmm11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm12_off), xmm12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm13_off), xmm13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm14_off), xmm14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm15_off), xmm15->as_VMReg());

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm0H_off ), xmm0->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm1H_off ), xmm1->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm2H_off ), xmm2->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm3H_off ), xmm3->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm4H_off ), xmm4->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm5H_off ), xmm5->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm6H_off ), xmm6->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm7H_off ), xmm7->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm8H_off ), xmm8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm9H_off ), xmm9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm10H_off), xmm10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm11H_off), xmm11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm12H_off), xmm12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm13H_off), xmm13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm14H_off), xmm14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET(xmm15H_off), xmm15->as_VMReg()->next());
  }

  return map;
}
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }
#ifdef COMPILER2
  if (restore_vectors) {
    // Restore upper half of YMM registers.
    assert(UseAVX > 0, "256bit vectors are supported only with AVX");
    assert(MaxVectorSize == 32, "only 256bit vectors are supported now");
    __ vinsertf128h(xmm0,  Address(rsp,   0));
    __ vinsertf128h(xmm1,  Address(rsp,  16));
    __ vinsertf128h(xmm2,  Address(rsp,  32));
    __ vinsertf128h(xmm3,  Address(rsp,  48));
    __ vinsertf128h(xmm4,  Address(rsp,  64));
    __ vinsertf128h(xmm5,  Address(rsp,  80));
    __ vinsertf128h(xmm6,  Address(rsp,  96));
    __ vinsertf128h(xmm7,  Address(rsp, 112));
    __ vinsertf128h(xmm8,  Address(rsp, 128));
    __ vinsertf128h(xmm9,  Address(rsp, 144));
    __ vinsertf128h(xmm10, Address(rsp, 160));
    __ vinsertf128h(xmm11, Address(rsp, 176));
    __ vinsertf128h(xmm12, Address(rsp, 192));
    __ vinsertf128h(xmm13, Address(rsp, 208));
    __ vinsertf128h(xmm14, Address(rsp, 224));
    __ vinsertf128h(xmm15, Address(rsp, 240));
    __ addptr(rsp, 256);
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2");
#endif
  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}
// Is a vector's size (in bytes) bigger than the size saved by default?
// The 16-byte XMM registers are saved by default with the fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}
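
// Illustrative examples following the rule above: a 16-byte XMM vector is
// covered by fxsave, so is_wide_vector(16) is false; a 32-byte YMM vector is
// not, so is_wide_vector(32) is true and its upper halves must be saved
// explicitly via the save_vectors path in RegisterSaver above.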
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
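
// Hedged arithmetic example: with 4-byte stack slots, incoming stack slot 0
// maps to reg2offset_in = (0 + 4) * 4 = 16 bytes above rbp -- i.e. just past
// the saved rbp (8 bytes) and the return address (8 bytes).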
// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher. Register values up to
// RegisterImpl::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static jni methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the java ABI we ought to at least get some
// advantage out of it.
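
// Hedged illustration of the "shift" (assuming the usual System V register
// assignments): the C integer argument registers are rdi, rsi, rdx, rcx, r8,
// r9, while the Java convention below starts one register later (j_rarg0 ==
// rsi, j_rarg1 == rdx, ...). A JNI stub can then drop the JNIEnv* into the
// skipped first C register without moving any of the Java arguments.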
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}
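
// Hedged worked example for the mapping above: a signature
// (int, long, Object, float) arrives as sig_bt = {T_INT, T_LONG, T_VOID,
// T_OBJECT, T_FLOAT} and is assigned j_rarg0 (set1), j_rarg1 (set2, with the
// T_VOID half set_bad), j_rarg2 (set2) and j_farg0 (set1), for a return
// value of 0 stack slots.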
// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee.
  // rax isn't live so capture the return address while we easily can.
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}
static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need. Plus one word for
  // the return address location, since we store it first rather than hold
  // it in rax across all the shuffling.

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a
    // long/double in a single slot on a 64-bit VM and it would be silly to
    // break them up, the interpreter leaves one slot empty and only stores
    // to a single slot. In this case the slot that is occupied is the
    // T_VOID slot. (See, I said it was confusing.)
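    //
    // Hedged example of the layout above: for the T_LONG at i == 0
    // (st_off == 32, next_off == 24), the value is written at next_off == 24
    // and, in debug builds, the unused slot at st_off == 32 is poisoned with
    // known junk by the ASSERT blocks below.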
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory, use rax
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG;
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller), so move only 32 bits to the slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG;
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}
static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes on an i2c entry; otherwise we
  // lose the alignment we expect in all compiled code, and the register
  // save code can segv when fxsave instructions find an improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }
  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args. Since up to 6 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte C2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }
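
  // Hedged arithmetic example for the computation above: with
  // comp_args_on_stack == 3 (4-byte slots), round_to(3*4, 8) >> 3 == 2 words,
  // already an even count, so rsp is dropped by 16 bytes before the
  // alignment mask below.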
  // Ensure compiled code always sees the stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address, misaligning the stack so that the youngest
  // frame sees it exactly as the placement of a call instruction would
  // leave it.
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
           "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address)
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
      // and if we end up going through a c2i because of a miss a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed going downward, so the LSW is at the LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put Method* where a c2i would expect it, should we end up there;
  // only needed because c2 resolve stubs return Method* as a result in rax
  __ mov(rax, rbx);
  __ jmp(r11);
}
// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know rbx holds the Method* during calls
  // to the interpreter. The args start out packed in the compiled layout. They
  // need to be unpacked into the interpreter layout. This will almost always
  // require some stack space. We grow the current (compiled) stack, then repack
  // the args. We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  // NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3
  };
#else
  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3,
    c_farg4, c_farg5, c_farg6, c_farg7
  };
#endif // _WIN64

  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
    case T_METADATA:
      if (int_args < Argument::n_int_register_parameters_c) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
        fp_args++;
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_c) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
        int_args++;
        // Allocate slots for the callee to stuff register args on the stack.
        stk_args += 2;
#endif
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID: // Halves of longs and doubles
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}
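
// Hedged Win64 example of the convention above: for (int, double, int) the
// assignments are c_rarg0, then the second FP register (fp_args was bumped
// past slot 0 by the int), then c_rarg2 -- each argument burns a position in
// *both* register files on Windows, and at least 8 slots (32 bytes of shadow
// space) are always reserved by the clamp at the end.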
// On 64-bit we store integer-like items to the stack as 64-bit items
// (SPARC ABI) even though Java would only store 32 bits for a parameter.
// On 32-bit it would simply be 32 bits, so this routine does 32->32 on
// 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if the oop is NULL; if so, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If arg is on the stack then place it, otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}
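
// Hedged illustration of the handle protocol above: for a non-NULL oop
// arriving in j_rarg1, the oop is stored at handle-area slot 1 and the
// callee receives the address of that slot (a jobject); for a NULL oop the
// cmov passes NULL itself, as JNI requires.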
// A float arg may have to do float reg to int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}
// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}
// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_out(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = first_arg ; i < arg_count ; i++ ) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ subptr(rsp, 2*wordSize);
      __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_XMMRegister()) {
      __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
      __ addptr(rsp, 2*wordSize);
    }
  }
}
static void save_or_restore_arguments(MacroAssembler* masm,
                                      const int stack_slots,
                                      const int total_in_args,
                                      const int arg_save_area,
                                      OopMap* map,
                                      VMRegPair* in_regs,
                                      BasicType* in_sig_bt) {
  // If map is non-NULL then the code should store the values,
  // otherwise it should load them.
  int slot = arg_save_area;
  // Save down double words first
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot += VMRegImpl::slots_per_word;
      assert(slot <= stack_slots, "overflow");
      if (map != NULL) {
        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
      } else {
        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
      }
    }
    if (in_regs[i].first()->is_Register() &&
        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
      int offset = slot * VMRegImpl::stack_slot_size;
      if (map != NULL) {
        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
        if (in_sig_bt[i] == T_ARRAY) {
          map->set_oop(VMRegImpl::stack2reg(slot));
        }
      } else {
        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
      }
      slot += VMRegImpl::slots_per_word;
    }
  }
  // Save or restore single word registers
  for ( int i = 0; i < total_in_args; i++) {
    if (in_regs[i].first()->is_Register()) {
      int offset = slot * VMRegImpl::stack_slot_size;
      slot++;
      assert(slot <= stack_slots, "overflow");

      // Value lives in an input register; we must flush it to the stack
      const Register reg = in_regs[i].first()->as_Register();
      switch (in_sig_bt[i]) {
        case T_BOOLEAN:
        case T_CHAR:
        case T_BYTE:
        case T_SHORT:
        case T_INT:
          if (map != NULL) {
            __ movl(Address(rsp, offset), reg);
          } else {
            __ movl(reg, Address(rsp, offset));
          }
          break;
        case T_ARRAY:
        case T_LONG:
          // handled above
          break;
        case T_OBJECT:
        default: ShouldNotReachHere();
      }
    } else if (in_regs[i].first()->is_XMMRegister()) {
      if (in_sig_bt[i] == T_FLOAT) {
        int offset = slot * VMRegImpl::stack_slot_size;
        slot++;
        assert(slot <= stack_slots, "overflow");
        if (map != NULL) {
          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
        } else {
          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
        }
      }
    } else if (in_regs[i].first()->is_stack()) {
      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
      }
    }
  }
}
// Check GC_locker::needs_gc and enter the runtime if it's true. This
// keeps a new JNI critical region from starting until a GC has been
// forced. Save down any oops in registers and describe them in an
// OopMap.
static void check_needs_gc_for_critical_native(MacroAssembler* masm,
                                               int stack_slots,
                                               int total_c_args,
                                               int total_in_args,
                                               int arg_save_area,
                                               OopMapSet* oop_maps,
                                               VMRegPair* in_regs,
                                               BasicType* in_sig_bt) {
  __ block_comment("check GC_locker::needs_gc");
  Label cont;
  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
  __ jcc(Assembler::equal, cont);

  // Save down any incoming oops and call into the runtime to halt for a GC

  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, map, in_regs, in_sig_bt);

  address the_pc = __ pc();
  oop_maps->add_gc_map( __ offset(), map);
  __ set_last_Java_frame(rsp, noreg, the_pc);

  __ block_comment("block_for_jni_critical");
  __ movptr(c_rarg0, r15_thread);
  __ mov(r12, rsp); // remember sp
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();

  __ reset_last_Java_frame(false, true);

  save_or_restore_arguments(masm, stack_slots, total_in_args,
                            arg_save_area, NULL, in_regs, in_sig_bt);

  __ bind(cont);
#ifdef ASSERT
  if (StressCriticalJNINatives) {
    // Stress register saving
    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, map, in_regs, in_sig_bt);
    // Destroy argument registers
    for (int i = 0; i < total_in_args - 1; i++) {
      if (in_regs[i].first()->is_Register()) {
        const Register reg = in_regs[i].first()->as_Register();
        __ xorptr(reg, reg);
      } else if (in_regs[i].first()->is_XMMRegister()) {
        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
      } else if (in_regs[i].first()->is_FloatRegister()) {
        ShouldNotReachHere();
      } else if (in_regs[i].first()->is_stack()) {
        // Nothing to do
      } else {
        ShouldNotReachHere();
      }
      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
        i++;
      }
    }

    save_or_restore_arguments(masm, stack_slots, total_in_args,
                              arg_save_area, NULL, in_regs, in_sig_bt);
  }
#endif
}
// Unpack an array argument into a pointer to the body and the length
// if the array is non-null; otherwise pass 0 for both.
static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
  Register tmp_reg = rax;
  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
         "possible collision");
  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
         "possible collision");

  // Pass the length, ptr pair
  Label is_null, done;
  VMRegPair tmp;
  tmp.set_ptr(tmp_reg->as_VMReg());
  if (reg.first()->is_stack()) {
    // Load the arg up from the stack
    move_ptr(masm, reg, tmp);
    reg = tmp;
  }
  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
  __ jccb(Assembler::equal, is_null);
  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move_ptr(masm, tmp, body_arg);
  // load the length relative to the body.
  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
  move32_64(masm, tmp, length_arg);
  __ jmpb(done);
  __ bind(is_null);
  // Pass zeros
  __ xorptr(tmp_reg, tmp_reg);
  move_ptr(masm, tmp, body_arg);
  move32_64(masm, tmp, length_arg);
  __ bind(done);
}
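
// Hedged example of the unpacking above: a non-NULL int[] argument becomes
// body = oop + arrayOopDesc::base_offset_in_bytes(T_INT) and length = the
// array's length field; a NULL argument is passed as (body = 0, length = 0).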
1459 // Different signatures may require very different orders for the move
1460 // to avoid clobbering other arguments. There's no simple way to
1461 // order them safely. Compute a safe order for issuing stores and
1462 // break any cycles in those stores. This code is fairly general but
1463 // it's not necessary on the other platforms so we keep it in the
1464 // platform dependent code instead of moving it into a shared file.
1465 // (See bugs 7013347 & 7145024.)
1466 // Note that this code is specific to LP64.
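// A minimal worked example (registers chosen for illustration): suppose the
// shuffle requires the moves rdi -> rsi and rsi -> rdi. Each store kills the
// other's source, so the two moves form a cycle. break_cycle() rewrites the
// chain using the temp register (rbx below) into
//   rsi -> rbx,  rdi -> rsi,  rbx -> rdi
// which can then be emitted in that order without clobbering anything.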
1467 class ComputeMoveOrder: public StackObj {
1468 class MoveOperation: public ResourceObj {
1469 friend class ComputeMoveOrder;
1470 private:
1471 VMRegPair _src;
1472 VMRegPair _dst;
1473 int _src_index;
1474 int _dst_index;
1475 bool _processed;
1476 MoveOperation* _next;
1477 MoveOperation* _prev;
1479 static int get_id(VMRegPair r) {
1480 return r.first()->value();
1481 }
1483 public:
1484 MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1485 _src(src)
1486 , _src_index(src_index)
1487 , _dst(dst)
1488 , _dst_index(dst_index)
1489 , _next(NULL)
1490 , _prev(NULL)
1491 , _processed(false) {
1492 }
1494 VMRegPair src() const { return _src; }
1495 int src_id() const { return get_id(src()); }
1496 int src_index() const { return _src_index; }
1497 VMRegPair dst() const { return _dst; }
1498 void set_dst(int i, VMRegPair dst) { _dst_index = i, _dst = dst; }
1499 int dst_index() const { return _dst_index; }
1500 int dst_id() const { return get_id(dst()); }
1501 MoveOperation* next() const { return _next; }
1502 MoveOperation* prev() const { return _prev; }
1503 void set_processed() { _processed = true; }
1504 bool is_processed() const { return _processed; }
1506 // insert
1507 void break_cycle(VMRegPair temp_register) {
1508 // create a new store following the last store
1509 // to move from the temp_register to the original
1510 MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1512 // break the cycle of links and insert new_store at the end
1513 // break the reverse link.
1514 MoveOperation* p = prev();
1515 assert(p->next() == this, "must be");
1516 _prev = NULL;
1517 p->_next = new_store;
1518 new_store->_prev = p;
1520 // change the original store to save its value in the temp.
1521 set_dst(-1, temp_register);
1522 }
1524 void link(GrowableArray<MoveOperation*>& killer) {
1525 // link this store in front of the store that it depends on
1526 MoveOperation* n = killer.at_grow(src_id(), NULL);
1527 if (n != NULL) {
1528 assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1529 _next = n;
1530 n->_prev = this;
1531 }
1532 }
1533 };
1535 private:
1536 GrowableArray<MoveOperation*> edges;
1538 public:
1539 ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1540 BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1541 // Move operations where the dest is the stack can all be
1542 // scheduled first since they can't interfere with the other moves.
1543 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1544 if (in_sig_bt[i] == T_ARRAY) {
1545 c_arg--;
1546 if (out_regs[c_arg].first()->is_stack() &&
1547 out_regs[c_arg + 1].first()->is_stack()) {
1548 arg_order.push(i);
1549 arg_order.push(c_arg);
1550 } else {
1551 if (out_regs[c_arg].first()->is_stack() ||
1552 in_regs[i].first() == out_regs[c_arg].first()) {
1553 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1554 } else {
1555 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1556 }
1557 }
1558 } else if (in_sig_bt[i] == T_VOID) {
1559 arg_order.push(i);
1560 arg_order.push(c_arg);
1561 } else {
1562 if (out_regs[c_arg].first()->is_stack() ||
1563 in_regs[i].first() == out_regs[c_arg].first()) {
1564 arg_order.push(i);
1565 arg_order.push(c_arg);
1566 } else {
1567 add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1568 }
1569 }
1570 }
1571 // Break any cycles in the register moves and emit them in the
1572 // proper order.
1573 GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1574 for (int i = 0; i < stores->length(); i++) {
1575 arg_order.push(stores->at(i)->src_index());
1576 arg_order.push(stores->at(i)->dst_index());
1577 }
1578 }
1580 // Collect all the move operations
1581 void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1582 if (src.first() == dst.first()) return;
1583 edges.append(new MoveOperation(src_index, src, dst_index, dst));
1584 }
1586 // Walk the edges breaking cycles between moves. The result list
1587 // can be walked in order to produce the proper set of stores
1588 GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1589 // Record which moves kill which values
1590 GrowableArray<MoveOperation*> killer;
1591 for (int i = 0; i < edges.length(); i++) {
1592 MoveOperation* s = edges.at(i);
1593 assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1594 killer.at_put_grow(s->dst_id(), s, NULL);
1595 }
1596 assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1597 "make sure temp isn't in the registers that are killed");
1599 // create links between loads and stores
1600 for (int i = 0; i < edges.length(); i++) {
1601 edges.at(i)->link(killer);
1602 }
1604 // at this point, all the move operations are chained together
1605 // in a doubly linked list. Processing it backwards finds
1606 // the beginning of the chain, forwards finds the end. If there's
1607 // a cycle it can be broken at any point, so pick an edge and walk
1608 // backward until the list ends or we end where we started.
1609 GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1610 for (int e = 0; e < edges.length(); e++) {
1611 MoveOperation* s = edges.at(e);
1612 if (!s->is_processed()) {
1613 MoveOperation* start = s;
1614 // search for the beginning of the chain or cycle
1615 while (start->prev() != NULL && start->prev() != s) {
1616 start = start->prev();
1617 }
1618 if (start->prev() == s) {
1619 start->break_cycle(temp_register);
1620 }
1621 // walk the chain forward inserting to store list
1622 while (start != NULL) {
1623 stores->append(start);
1624 start->set_processed();
1625 start = start->next();
1626 }
1627 }
1628 }
1629 return stores;
1630 }
1631 };
1633 static void verify_oop_args(MacroAssembler* masm,
1634 methodHandle method,
1635 const BasicType* sig_bt,
1636 const VMRegPair* regs) {
1637 Register temp_reg = rbx; // not part of any compiled calling seq
1638 if (VerifyOops) {
1639 for (int i = 0; i < method->size_of_parameters(); i++) {
1640 if (sig_bt[i] == T_OBJECT ||
1641 sig_bt[i] == T_ARRAY) {
1642 VMReg r = regs[i].first();
1643 assert(r->is_valid(), "bad oop arg");
1644 if (r->is_stack()) {
1645 __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1646 __ verify_oop(temp_reg);
1647 } else {
1648 __ verify_oop(r->as_Register());
1649 }
1650 }
1651 }
1652 }
1653 }
1655 static void gen_special_dispatch(MacroAssembler* masm,
1656 methodHandle method,
1657 const BasicType* sig_bt,
1658 const VMRegPair* regs) {
1659 verify_oop_args(masm, method, sig_bt, regs);
1660 vmIntrinsics::ID iid = method->intrinsic_id();
1662 // Now write the args into the outgoing interpreter space
1663 bool has_receiver = false;
1664 Register receiver_reg = noreg;
1665 int member_arg_pos = -1;
1666 Register member_reg = noreg;
1667 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1668 if (ref_kind != 0) {
1669 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1670 member_reg = rbx; // known to be free at this point
1671 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1672 } else if (iid == vmIntrinsics::_invokeBasic) {
1673 has_receiver = true;
1674 } else {
1675 fatal(err_msg_res("unexpected intrinsic id %d", iid));
1676 }
1678 if (member_reg != noreg) {
1679 // Load the member_arg into register, if necessary.
1680 SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1681 VMReg r = regs[member_arg_pos].first();
1682 if (r->is_stack()) {
1683 __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1684 } else {
1685 // no data motion is needed
1686 member_reg = r->as_Register();
1687 }
1688 }
1690 if (has_receiver) {
1691 // Make sure the receiver is loaded into a register.
1692 assert(method->size_of_parameters() > 0, "oob");
1693 assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1694 VMReg r = regs[0].first();
1695 assert(r->is_valid(), "bad receiver arg");
1696 if (r->is_stack()) {
1697 // Porting note: This assumes that compiled calling conventions always
1698 // pass the receiver oop in a register. If this is not true on some
1699 // platform, pick a temp and load the receiver from stack.
1700 fatal("receiver always in a register");
1701 receiver_reg = j_rarg0; // known to be free at this point
1702 __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1703 } else {
1704 // no data motion is needed
1705 receiver_reg = r->as_Register();
1706 }
1707 }
1709 // Figure out which address we are really jumping to:
1710 MethodHandles::generate_method_handle_dispatch(masm, iid,
1711 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1712 }
1714 // ---------------------------------------------------------------------------
1715 // Generate a native wrapper for a given method. The method takes arguments
1716 // in the Java compiled code convention, marshals them to the native
1717 // convention (handlizes oops, etc), transitions to native, makes the call,
1718 // returns to java state (possibly blocking), unhandlizes any result and
1719 // returns.
1720 //
1721 // Critical native functions are a shorthand for the use of
1722 // GetPrimitiveArrayCritical and disallow the use of any other JNI
1723 // functions. The wrapper is expected to unpack the arguments before
1724 // passing them to the callee and to perform checks before and after the
1725 // native call to ensure that the GC_locker
1726 // lock_critical/unlock_critical semantics are followed. Some other
1727 // parts of JNI setup are skipped, like the tear down of the JNI handle
1728 // block and the check for pending exceptions, since it's impossible for
1729 // them to be thrown.
1730 //
1731 // They are roughly structured like this:
1732 // if (GC_locker::needs_gc())
1733 // SharedRuntime::block_for_jni_critical();
1734 // transition to thread_in_native
1735 // unpack array arguments and call native entry point
1736 // check for safepoint in progress
1737 // check if any thread suspend flags are set
1738 // call into JVM and possibly unlock the JNI critical
1739 // if a GC was suppressed while in the critical native.
1740 // transition back to thread_in_Java
1741 // return to caller
1742 //
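// As an illustration of the shape of such a function (class and method
// names assumed): for
//   static native void fill(byte[] buf, byte value);
// the critical entry point takes the unpacked array and no JNIEnv*/jclass:
//   void JavaCritical_com_example_Buf_fill(jint len, jbyte* buf, jbyte value);
// while the ordinary Java_com_example_Buf_fill JNI symbol remains the
// fallback whenever the critical version cannot be used.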
1743 nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1744 methodHandle method,
1745 int compile_id,
1746 BasicType* in_sig_bt,
1747 VMRegPair* in_regs,
1748 BasicType ret_type) {
1749 if (method->is_method_handle_intrinsic()) {
1750 vmIntrinsics::ID iid = method->intrinsic_id();
1751 intptr_t start = (intptr_t)__ pc();
1752 int vep_offset = ((intptr_t)__ pc()) - start;
1753 gen_special_dispatch(masm,
1754 method,
1755 in_sig_bt,
1756 in_regs);
1757 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1758 __ flush();
1759 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1760 return nmethod::new_native_nmethod(method,
1761 compile_id,
1762 masm->code(),
1763 vep_offset,
1764 frame_complete,
1765 stack_slots / VMRegImpl::slots_per_word,
1766 in_ByteSize(-1),
1767 in_ByteSize(-1),
1768 (OopMapSet*)NULL);
1769 }
1770 bool is_critical_native = true;
1771 address native_func = method->critical_native_function();
1772 if (native_func == NULL) {
1773 native_func = method->native_function();
1774 is_critical_native = false;
1775 }
1776 assert(native_func != NULL, "must have function");
1778 // An OopMap for lock (and class if static)
1779 OopMapSet *oop_maps = new OopMapSet();
1780 intptr_t start = (intptr_t)__ pc();
1782 // We have received a description of where all the java args are located
1783 // on entry to the wrapper. We need to convert these args to where
1784 // the jni function will expect them. To figure out where they go
1785 // we convert the java signature to a C signature by inserting
1786 // the hidden arguments as arg[0] and possibly arg[1] (static method).
1788 const int total_in_args = method->size_of_parameters();
1789 int total_c_args = total_in_args;
1790 if (!is_critical_native) {
1791 total_c_args += 1;
1792 if (method->is_static()) {
1793 total_c_args++;
1794 }
1795 } else {
1796 for (int i = 0; i < total_in_args; i++) {
1797 if (in_sig_bt[i] == T_ARRAY) {
1798 total_c_args++;
1799 }
1800 }
1801 }
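// Worked example (signature chosen for illustration): a critical native
// taking (byte[], int, byte[]) has total_in_args == 3 but total_c_args == 5,
// since each array expands into a (length, pointer) pair.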
1803 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1804 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1805 BasicType* in_elem_bt = NULL;
1807 int argc = 0;
1808 if (!is_critical_native) {
1809 out_sig_bt[argc++] = T_ADDRESS;
1810 if (method->is_static()) {
1811 out_sig_bt[argc++] = T_OBJECT;
1812 }
1814 for (int i = 0; i < total_in_args ; i++ ) {
1815 out_sig_bt[argc++] = in_sig_bt[i];
1816 }
1817 } else {
1818 Thread* THREAD = Thread::current();
1819 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1820 SignatureStream ss(method->signature());
1821 for (int i = 0; i < total_in_args ; i++ ) {
1822 if (in_sig_bt[i] == T_ARRAY) {
1823 // Arrays are passed as int, elem* pair
1824 out_sig_bt[argc++] = T_INT;
1825 out_sig_bt[argc++] = T_ADDRESS;
1826 Symbol* atype = ss.as_symbol(CHECK_NULL);
1827 const char* at = atype->as_C_string();
1828 if (strlen(at) == 2) {
1829 assert(at[0] == '[', "must be");
1830 switch (at[1]) {
1831 case 'B': in_elem_bt[i] = T_BYTE; break;
1832 case 'C': in_elem_bt[i] = T_CHAR; break;
1833 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1834 case 'F': in_elem_bt[i] = T_FLOAT; break;
1835 case 'I': in_elem_bt[i] = T_INT; break;
1836 case 'J': in_elem_bt[i] = T_LONG; break;
1837 case 'S': in_elem_bt[i] = T_SHORT; break;
1838 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1839 default: ShouldNotReachHere();
1840 }
1841 }
1842 } else {
1843 out_sig_bt[argc++] = in_sig_bt[i];
1844 in_elem_bt[i] = T_VOID;
1845 }
1846 if (in_sig_bt[i] != T_VOID) {
1847 assert(in_sig_bt[i] == ss.type(), "must match");
1848 ss.next();
1849 }
1850 }
1851 }
1853 // Now figure out where the args must be stored and how much stack space
1854 // they require.
1855 int out_arg_slots;
1856 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
1858 // Compute framesize for the wrapper. We need to handlize all oops in
1859 // incoming registers
1861 // Calculate the total number of stack slots we will need.
1863 // First count the abi requirement plus all of the outgoing args
1864 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1866 // Now the space for the inbound oop handle area
1867 int total_save_slots = 6 * VMRegImpl::slots_per_word; // 6 arguments passed in registers
1868 if (is_critical_native) {
1869 // Critical natives may have to call out so they need a save area
1870 // for register arguments.
1871 int double_slots = 0;
1872 int single_slots = 0;
1873 for ( int i = 0; i < total_in_args; i++) {
1874 if (in_regs[i].first()->is_Register()) {
1875 const Register reg = in_regs[i].first()->as_Register();
1876 switch (in_sig_bt[i]) {
1877 case T_BOOLEAN:
1878 case T_BYTE:
1879 case T_SHORT:
1880 case T_CHAR:
1881 case T_INT: single_slots++; break;
1882 case T_ARRAY: // specific to LP64 (7145024)
1883 case T_LONG: double_slots++; break;
1884 default: ShouldNotReachHere();
1885 }
1886 } else if (in_regs[i].first()->is_XMMRegister()) {
1887 switch (in_sig_bt[i]) {
1888 case T_FLOAT: single_slots++; break;
1889 case T_DOUBLE: double_slots++; break;
1890 default: ShouldNotReachHere();
1891 }
1892 } else if (in_regs[i].first()->is_FloatRegister()) {
1893 ShouldNotReachHere();
1894 }
1895 }
1896 total_save_slots = double_slots * 2 + single_slots;
1897 // align the save area
1898 if (double_slots != 0) {
1899 stack_slots = round_to(stack_slots, 2);
1900 }
1901 }
1903 int oop_handle_offset = stack_slots;
1904 stack_slots += total_save_slots;
1906 // Now any space we need for handlizing a klass if static method
1908 int klass_slot_offset = 0;
1909 int klass_offset = -1;
1910 int lock_slot_offset = 0;
1911 bool is_static = false;
1913 if (method->is_static()) {
1914 klass_slot_offset = stack_slots;
1915 stack_slots += VMRegImpl::slots_per_word;
1916 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1917 is_static = true;
1918 }
1920 // Plus a lock if needed
1922 if (method->is_synchronized()) {
1923 lock_slot_offset = stack_slots;
1924 stack_slots += VMRegImpl::slots_per_word;
1925 }
1927 // Now a place (+2) to save return values or temps during shuffling
1928 // + 4 for return address (which we own) and saved rbp
1929 stack_slots += 6;
1931 // Ok The space we have allocated will look like:
1932 //
1933 //
1934 // FP-> | |
1935 // |---------------------|
1936 // | 2 slots for moves |
1937 // |---------------------|
1938 // | lock box (if sync) |
1939 // |---------------------| <- lock_slot_offset
1940 // | klass (if static) |
1941 // |---------------------| <- klass_slot_offset
1942 // | oopHandle area |
1943 // |---------------------| <- oop_handle_offset (6 java arg registers)
1944 // | outbound memory |
1945 // | based arguments |
1946 // | |
1947 // |---------------------|
1948 // | |
1949 // SP-> | out_preserved_slots |
1950 //
1951 //
1954 // Now compute the actual number of stack words we need, rounding to keep
1955 // the stack properly aligned.
1956 stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1958 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
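// Worked example (values for illustration): with 16-byte stack alignment
// and 4-byte slots, StackAlignmentInSlots is 4, so e.g. 37 slots round up
// to 40 slots == 160 bytes of frame.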
1960 // First thing make an ic check to see if we should even be here
1962 // We are free to use all registers as temps without saving them and
1963 // restoring them except rbp. rbp is the only callee save register
1964 // as far as the interpreter and the compiler(s) are concerned.
1967 const Register ic_reg = rax;
1968 const Register receiver = j_rarg0;
1970 Label hit;
1971 Label exception_pending;
1973 assert_different_registers(ic_reg, receiver, rscratch1);
1974 __ verify_oop(receiver);
1975 __ load_klass(rscratch1, receiver);
1976 __ cmpq(ic_reg, rscratch1);
1977 __ jcc(Assembler::equal, hit);
1979 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
1981 // Verified entry point must be aligned
1982 __ align(8);
1984 __ bind(hit);
1986 int vep_offset = ((intptr_t)__ pc()) - start;
1988 // The instruction at the verified entry point must be 5 bytes or longer
1989 // because it can be patched on the fly by make_non_entrant. The stack bang
1990 // instruction fits that requirement.
1992 // Generate stack overflow check
1994 if (UseStackBanging) {
1995 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1996 } else {
1997 // need a 5 byte instruction to allow MT safe patching to non-entrant
1998 __ fat_nop();
1999 }
2001 // Generate a new frame for the wrapper.
2002 __ enter();
2003 // -2 because return address is already present and so is saved rbp
2004 __ subptr(rsp, stack_size - 2*wordSize);
2006 // Frame is now completed as far as size and linkage.
2007 int frame_complete = ((intptr_t)__ pc()) - start;
2009 #ifdef ASSERT
2010 {
2011 Label L;
2012 __ mov(rax, rsp);
2013 __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2014 __ cmpptr(rax, rsp);
2015 __ jcc(Assembler::equal, L);
2016 __ stop("improperly aligned stack");
2017 __ bind(L);
2018 }
2019 #endif /* ASSERT */
2022 // We use r14 as the oop handle for the receiver/klass
2023 // It is callee save so it survives the call to native
2025 const Register oop_handle_reg = r14;
2027 if (is_critical_native) {
2028 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2029 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2030 }
2032 //
2033 // We immediately shuffle the arguments so that any vm call we have to
2034 // make from here on out (sync slow path, jvmti, etc.) we will have
2035 // captured the oops from our caller and have a valid oopMap for
2036 // them.
2038 // -----------------
2039 // The Grand Shuffle
2041 // The Java calling convention is either equal to (linux) or denser than (win64) the
2042 // c calling convention. However, because of the jni_env argument the c calling
2043 // convention always has at least one more (and two for static) arguments than Java.
2044 // Therefore if we move the args from java -> c backwards then we will never have
2045 // a register->register conflict and we don't have to build a dependency graph
2046 // and figure out how to break any cycles.
2047 //
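// Worked example (registers chosen for illustration): if arg k must move
// reg A -> reg B while arg k+1 currently lives in B and is headed for the
// stack, walking backwards emits "B -> stack" before "A -> B", so B is free
// by the time it is written. The extra jni_env argument guarantees every C
// position sits at or beyond its Java position, so this always holds.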
2049 // Record esp-based slot for receiver on stack for non-static methods
2050 int receiver_offset = -1;
2052 // This is a trick. We double the stack slots so we can claim
2053 // the oops in the caller's frame. Since we are sure to have
2054 // more args than the caller, doubling is enough to make
2055 // sure we can capture all the incoming oop args from the
2056 // caller.
2057 //
2058 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
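// For illustration (numbers assumed): with stack_slots == 40 the map is
// sized for 80 slots, so it can also describe oops still sitting in the
// caller's outgoing-arg area above our frame.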
2060 // Mark location of rbp (someday)
2061 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2063 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2064 // All inbound args are referenced based on rbp and all outbound args via rsp.
2067 #ifdef ASSERT
2068 bool reg_destroyed[RegisterImpl::number_of_registers];
2069 bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2070 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2071 reg_destroyed[r] = false;
2072 }
2073 for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2074 freg_destroyed[f] = false;
2075 }
2077 #endif /* ASSERT */
2079 // This may iterate in two different directions depending on the
2080 // kind of native it is. The reason is that for regular JNI natives
2081 // the incoming and outgoing registers are offset upwards and for
2082 // critical natives they are offset down.
2083 GrowableArray<int> arg_order(2 * total_in_args);
2084 VMRegPair tmp_vmreg;
2085 tmp_vmreg.set1(rbx->as_VMReg());
2087 if (!is_critical_native) {
2088 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2089 arg_order.push(i);
2090 arg_order.push(c_arg);
2091 }
2092 } else {
2093 // Compute a valid move order, using tmp_vmreg to break any cycles
2094 ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2095 }
2097 int temploc = -1;
2098 for (int ai = 0; ai < arg_order.length(); ai += 2) {
2099 int i = arg_order.at(ai);
2100 int c_arg = arg_order.at(ai + 1);
2101 __ block_comment(err_msg("move %d -> %d", i, c_arg));
2102 if (c_arg == -1) {
2103 assert(is_critical_native, "should only be required for critical natives");
2104 // This arg needs to be moved to a temporary
2105 __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2106 in_regs[i] = tmp_vmreg;
2107 temploc = i;
2108 continue;
2109 } else if (i == -1) {
2110 assert(is_critical_native, "should only be required for critical natives");
2111 // Read from the temporary location
2112 assert(temploc != -1, "must be valid");
2113 i = temploc;
2114 temploc = -1;
2115 }
2116 #ifdef ASSERT
2117 if (in_regs[i].first()->is_Register()) {
2118 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2119 } else if (in_regs[i].first()->is_XMMRegister()) {
2120 assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2121 }
2122 if (out_regs[c_arg].first()->is_Register()) {
2123 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2124 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2125 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2126 }
2127 #endif /* ASSERT */
2128 switch (in_sig_bt[i]) {
2129 case T_ARRAY:
2130 if (is_critical_native) {
2131 unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2132 c_arg++;
2133 #ifdef ASSERT
2134 if (out_regs[c_arg].first()->is_Register()) {
2135 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2136 } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2137 freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2138 }
2139 #endif
2140 break;
2141 }
2142 case T_OBJECT:
2143 assert(!is_critical_native, "no oop arguments");
2144 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2145 ((i == 0) && (!is_static)),
2146 &receiver_offset);
2147 break;
2148 case T_VOID:
2149 break;
2151 case T_FLOAT:
2152 float_move(masm, in_regs[i], out_regs[c_arg]);
2153 break;
2155 case T_DOUBLE:
2156 assert( i + 1 < total_in_args &&
2157 in_sig_bt[i + 1] == T_VOID &&
2158 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2159 double_move(masm, in_regs[i], out_regs[c_arg]);
2160 break;
2162 case T_LONG :
2163 long_move(masm, in_regs[i], out_regs[c_arg]);
2164 break;
2166 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2168 default:
2169 move32_64(masm, in_regs[i], out_regs[c_arg]);
2170 }
2171 }
2173 // point c_arg at the first arg that is already loaded in case we
2174 // need to spill before we call out
2175 int c_arg = total_c_args - total_in_args;
2177 // Pre-load a static method's oop into r14. Used both by locking code and
2178 // the normal JNI call code.
2179 if (method->is_static() && !is_critical_native) {
2181 // load oop into a register
2182 __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2184 // Now handlize the static class mirror; it's known not-null.
2185 __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2186 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2188 // Now get the handle
2189 __ lea(oop_handle_reg, Address(rsp, klass_offset));
2190 // store the klass handle as second argument
2191 __ movptr(c_rarg1, oop_handle_reg);
2192 // and protect the arg if we must spill
2193 c_arg--;
2194 }
2196 // Change state to native (we save the return address in the thread, since it might not
2197 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2198 // points into the right code segment. It does not have to be the correct return pc.
2199 // We use the same pc/oopMap repeatedly when we call out
2201 intptr_t the_pc = (intptr_t) __ pc();
2202 oop_maps->add_gc_map(the_pc - start, map);
2204 __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2207 // We have all of the arguments set up at this point. We must not touch any
2208 // argument registers here (if we had to save/restore them, there would be no oopMap for them).
2210 {
2211 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2212 // protect the args we've loaded
2213 save_args(masm, total_c_args, c_arg, out_regs);
2214 __ mov_metadata(c_rarg1, method());
2215 __ call_VM_leaf(
2216 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2217 r15_thread, c_rarg1);
2218 restore_args(masm, total_c_args, c_arg, out_regs);
2219 }
2221 // RedefineClasses() tracing support for obsolete method entry
2222 if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2223 // protect the args we've loaded
2224 save_args(masm, total_c_args, c_arg, out_regs);
2225 __ mov_metadata(c_rarg1, method());
2226 __ call_VM_leaf(
2227 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2228 r15_thread, c_rarg1);
2229 restore_args(masm, total_c_args, c_arg, out_regs);
2230 }
2232 // Lock a synchronized method
2234 // Register definitions used by locking and unlocking
2236 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
2237 const Register obj_reg = rbx; // Will contain the oop
2238 const Register lock_reg = r13; // Address of compiler lock object (BasicLock)
2239 const Register old_hdr = r13; // value of old header at unlock time
2241 Label slow_path_lock;
2242 Label lock_done;
2244 if (method->is_synchronized()) {
2245 assert(!is_critical_native, "unhandled");
2248 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2250 // Get the handle (the 2nd argument)
2251 __ mov(oop_handle_reg, c_rarg1);
2253 // Get address of the box
2255 __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2257 // Load the oop from the handle
2258 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2260 if (UseBiasedLocking) {
2261 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2262 }
2264 // Load immediate 1 into swap_reg %rax
2265 __ movl(swap_reg, 1);
2267 // Load (object->mark() | 1) into swap_reg %rax
2268 __ orptr(swap_reg, Address(obj_reg, 0));
2270 // Save (object->mark() | 1) into BasicLock's displaced header
2271 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2273 if (os::is_MP()) {
2274 __ lock();
2275 }
2277 // src -> dest iff dest == rax else rax <- dest
2278 __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2279 __ jcc(Assembler::equal, lock_done);
2281 // Hmm should this move to the slow path code area???
2283 // Test if the oopMark is an obvious stack pointer, i.e.,
2284 // 1) (mark & 3) == 0, and
2285 // 2) rsp <= mark < rsp + os::vm_page_size()
2286 // These two tests can be done by evaluating the following
2287 // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2288 // assuming both the stack pointer and the page size have their
2289 // least significant 2 bits clear.
2290 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
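// Worked example (page size assumed 4096 for illustration):
//   3 - 4096 == -4093 == 0x...fffff003
// i.e. a mask keeping the low two bits and every bit at or above the page
// size. (mark - rsp) & mask == 0 therefore holds exactly when mark's low
// two bits are clear and 0 <= mark - rsp < 4096: the mark is a stack
// pointer into our own page, which is the recursive-lock case.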
2292 __ subptr(swap_reg, rsp);
2293 __ andptr(swap_reg, 3 - os::vm_page_size());
2295 // Save the test result; for the recursive case, the result is zero
2296 __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2297 __ jcc(Assembler::notEqual, slow_path_lock);
2299 // Slow path will re-enter here
2301 __ bind(lock_done);
2302 }
2305 // Finally just about ready to make the JNI call
2308 // get JNIEnv* which is first argument to native
2309 if (!is_critical_native) {
2310 __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2311 }
2313 // Now set thread in native
2314 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2316 __ call(RuntimeAddress(native_func));
2318 // Either restore the MXCSR register after returning from the JNI Call
2319 // or verify that it wasn't changed.
2320 if (RestoreMXCSROnJNICalls) {
2321 __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
2323 }
2324 else if (CheckJNICalls ) {
2325 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
2326 }
2329 // Unpack native results.
2330 switch (ret_type) {
2331 case T_BOOLEAN: __ c2bool(rax); break;
2332 case T_CHAR : __ movzwl(rax, rax); break;
2333 case T_BYTE : __ sign_extend_byte (rax); break;
2334 case T_SHORT : __ sign_extend_short(rax); break;
2335 case T_INT : /* nothing to do */ break;
2336 case T_DOUBLE :
2337 case T_FLOAT :
2338 // Result is in xmm0 we'll save as needed
2339 break;
2340 case T_ARRAY: // Really a handle
2341 case T_OBJECT: // Really a handle
2342 break; // can't de-handlize until after safepoint check
2343 case T_VOID: break;
2344 case T_LONG: break;
2345 default : ShouldNotReachHere();
2346 }
2348 // Switch thread to "native transition" state before reading the synchronization state.
2349 // This additional state is necessary because reading and testing the synchronization
2350 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2351 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2352 // VM thread changes sync state to synchronizing and suspends threads for GC.
2353 // Thread A is resumed to finish this native method, but doesn't block here since it
2354 // didn't see any synchronization in progress, and escapes.
2355 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2357 if(os::is_MP()) {
2358 if (UseMembar) {
2359 // Force this write out before the read below
2360 __ membar(Assembler::Membar_mask_bits(
2361 Assembler::LoadLoad | Assembler::LoadStore |
2362 Assembler::StoreLoad | Assembler::StoreStore));
2363 } else {
2364 // Write serialization page so VM thread can do a pseudo remote membar.
2365 // We use the current thread pointer to calculate a thread specific
2366 // offset to write to within the page. This minimizes bus traffic
2367 // due to cache line collision.
2368 __ serialize_memory(r15_thread, rcx);
2369 }
2370 }
2372 Label after_transition;
2374 // check for safepoint operation in progress and/or pending suspend requests
2375 {
2376 Label Continue;
2378 __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2379 SafepointSynchronize::_not_synchronized);
2381 Label L;
2382 __ jcc(Assembler::notEqual, L);
2383 __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2384 __ jcc(Assembler::equal, Continue);
2385 __ bind(L);
2387 // Don't use call_VM as it will see a possible pending exception and forward it
2388 // and never return here, preventing us from clearing _last_native_pc down below.
2389 // We can't use call_VM_leaf either, as it will check to see if rsi & rdi are
2390 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2391 // by hand.
2392 //
2393 save_native_result(masm, ret_type, stack_slots);
2394 __ mov(c_rarg0, r15_thread);
2395 __ mov(r12, rsp); // remember sp
2396 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2397 __ andptr(rsp, -16); // align stack as required by ABI
2398 if (!is_critical_native) {
2399 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2400 } else {
2401 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2402 }
2403 __ mov(rsp, r12); // restore sp
2404 __ reinit_heapbase();
2405 // Restore any method result value
2406 restore_native_result(masm, ret_type, stack_slots);
2408 if (is_critical_native) {
2409 // The call above performed the transition to thread_in_Java so
2410 // skip the transition logic below.
2411 __ jmpb(after_transition);
2412 }
2414 __ bind(Continue);
2415 }
2417 // change thread state
2418 __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2419 __ bind(after_transition);
2421 Label reguard;
2422 Label reguard_done;
2423 __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
2424 __ jcc(Assembler::equal, reguard);
2425 __ bind(reguard_done);
2427 // The native result, if any, is live here
2429 // Unlock
2430 Label unlock_done;
2431 Label slow_path_unlock;
2432 if (method->is_synchronized()) {
2434 // Get locked oop from the handle we passed to jni
2435 __ movptr(obj_reg, Address(oop_handle_reg, 0));
2437 Label done;
2439 if (UseBiasedLocking) {
2440 __ biased_locking_exit(obj_reg, old_hdr, done);
2441 }
2443 // Simple recursive lock?
2445 __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2446 __ jcc(Assembler::equal, done);
2448 // Must save rax if it is live now, because cmpxchg must use it
2449 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2450 save_native_result(masm, ret_type, stack_slots);
2451 }
2454 // get address of the stack lock
2455 __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2456 // get old displaced header
2457 __ movptr(old_hdr, Address(rax, 0));
2459 // Atomic swap old header if oop still contains the stack lock
2460 if (os::is_MP()) {
2461 __ lock();
2462 }
2463 __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2464 __ jcc(Assembler::notEqual, slow_path_unlock);
2466 // slow path re-enters here
2467 __ bind(unlock_done);
2468 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2469 restore_native_result(masm, ret_type, stack_slots);
2470 }
2472 __ bind(done);
2474 }
2475 {
2476 SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2477 save_native_result(masm, ret_type, stack_slots);
2478 __ mov_metadata(c_rarg1, method());
2479 __ call_VM_leaf(
2480 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2481 r15_thread, c_rarg1);
2482 restore_native_result(masm, ret_type, stack_slots);
2483 }
2485 __ reset_last_Java_frame(false, true);
2487 // Unpack oop result
2488 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2489 Label L;
2490 __ testptr(rax, rax);
2491 __ jcc(Assembler::zero, L);
2492 __ movptr(rax, Address(rax, 0));
2493 __ bind(L);
2494 __ verify_oop(rax);
2495 }
2497 if (!is_critical_native) {
2498 // reset handle block
2499 __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2500 __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2501 }
2503 // pop our frame
2505 __ leave();
2507 if (!is_critical_native) {
2508 // Any exception pending?
2509 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2510 __ jcc(Assembler::notEqual, exception_pending);
2511 }
2513 // Return
2515 __ ret(0);
2517 // Unexpected paths are out of line and go here
2519 if (!is_critical_native) {
2520 // forward the exception
2521 __ bind(exception_pending);
2523 // and forward the exception
2524 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2525 }
2527 // Slow path locking & unlocking
2528 if (method->is_synchronized()) {
2530 // BEGIN Slow path lock
2531 __ bind(slow_path_lock);
2533 // has last_Java_frame setup. No exceptions, so do a vanilla call, not call_VM
2534 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2536 // protect the args we've loaded
2537 save_args(masm, total_c_args, c_arg, out_regs);
2539 __ mov(c_rarg0, obj_reg);
2540 __ mov(c_rarg1, lock_reg);
2541 __ mov(c_rarg2, r15_thread);
2543 // Not a leaf but we have last_Java_frame setup as we want
2544 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2545 restore_args(masm, total_c_args, c_arg, out_regs);
2547 #ifdef ASSERT
2548 { Label L;
2549 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2550 __ jcc(Assembler::equal, L);
2551 __ stop("no pending exception allowed on exit from monitorenter");
2552 __ bind(L);
2553 }
2554 #endif
2555 __ jmp(lock_done);
2557 // END Slow path lock
2559 // BEGIN Slow path unlock
2560 __ bind(slow_path_unlock);
2562 // If we haven't already saved the native result we must save it now as xmm registers
2563 // are still exposed.
2565 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2566 save_native_result(masm, ret_type, stack_slots);
2567 }
2569 __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2571 __ mov(c_rarg0, obj_reg);
2572 __ mov(r12, rsp); // remember sp
2573 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2574 __ andptr(rsp, -16); // align stack as required by ABI
2576 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2577 // NOTE that obj_reg == rbx currently
2578 __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2579 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2581 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2582 __ mov(rsp, r12); // restore sp
2583 __ reinit_heapbase();
2584 #ifdef ASSERT
2585 {
2586 Label L;
2587 __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
2588 __ jcc(Assembler::equal, L);
2589 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2590 __ bind(L);
2591 }
2592 #endif /* ASSERT */
2594 __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2596 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2597 restore_native_result(masm, ret_type, stack_slots);
2598 }
2599 __ jmp(unlock_done);
2601 // END Slow path unlock
2603 } // synchronized
2605 // SLOW PATH Reguard the stack if needed
2607 __ bind(reguard);
2608 save_native_result(masm, ret_type, stack_slots);
2609 __ mov(r12, rsp); // remember sp
2610 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2611 __ andptr(rsp, -16); // align stack as required by ABI
2612 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2613 __ mov(rsp, r12); // restore sp
2614 __ reinit_heapbase();
2615 restore_native_result(masm, ret_type, stack_slots);
2616 // and continue
2617 __ jmp(reguard_done);
2621 __ flush();
2623 nmethod *nm = nmethod::new_native_nmethod(method,
2624 compile_id,
2625 masm->code(),
2626 vep_offset,
2627 frame_complete,
2628 stack_slots / VMRegImpl::slots_per_word,
2629 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2630 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2631 oop_maps);
2633 if (is_critical_native) {
2634 nm->set_lazy_critical_native(true);
2635 }
2637 return nm;
2639 }
2641 #ifdef HAVE_DTRACE_H
2642 // ---------------------------------------------------------------------------
2643 // Generate a dtrace nmethod for a given signature. The method takes arguments
2644 // in the Java compiled code convention, marshals them to the native
2645 // abi and then leaves nops at the position you would expect to call a native
2646 // function. When the probe is enabled the nops are replaced with a trap
2647 // instruction that dtrace inserts, and the trap will cause a notification
2648 // to dtrace.
2649 //
2650 // The probes are only able to take primitive types and java/lang/String as
2651 // arguments. No other java types are allowed. Strings are converted to utf8
2652 // strings so that from dtrace's point of view java strings are converted to C
2653 // strings. There is an arbitrary fixed limit on the total space that a method
2654 // can use for converting the strings (256 chars per string in the signature),
2655 // so any java string larger than this is truncated.
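// For illustration (method and names assumed): a probe on
//   void log(String msg, long id, Object o);
// is presented to dtrace roughly as
//   log(char* msg_utf8, long id, void* /* NULL */)
// with msg copied into the bounded per-string conversion buffer described
// above.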
2657 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2658 static bool offsets_initialized = false;
2661 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
2662 methodHandle method) {
2665 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2666 // be single threaded in this method.
2667 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2669 if (!offsets_initialized) {
2670 fp_offset[c_rarg0->as_VMReg()->value()] = -1 * wordSize;
2671 fp_offset[c_rarg1->as_VMReg()->value()] = -2 * wordSize;
2672 fp_offset[c_rarg2->as_VMReg()->value()] = -3 * wordSize;
2673 fp_offset[c_rarg3->as_VMReg()->value()] = -4 * wordSize;
2674 fp_offset[c_rarg4->as_VMReg()->value()] = -5 * wordSize;
2675 fp_offset[c_rarg5->as_VMReg()->value()] = -6 * wordSize;
2677 fp_offset[c_farg0->as_VMReg()->value()] = -7 * wordSize;
2678 fp_offset[c_farg1->as_VMReg()->value()] = -8 * wordSize;
2679 fp_offset[c_farg2->as_VMReg()->value()] = -9 * wordSize;
2680 fp_offset[c_farg3->as_VMReg()->value()] = -10 * wordSize;
2681 fp_offset[c_farg4->as_VMReg()->value()] = -11 * wordSize;
2682 fp_offset[c_farg5->as_VMReg()->value()] = -12 * wordSize;
2683 fp_offset[c_farg6->as_VMReg()->value()] = -13 * wordSize;
2684 fp_offset[c_farg7->as_VMReg()->value()] = -14 * wordSize;
2686 offsets_initialized = true;
2687 }
2688 // Fill in the signature array, for the calling-convention call.
2689 int total_args_passed = method->size_of_parameters();
2691 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2692 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2694 // The signature we are going to use for the trap that dtrace will see:
2695 // java/lang/String is converted. We drop "this" and any other object
2696 // is converted to NULL. (A one-slot java/lang/Long object reference
2697 // is converted to a two-slot long, which is why we double the allocation).
2698 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2699 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
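// Worked example (signature chosen for illustration): for
//   void m(String s, Long l, float f);
// the loop below produces out_sig_bt == { T_ADDRESS, T_LONG, T_VOID, T_INT }:
// the String becomes a char*, the boxed Long widens to a two-slot long
// (hence the doubled allocation above), and the float is passed as an int.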
2701 int i=0;
2702 int total_strings = 0;
2703 int first_arg_to_pass = 0;
2704 int total_c_args = 0;
2706 // Skip the receiver as dtrace doesn't want to see it
2707 if( !method->is_static() ) {
2708 in_sig_bt[i++] = T_OBJECT;
2709 first_arg_to_pass = 1;
2710 }
2712 // We need to convert the java args to where a native (non-jni) function
2713 // would expect them. To figure out where they go we convert the java
2714 // signature to a C signature.
2716 SignatureStream ss(method->signature());
2717 for ( ; !ss.at_return_type(); ss.next()) {
2718 BasicType bt = ss.type();
2719 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2720 out_sig_bt[total_c_args++] = bt;
2721 if( bt == T_OBJECT) {
2722 Symbol* s = ss.as_symbol_or_null(); // symbol is created
2723 if (s == vmSymbols::java_lang_String()) {
2724 total_strings++;
2725 out_sig_bt[total_c_args-1] = T_ADDRESS;
2726 } else if (s == vmSymbols::java_lang_Boolean() ||
2727 s == vmSymbols::java_lang_Character() ||
2728 s == vmSymbols::java_lang_Byte() ||
2729 s == vmSymbols::java_lang_Short() ||
2730 s == vmSymbols::java_lang_Integer() ||
2731 s == vmSymbols::java_lang_Float()) {
2732 out_sig_bt[total_c_args-1] = T_INT;
2733 } else if (s == vmSymbols::java_lang_Long() ||
2734 s == vmSymbols::java_lang_Double()) {
2735 out_sig_bt[total_c_args-1] = T_LONG;
2736 out_sig_bt[total_c_args++] = T_VOID;
2737 }
2738 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2739 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2740 // We convert double to long
2741 out_sig_bt[total_c_args-1] = T_LONG;
2742 out_sig_bt[total_c_args++] = T_VOID;
2743 } else if ( bt == T_FLOAT) {
2744 // We convert float to int
2745 out_sig_bt[total_c_args-1] = T_INT;
2746 }
2747 }
2749 assert(i==total_args_passed, "validly parsed signature");
2751 // Now get the compiled-Java layout as input arguments
2752 int comp_args_on_stack;
2753 comp_args_on_stack = SharedRuntime::java_calling_convention(
2754 in_sig_bt, in_regs, total_args_passed, false);
2756 // Now figure out where the args must be stored and how much stack space
2757 // they require (neglecting out_preserve_stack_slots but space for storing
2758 // the 1st six register arguments). It's weird; see int_stk_helper.
2760 int out_arg_slots;
2761 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2763 // Calculate the total number of stack slots we will need.
2765 // First count the abi requirement plus all of the outgoing args
2766 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2768 // Now space for the string(s) we must convert
2769 int* string_locs = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2770 for (i = 0; i < total_strings ; i++) {
2771 string_locs[i] = stack_slots;
2772 stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2773 }
2775 // Plus the temps we might need to juggle register args
2776 // regs take two slots each
2777 stack_slots += (Argument::n_int_register_parameters_c +
2778 Argument::n_float_register_parameters_c) * 2;
2781 // + 4 for return address (which we own) and saved rbp,
2783 stack_slots += 4;
2785 // Ok The space we have allocated will look like:
2786 //
2787 //
2788 // FP-> | |
2789 // |---------------------|
2790 // | string[n] |
2791 // |---------------------| <- string_locs[n]
2792 // | string[n-1] |
2793 // |---------------------| <- string_locs[n-1]
2794 // | ... |
2795 // | ... |
2796 // |---------------------| <- string_locs[1]
2797 // | string[0] |
2798 // |---------------------| <- string_locs[0]
2799 // | outbound memory |
2800 // | based arguments |
2801 // | |
2802 // |---------------------|
2803 // | |
2804 // SP-> | out_preserved_slots |
2805 //
2806 //
2808 // Now compute actual number of stack words we need rounding to make
2809 // stack properly aligned.
2810 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2812 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
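// Worked example (values for illustration): with 4-byte slots and 8-byte
// words this rounds stack_slots to a multiple of 8, keeping the frame
// 32-byte (and therefore 16-byte) aligned.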
2814 intptr_t start = (intptr_t)__ pc();
2816 // First thing make an ic check to see if we should even be here
2818 // We are free to use all registers as temps without saving them and
2819 // restoring them except rbp. rbp is the only callee save register
2820 // as far as the interpreter and the compiler(s) are concerned.
2822 const Register ic_reg = rax;
2823 const Register receiver = rcx;
2824 Label hit;
2825 Label exception_pending;
2828 __ verify_oop(receiver);
2829 __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2830 __ jcc(Assembler::equal, hit);
2832 __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2834 // Verified entry must be aligned for code patching, and the first
2835 // 5 bytes must be in the same cache line.
2836 // If we align at 8 we can be sure the 5 bytes are in the same line.
2837 __ align(8);
2839 __ bind(hit);
2841 int vep_offset = ((intptr_t)__ pc()) - start;
2844 // The instruction at the verified entry point must be 5 bytes or longer
2845 // because it can be patched on the fly by make_non_entrant. The stack bang
2846 // instruction fits that requirement.
2848 // Generate stack overflow check
2850 if (UseStackBanging) {
2851 if (stack_size <= StackShadowPages*os::vm_page_size()) {
2852 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2853 } else {
2854 __ movl(rax, stack_size);
2855 __ bang_stack_size(rax, rbx);
2856 }
2857 } else {
2858 // need a 5 byte instruction to allow MT safe patching to non-entrant
2859 __ fat_nop();
2860 }
2862 assert(((uintptr_t)__ pc() - start - vep_offset) >= 5,
2863 "valid size for make_non_entrant");
2865 // Generate a new frame for the wrapper.
2866 __ enter();
2868 // -2 because the return address is already present and so is saved rbp
2869 if (stack_size - 2*wordSize != 0) {
2870 __ subq(rsp, stack_size - 2*wordSize);
2871 }
2873 // Frame is now completed as far as size and linkage.
2875 int frame_complete = ((intptr_t)__ pc()) - start;
2877 int c_arg, j_arg;
2879 // State of input register args
2881 bool live[ConcreteRegisterImpl::number_of_registers];
2883 live[j_rarg0->as_VMReg()->value()] = false;
2884 live[j_rarg1->as_VMReg()->value()] = false;
2885 live[j_rarg2->as_VMReg()->value()] = false;
2886 live[j_rarg3->as_VMReg()->value()] = false;
2887 live[j_rarg4->as_VMReg()->value()] = false;
2888 live[j_rarg5->as_VMReg()->value()] = false;
2890 live[j_farg0->as_VMReg()->value()] = false;
2891 live[j_farg1->as_VMReg()->value()] = false;
2892 live[j_farg2->as_VMReg()->value()] = false;
2893 live[j_farg3->as_VMReg()->value()] = false;
2894 live[j_farg4->as_VMReg()->value()] = false;
2895 live[j_farg5->as_VMReg()->value()] = false;
2896 live[j_farg6->as_VMReg()->value()] = false;
2897 live[j_farg7->as_VMReg()->value()] = false;
2900 bool rax_is_zero = false;
2902 // All args (except strings) destined for the stack are moved first
2903 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2904 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2905 VMRegPair src = in_regs[j_arg];
2906 VMRegPair dst = out_regs[c_arg];
2908 // Get the real reg value or a dummy (rsp)
2910 int src_reg = src.first()->is_reg() ?
2911 src.first()->value() :
2912 rsp->as_VMReg()->value();
2914 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
2915 (in_sig_bt[j_arg] == T_OBJECT &&
2916 out_sig_bt[c_arg] != T_INT &&
2917 out_sig_bt[c_arg] != T_ADDRESS &&
2918 out_sig_bt[c_arg] != T_LONG);
2920 live[src_reg] = !useless;
2922 if (dst.first()->is_stack()) {
2924 // Even though a string arg in a register is still live after this loop,
2925 // it will be dead after the string conversion loop (next), so we take
2926 // advantage of that now for simpler code to manage liveness.
2928 live[src_reg] = false;
2929 switch (in_sig_bt[j_arg]) {
2931 case T_ARRAY:
2932 case T_OBJECT:
2933 {
2934 Address stack_dst(rsp, reg2offset_out(dst.first()));
2936 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2937 // need to unbox a one-word value
2938 Register in_reg = rax;
2939 if ( src.first()->is_reg() ) {
2940 in_reg = src.first()->as_Register();
2941 } else {
2942 __ movq(rax, Address(rbp, reg2offset_in(src.first())));
2943 rax_is_zero = false;
2944 }
2945 Label skipUnbox;
2946 __ movptr(Address(rsp, reg2offset_out(dst.first())),
2947 (int32_t)NULL_WORD);
2948 __ testq(in_reg, in_reg);
2949 __ jcc(Assembler::zero, skipUnbox);
2951 BasicType bt = out_sig_bt[c_arg];
2952 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2953 Address src1(in_reg, box_offset);
2954 if ( bt == T_LONG ) {
2955 __ movq(in_reg, src1);
2956 __ movq(stack_dst, in_reg);
2957 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2958 ++c_arg; // skip over T_VOID to keep the loop indices in sync
2959 } else {
2960 __ movl(in_reg, src1);
2961 __ movl(stack_dst, in_reg);
2962 }
2964 __ bind(skipUnbox);
2965 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
2966 // Convert the arg to NULL
2967 if (!rax_is_zero) {
2968 __ xorq(rax, rax);
2969 rax_is_zero = true;
2970 }
2971 __ movq(stack_dst, rax);
2972 }
2973 }
2974 break;
2976 case T_VOID:
2977 break;
2979 case T_FLOAT:
2980 // This does the right thing since we know it is destined for the
2981 // stack
2982 float_move(masm, src, dst);
2983 break;
2985 case T_DOUBLE:
2986 // This does the right thing since we know it is destined for the
2987 // stack
2988 double_move(masm, src, dst);
2989 break;
2991 case T_LONG :
2992 long_move(masm, src, dst);
2993 break;
2995 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2997 default:
2998 move32_64(masm, src, dst);
2999 }
3000 }
3002 }
3004 // If we have any strings we must store any register based arg to the stack
3005 // This includes any still live xmm registers too.
3007 int sid = 0;
3009 if (total_strings > 0 ) {
3010 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3011 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3012 VMRegPair src = in_regs[j_arg];
3013 VMRegPair dst = out_regs[c_arg];
3015 if (src.first()->is_reg()) {
3016 Address src_tmp(rbp, fp_offset[src.first()->value()]);
3018 // string oops were left untouched by the previous loop even if the
3019 // eventual (converted) arg is destined for the stack, so park them
3020 // away now (except for the first)
3022 if (out_sig_bt[c_arg] == T_ADDRESS) {
3023 Address utf8_addr = Address(
3024 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3025 if (sid != 1) {
3026 // The first string arg won't be killed until after the utf8
3027 // conversion
3028 __ movq(utf8_addr, src.first()->as_Register());
3029 }
3030 } else if (dst.first()->is_reg()) {
3031 if (in_sig_bt[j_arg] == T_FLOAT || in_sig_bt[j_arg] == T_DOUBLE) {
3033 // Convert the xmm register to an int and store it in the reserved
3034 // location for the eventual c register arg
3035 XMMRegister f = src.first()->as_XMMRegister();
3036 if (in_sig_bt[j_arg] == T_FLOAT) {
3037 __ movflt(src_tmp, f);
3038 } else {
3039 __ movdbl(src_tmp, f);
3040 }
3041 } else {
3042 // If the arg is an oop type we don't support, don't bother to store
3043 // it; remember, strings were handled above.
3044 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3045 (in_sig_bt[j_arg] == T_OBJECT &&
3046 out_sig_bt[c_arg] != T_INT &&
3047 out_sig_bt[c_arg] != T_LONG);
3049 if (!useless) {
3050 __ movq(src_tmp, src.first()->as_Register());
3051 }
3052 }
3053 }
3054 }
3055 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3056 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3057 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3058 }
3059 }
3061 // Now that the volatile registers are safe, convert all the strings
3062 sid = 0;
3064 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3065 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3066 if (out_sig_bt[c_arg] == T_ADDRESS) {
3067 // It's a string
3068 Address utf8_addr = Address(
3069 rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
3070 // The first string we find might still be in the original java arg
3071 // register
3073 VMReg src = in_regs[j_arg].first();
3075 // We will need to eventually save the final argument to the trap
3076 // in the non-volatile location dedicated to src. This is the offset
3077 // from fp we will use.
3078 int src_off = src->is_reg() ?
3079 fp_offset[src->value()] : reg2offset_in(src);
3081 // This is where the argument will eventually reside
3082 VMRegPair dst = out_regs[c_arg];
3084 if (src->is_reg()) {
3085 if (sid == 1) {
3086 __ movq(c_rarg0, src->as_Register());
3087 } else {
3088 __ movq(c_rarg0, utf8_addr);
3089 }
3090 } else {
3091 // arg is still in the original location
3092 __ movq(c_rarg0, Address(rbp, reg2offset_in(src)));
3093 }
3094 Label done, convert;
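// Conceptually, the branch below does (a rough sketch, not literal code):
//   if (oop == NULL) dst = NULL;
//   else             { dst = utf8_addr; get_utf(oop, utf8_addr); }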
3096 // see if the oop is NULL
3097 __ testq(c_rarg0, c_rarg0);
3098 __ jcc(Assembler::notEqual, convert);
3100 if (dst.first()->is_reg()) {
3101 // Save the ptr to the utf string in the original src loc or the tmp
3102 // dedicated to it
3103 __ movq(Address(rbp, src_off), c_rarg0);
3104 } else {
3105 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg0);
3106 }
3107 __ jmp(done);
3109 __ bind(convert);
3111 __ lea(c_rarg1, utf8_addr);
3112 if (dst.first()->is_reg()) {
3113 __ movq(Address(rbp, src_off), c_rarg1);
3114 } else {
3115 __ movq(Address(rsp, reg2offset_out(dst.first())), c_rarg1);
3116 }
3117 // And do the conversion
3118 __ call(RuntimeAddress(
3119 CAST_FROM_FN_PTR(address, SharedRuntime::get_utf)));
3121 __ bind(done);
3122 }
3123 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3124 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3125 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3126 }
3127 }
3128 // The get_utf call killed all the c_arg registers
3129 live[c_rarg0->as_VMReg()->value()] = false;
3130 live[c_rarg1->as_VMReg()->value()] = false;
3131 live[c_rarg2->as_VMReg()->value()] = false;
3132 live[c_rarg3->as_VMReg()->value()] = false;
3133 live[c_rarg4->as_VMReg()->value()] = false;
3134 live[c_rarg5->as_VMReg()->value()] = false;
3136 live[c_farg0->as_VMReg()->value()] = false;
3137 live[c_farg1->as_VMReg()->value()] = false;
3138 live[c_farg2->as_VMReg()->value()] = false;
3139 live[c_farg3->as_VMReg()->value()] = false;
3140 live[c_farg4->as_VMReg()->value()] = false;
3141 live[c_farg5->as_VMReg()->value()] = false;
3142 live[c_farg6->as_VMReg()->value()] = false;
3143 live[c_farg7->as_VMReg()->value()] = false;
3144 }
3146 // Now we can finally move the register args to their desired locations
3148 rax_is_zero = false;
3150 for (j_arg = first_arg_to_pass, c_arg = 0 ;
3151 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
3153 VMRegPair src = in_regs[j_arg];
3154 VMRegPair dst = out_regs[c_arg];
3156 // Only need to look for args destined for the integer registers (since we
3157 // convert float/double args to look like int/long outbound)
3158 if (dst.first()->is_reg()) {
3159 Register r = dst.first()->as_Register();
3161 // Check if the java arg is unsupported and therefore useless
3162 bool useless = in_sig_bt[j_arg] == T_ARRAY ||
3163 (in_sig_bt[j_arg] == T_OBJECT &&
3164 out_sig_bt[c_arg] != T_INT &&
3165 out_sig_bt[c_arg] != T_ADDRESS &&
3166 out_sig_bt[c_arg] != T_LONG);
3169 // If we're going to kill an existing arg save it first
3170 if (live[dst.first()->value()]) {
3171 // you can't kill yourself
3172 if (src.first() != dst.first()) {
3173 __ movq(Address(rbp, fp_offset[dst.first()->value()]), r);
3174 }
3175 }
3176 if (src.first()->is_reg()) {
3177 if (live[src.first()->value()] ) {
3178 if (in_sig_bt[j_arg] == T_FLOAT) {
3179 __ movdl(r, src.first()->as_XMMRegister());
3180 } else if (in_sig_bt[j_arg] == T_DOUBLE) {
3181 __ movdq(r, src.first()->as_XMMRegister());
3182 } else if (r != src.first()->as_Register()) {
3183 if (!useless) {
3184 __ movq(r, src.first()->as_Register());
3185 }
3186 }
3187 } else {
3188 // If the arg is an oop type we don't support, don't bother to store
3189 // it
3190 if (!useless) {
3191 if (in_sig_bt[j_arg] == T_DOUBLE ||
3192 in_sig_bt[j_arg] == T_LONG ||
3193 in_sig_bt[j_arg] == T_OBJECT ) {
3194 __ movq(r, Address(rbp, fp_offset[src.first()->value()]));
3195 } else {
3196 __ movl(r, Address(rbp, fp_offset[src.first()->value()]));
3197 }
3198 }
3199 }
3200 live[src.first()->value()] = false;
3201 } else if (!useless) {
3202 // a full-sized move even for an int should be ok
3203 __ movq(r, Address(rbp, reg2offset_in(src.first())));
3204 }
3206 // At this point r has the original java arg in the final location
3207 // (assuming it wasn't useless). If the java arg was an oop
3208 // we have a bit more to do
3210 if (in_sig_bt[j_arg] == T_ARRAY || in_sig_bt[j_arg] == T_OBJECT ) {
3211 if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
3212 // need to unbox a one-word value
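// (Conceptually: r = (r == NULL) ? 0 : value field loaded from the box;
// a NULL box simply passes 0 through to the trap.)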
3213 Label skip;
3214 __ testq(r, r);
3215 __ jcc(Assembler::equal, skip);
3216 BasicType bt = out_sig_bt[c_arg];
3217 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
3218 Address src1(r, box_offset);
3219 if ( bt == T_LONG ) {
3220 __ movq(r, src1);
3221 } else {
3222 __ movl(r, src1);
3223 }
3224 __ bind(skip);
3226 } else if (out_sig_bt[c_arg] != T_ADDRESS) {
3227 // Convert the arg to NULL
3228 __ xorq(r, r);
3229 }
3230 }
3232 // dst can no longer be holding an input value
3233 live[dst.first()->value()] = false;
3234 }
3235 if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
3236 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
3237 ++c_arg; // skip over T_VOID to keep the loop indices in sync
3238 }
3239 }
3242 // Ok now we are done. Need to place the nop that dtrace wants in order to
3243 // patch in the trap
3244 int patch_offset = ((intptr_t)__ pc()) - start;
3246 __ nop();
3249 // Return
3251 __ leave();
3252 __ ret(0);
3254 __ flush();
3256 nmethod *nm = nmethod::new_dtrace_nmethod(
3257 method, masm->code(), vep_offset, patch_offset, frame_complete,
3258 stack_slots / VMRegImpl::slots_per_word);
3259 return nm;
3261 }
3263 #endif // HAVE_DTRACE_H
3265 // This function returns the adjustment size (in number of words) to a c2i adapter
3266 // activation for use during deoptimization
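// (e.g. a callee with 2 parameters and 5 locals needs (5 - 2) extra stack
// elements to hold the locals that are not incoming parameters)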
3267 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
3268 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3269 }
3272 uint SharedRuntime::out_preserve_stack_slots() {
3273 return 0;
3274 }
3276 //------------------------------generate_deopt_blob----------------------------
3277 void SharedRuntime::generate_deopt_blob() {
3278 // Allocate space for the code
3279 ResourceMark rm;
3280 // Setup code generation tools
3281 CodeBuffer buffer("deopt_blob", 2048, 1024);
3282 MacroAssembler* masm = new MacroAssembler(&buffer);
3283 int frame_size_in_words;
3284 OopMap* map = NULL;
3285 OopMapSet *oop_maps = new OopMapSet();
3287 // -------------
3288 // This code enters when returning to a de-optimized nmethod. A return
3289 // address has been pushed on the stack, and return values are in
3290 // registers.
3291 // If we are doing a normal deopt then we were called from the patched
3292 // nmethod from the point we returned to the nmethod. So the return
3293 // address on the stack is wrong by NativeCall::instruction_size
3294 // We will adjust the value so it looks like we have the original return
3295 // address on the stack (like when we eagerly deoptimized).
3296 // In the case of an exception pending when deoptimizing, we enter
3297 // with a return address on the stack that points after the call we patched
3298 // into the exception handler. We have the following register state from,
3299 // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
3300 // rax: exception oop
3301 // rbx: exception handler
3302 // rdx: throwing pc
3303 // So in this case we simply jam rdx into the useless return address and
3304 // the stack looks just like we want.
3305 //
3306 // At this point we need to de-opt. We save the argument return
3307 // registers. We call the first C routine, fetch_unroll_info(). This
3308 // routine captures the return values and returns a structure which
3309 // describes the current frame size and the sizes of all replacement frames.
3310 // The current frame is compiled code and may contain many inlined
3311 // functions, each with their own JVM state. We pop the current frame, then
3312 // push all the new frames. Then we call the C routine unpack_frames() to
3313 // populate these frames. Finally unpack_frames() returns us the new target
3314 // address. Notice that callee-save registers are BLOWN here; they have
3315 // already been captured in the vframeArray at the time the return PC was
3316 // patched.
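// Roughly, as pseudo-code (a sketch only; helper names are illustrative,
// the real calls appear below):
//   info = fetch_unroll_info(thread);     // sizes/pcs of replacement frames
//   pop(deoptimized_frame);
//   for (i = 0; i < info->number_of_frames; i++)
//     push_skeletal_interpreter_frame(info->frame_sizes[i], info->frame_pcs[i]);
//   unpack_frames(thread, exec_mode);     // fill in the skeletal frames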
3317 address start = __ pc();
3318 Label cont;
3320 // Prolog for the non-exception case!
3322 // Save everything in sight.
3323 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3325 // Normal deoptimization. Save exec mode for unpack_frames.
3326 __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
3327 __ jmp(cont);
3329 int reexecute_offset = __ pc() - start;
3331 // Reexecute case
3332 // the return address is the pc that describes what bci to re-execute at
3334 // No need to update map as each call to save_live_registers will produce identical oopmap
3335 (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3337 __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
3338 __ jmp(cont);
3340 int exception_offset = __ pc() - start;
3342 // Prolog for exception case
3344 // all registers are dead at this entry point, except for rax and
3345 // rdx, which contain the exception oop and exception pc
3346 // respectively. Set them in TLS and fall thru to the
3347 // unpack_with_exception_in_tls entry point.
3349 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
3350 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
3352 int exception_in_tls_offset = __ pc() - start;
3354 // new implementation because exception oop is now passed in JavaThread
3356 // Prolog for exception case
3357 // All registers must be preserved because they might be used by LinearScan
3358 // Exception oop and throwing PC are passed in JavaThread
3359 // tos: stack at point of call to method that threw the exception (i.e. only
3360 // args are on the stack, no return address)
3362 // make room on stack for the return address
3363 // It will be patched later with the throwing pc. The correct value is not
3364 // available now because loading it from memory would destroy registers.
3365 __ push(0);
3367 // Save everything in sight.
3368 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3370 // Now it is safe to overwrite any register
3372 // Deopt during an exception. Save exec mode for unpack_frames.
3373 __ movl(r14, Deoptimization::Unpack_exception); // callee-saved
3375 // load throwing pc from JavaThread and patch it as the return address
3376 // of the current frame. Then clear the field in JavaThread
3378 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3379 __ movptr(Address(rbp, wordSize), rdx);
3380 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3382 #ifdef ASSERT
3383 // verify that there is really an exception oop in JavaThread
3384 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3385 __ verify_oop(rax);
3387 // verify that there is no pending exception
3388 Label no_pending_exception;
3389 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3390 __ testptr(rax, rax);
3391 __ jcc(Assembler::zero, no_pending_exception);
3392 __ stop("must not have pending exception here");
3393 __ bind(no_pending_exception);
3394 #endif
3396 __ bind(cont);
3398 // Call C code. Need thread and this frame, but NOT official VM entry
3399 // crud. We cannot block on this call, no GC can happen.
3400 //
3401 // UnrollBlock* fetch_unroll_info(JavaThread* thread)
3403 // fetch_unroll_info needs to call last_java_frame().
3405 __ set_last_Java_frame(noreg, noreg, NULL);
3406 #ifdef ASSERT
3407 { Label L;
3408 __ cmpptr(Address(r15_thread,
3409 JavaThread::last_Java_fp_offset()),
3410 (int32_t)0);
3411 __ jcc(Assembler::equal, L);
3412 __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
3413 __ bind(L);
3414 }
3415 #endif // ASSERT
3416 __ mov(c_rarg0, r15_thread);
3417 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
3419 // Need to have an oopmap that tells fetch_unroll_info where to
3420 // find any register it might need.
3421 oop_maps->add_gc_map(__ pc() - start, map);
3423 __ reset_last_Java_frame(false, false);
3425 // Load UnrollBlock* into rdi
3426 __ mov(rdi, rax);
3428 Label noException;
3429 __ cmpl(r14, Deoptimization::Unpack_exception); // Was exception pending?
3430 __ jcc(Assembler::notEqual, noException);
3431 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
3432 // QQQ this is useless; it was NULL above
3433 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
3434 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
3435 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
3437 __ verify_oop(rax);
3439 // Overwrite the result registers with the exception results.
3440 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3441 // I think this is useless
3442 __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
3444 __ bind(noException);
3446 // Only register save data is on the stack.
3447 // Now restore the result registers. Everything else is either dead
3448 // or captured in the vframeArray.
3449 RegisterSaver::restore_result_registers(masm);
3451 // All of the register save area has been popped off the stack. Only the
3452 // return address remains.
3454 // Pop all the frames we must move/replace.
3455 //
3456 // Frame picture (youngest to oldest)
3457 // 1: self-frame (no frame link)
3458 // 2: deopting frame (no frame link)
3459 // 3: caller of deopting frame (could be compiled/interpreted).
3460 //
3461 // Note: by leaving the return address of self-frame on the stack
3462 // and using the size of frame 2 to adjust the stack,
3463 // the return to frame 3 will still be on the stack when we are done.
3465 // Pop deoptimized frame
3466 __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
3467 __ addptr(rsp, rcx);
3469 // rsp should be pointing at the return address to the caller (3)
3471 // Stack bang to make sure there's enough room for these interpreter frames.
3472 if (UseStackBanging) {
3473 __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3474 __ bang_stack_size(rbx, rcx);
3475 }
3477 // Load address of array of frame pcs into rcx
3478 __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3480 // Trash the old pc
3481 __ addptr(rsp, wordSize);
3483 // Load address of array of frame sizes into rsi
3484 __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
3486 // Load counter into rdx
3487 __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
3489 // Pick up the initial fp we should save
3490 __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3492 // Now adjust the caller's stack to make up for the extra locals,
3493 // but record the original sp so that we can save it in the skeletal interpreter
3494 // frame; the stack walking of interpreter_sender will then get the unextended sp
3495 // value and not the "real" sp value.
3497 const Register sender_sp = r8;
3499 __ mov(sender_sp, rsp);
3500 __ movl(rbx, Address(rdi,
3501 Deoptimization::UnrollBlock::
3502 caller_adjustment_offset_in_bytes()));
3503 __ subptr(rsp, rbx);
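// Each loop iteration lays down one skeletal frame (illustrative sketch
// for the non-CC_INTERP path):
//   [ return pc ]                        <- pushptr(Address(rcx, 0))
//   [ saved rbp ]                        <- enter()
//   [ frame body, frame_size - 2 words ] <- subptr(rsp, rbx)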
3505 // Push interpreter frames in a loop
3506 Label loop;
3507 __ bind(loop);
3508 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3509 #ifdef CC_INTERP
3510 __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
3511 #ifdef ASSERT
3512 __ push(0xDEADDEAD); // Make a recognizable pattern
3513 __ push(0xDEADDEAD);
3514 #else /* ASSERT */
3515 __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
3516 #endif /* ASSERT */
3517 #else
3518 __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
3519 #endif // CC_INTERP
3520 __ pushptr(Address(rcx, 0)); // Save return address
3521 __ enter(); // Save old & set new ebp
3522 __ subptr(rsp, rbx); // Prolog
3523 #ifdef CC_INTERP
3524 __ movptr(Address(rbp,
3525 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3526 sender_sp); // Make it walkable
3527 #else /* CC_INTERP */
3528 // This value is corrected by layout_activation_impl
3529 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3530 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
3531 #endif /* CC_INTERP */
3532 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3533 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3534 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3535 __ decrementl(rdx); // Decrement counter
3536 __ jcc(Assembler::notZero, loop);
3537 __ pushptr(Address(rcx, 0)); // Save final return address
3539 // Re-push self-frame
3540 __ enter(); // Save old & set new ebp
3542 // Allocate a full sized register save area.
3543 // Return address and rbp are in place, so we allocate two fewer words.
3544 __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
3546 // Restore frame locals after moving the frame
3547 __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
3548 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3550 // Call C code. Need thread but NOT official VM entry
3551 // crud. We cannot block on this call, no GC can happen. Call should
3552 // restore return values to their stack-slots with the new SP.
3553 //
3554 // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)
3556 // Use rbp because the frames look interpreted now
3557 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3558 // Don't need the precise return PC here, just precise enough to point into this code blob.
3559 address the_pc = __ pc();
3560 __ set_last_Java_frame(noreg, rbp, the_pc);
3562 __ andptr(rsp, -(StackAlignmentInBytes)); // Fix stack alignment as required by ABI
3563 __ mov(c_rarg0, r15_thread);
3564 __ movl(c_rarg1, r14); // second arg: exec_mode
3565 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3566 // Revert SP alignment after call since we're going to do some SP relative addressing below
3567 __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));
3569 // Set an oopmap for the call site
3570 // Use the same PC we used for the last java frame
3571 oop_maps->add_gc_map(the_pc - start,
3572 new OopMap( frame_size_in_words, 0 ));
3574 // Clear fp AND pc
3575 __ reset_last_Java_frame(true, true);
3577 // Collect return values
3578 __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
3579 __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
3580 // I think this is useless (throwing pc?)
3581 __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
3583 // Pop self-frame.
3584 __ leave(); // Epilog
3586 // Jump to interpreter
3587 __ ret(0);
3589 // Make sure all code is generated
3590 masm->flush();
3592 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3593 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3594 }
3596 #ifdef COMPILER2
3597 //------------------------------generate_uncommon_trap_blob--------------------
3598 void SharedRuntime::generate_uncommon_trap_blob() {
3599 // Allocate space for the code
3600 ResourceMark rm;
3601 // Setup code generation tools
3602 CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
3603 MacroAssembler* masm = new MacroAssembler(&buffer);
3605 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
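// (framesize is counted in 32-bit slots, so framesize % 4 == 0 means the
// frame is a whole multiple of 16 bytes, as the ABI requires at calls.)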
3607 address start = __ pc();
3609 // Push self-frame. We get here with a return address on the
3610 // stack, so rsp is 8-byte aligned until we allocate our frame.
3611 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog!
3613 // No callee saved registers. rbp is assumed implicitly saved
3614 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
3616 // The compiler left unloaded_class_index in j_rarg0; move it to where the
3617 // runtime expects it.
3618 __ movl(c_rarg1, j_rarg0);
3620 __ set_last_Java_frame(noreg, noreg, NULL);
3622 // Call C code. Need thread but NOT official VM entry
3623 // crud. We cannot block on this call, no GC can happen. Call should
3624 // capture callee-saved registers as well as return values.
3625 // Thread is in rdi already.
3626 //
3627 // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
3629 __ mov(c_rarg0, r15_thread);
3630 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
3632 // Set an oopmap for the call site
3633 OopMapSet* oop_maps = new OopMapSet();
3634 OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);
3636 // location of rbp is known implicitly by the frame sender code
3638 oop_maps->add_gc_map(__ pc() - start, map);
3640 __ reset_last_Java_frame(false, false);
3642 // Load UnrollBlock* into rdi
3643 __ mov(rdi, rax);
3645 // Pop all the frames we must move/replace.
3646 //
3647 // Frame picture (youngest to oldest)
3648 // 1: self-frame (no frame link)
3649 // 2: deopting frame (no frame link)
3650 // 3: caller of deopting frame (could be compiled/interpreted).
3652 // Pop self-frame. We have no frame, and must rely only on rax and rsp.
3653 __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
3655 // Pop deoptimized frame (int)
3656 __ movl(rcx, Address(rdi,
3657 Deoptimization::UnrollBlock::
3658 size_of_deoptimized_frame_offset_in_bytes()));
3659 __ addptr(rsp, rcx);
3661 // rsp should be pointing at the return address to the caller (3)
3663 // Stack bang to make sure there's enough room for these interpreter frames.
3664 if (UseStackBanging) {
3665 __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
3666 __ bang_stack_size(rbx, rcx);
3667 }
3669 // Load address of array of frame pcs into rcx (address*)
3670 __ movptr(rcx,
3671 Address(rdi,
3672 Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
3674 // Trash the return pc
3675 __ addptr(rsp, wordSize);
3677 // Load address of array of frame sizes into rsi (intptr_t*)
3678 __ movptr(rsi, Address(rdi,
3679 Deoptimization::UnrollBlock::
3680 frame_sizes_offset_in_bytes()));
3682 // Counter
3683 __ movl(rdx, Address(rdi,
3684 Deoptimization::UnrollBlock::
3685 number_of_frames_offset_in_bytes())); // (int)
3687 // Pick up the initial fp we should save
3688 __ movptr(rbp,
3689 Address(rdi,
3690 Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));
3692 // Now adjust the caller's stack to make up for the extra locals, but
3693 // record the original sp so that we can save it in the skeletal
3694 // interpreter frame; the stack walking of interpreter_sender will then
3695 // get the unextended sp value and not the "real" sp value.
3697 const Register sender_sp = r8;
3699 __ mov(sender_sp, rsp);
3700 __ movl(rbx, Address(rdi,
3701 Deoptimization::UnrollBlock::
3702 caller_adjustment_offset_in_bytes())); // (int)
3703 __ subptr(rsp, rbx);
3705 // Push interpreter frames in a loop
3706 Label loop;
3707 __ bind(loop);
3708 __ movptr(rbx, Address(rsi, 0)); // Load frame size
3709 __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
3710 __ pushptr(Address(rcx, 0)); // Save return address
3711 __ enter(); // Save old & set new rbp
3712 __ subptr(rsp, rbx); // Prolog
3713 #ifdef CC_INTERP
3714 __ movptr(Address(rbp,
3715 -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
3716 sender_sp); // Make it walkable
3717 #else // CC_INTERP
3718 __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
3719 sender_sp); // Make it walkable
3720 // This value is corrected by layout_activation_impl
3721 __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
3722 #endif // CC_INTERP
3723 __ mov(sender_sp, rsp); // Pass sender_sp to next frame
3724 __ addptr(rsi, wordSize); // Bump array pointer (sizes)
3725 __ addptr(rcx, wordSize); // Bump array pointer (pcs)
3726 __ decrementl(rdx); // Decrement counter
3727 __ jcc(Assembler::notZero, loop);
3728 __ pushptr(Address(rcx, 0)); // Save final return address
3730 // Re-push self-frame
3731 __ enter(); // Save old & set new rbp
3732 __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
3733 // Prolog
3735 // Use rbp because the frames look interpreted now
3736 // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
3737 // Don't need the precise return PC here, just precise enough to point into this code blob.
3738 address the_pc = __ pc();
3739 __ set_last_Java_frame(noreg, rbp, the_pc);
3741 // Call C code. Need thread but NOT official VM entry
3742 // crud. We cannot block on this call, no GC can happen. Call should
3743 // restore return values to their stack-slots with the new SP.
3744 // Thread is in rdi already.
3745 //
3746 // BasicType unpack_frames(JavaThread* thread, int exec_mode);
3748 __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
3749 __ mov(c_rarg0, r15_thread);
3750 __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
3751 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
3753 // Set an oopmap for the call site
3754 // Use the same PC we used for the last java frame
3755 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
3757 // Clear fp AND pc
3758 __ reset_last_Java_frame(true, true);
3760 // Pop self-frame.
3761 __ leave(); // Epilog
3763 // Jump to interpreter
3764 __ ret(0);
3766 // Make sure all code is generated
3767 masm->flush();
3769 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
3770 SimpleRuntimeFrame::framesize >> 1);
3771 }
3772 #endif // COMPILER2
3775 //------------------------------generate_handler_blob------
3776 //
3777 // Generate a special Compile2Runtime blob that saves all registers,
3778 // and setup oopmap.
3779 //
3780 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3781 assert(StubRoutines::forward_exception_entry() != NULL,
3782 "must be generated before");
3784 ResourceMark rm;
3785 OopMapSet *oop_maps = new OopMapSet();
3786 OopMap* map;
3788 // Allocate space for the code. Setup code generation tools.
3789 CodeBuffer buffer("handler_blob", 2048, 1024);
3790 MacroAssembler* masm = new MacroAssembler(&buffer);
3792 address start = __ pc();
3793 address call_pc = NULL;
3794 int frame_size_in_words;
3795 bool cause_return = (poll_type == POLL_AT_RETURN);
3796 bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
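// (A POLL_AT_VECTOR_LOOP safepoint comes from compiled code that may hold
// live 256-bit YMM values, so the register saver must preserve the full
// vector state as well.)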
3798 // Make room for return address (or push it again)
3799 if (!cause_return) {
3800 __ push(rbx);
3801 }
3803 // Save registers, fpu state, and flags
3804 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
3806 // The following is basically a call_VM. However, we need the precise
3807 // address of the call in order to generate an oopmap. Hence, we do all the
3808 // work ourselves.
3810 __ set_last_Java_frame(noreg, noreg, NULL);
3812 // The return address must always be correct so that the frame constructor never
3813 // sees an invalid pc.
3815 if (!cause_return) {
3816 // overwrite the dummy value we pushed on entry
3817 __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
3818 __ movptr(Address(rbp, wordSize), c_rarg0);
3819 }
3821 // Do the call
3822 __ mov(c_rarg0, r15_thread);
3823 __ call(RuntimeAddress(call_ptr));
3825 // Set an oopmap for the call site. This oopmap will map all
3826 // oop-registers and debug-info registers as callee-saved. This
3827 // will allow deoptimization at this safepoint to find all possible
3828 // debug-info recordings, as well as let GC find all oops.
3830 oop_maps->add_gc_map( __ pc() - start, map);
3832 Label noException;
3834 __ reset_last_Java_frame(false, false);
3836 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3837 __ jcc(Assembler::equal, noException);
3839 // Exception pending
3841 RegisterSaver::restore_live_registers(masm, save_vectors);
3843 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3845 // No exception case
3846 __ bind(noException);
3848 // Normal exit, restore registers and exit.
3849 RegisterSaver::restore_live_registers(masm, save_vectors);
3851 __ ret(0);
3853 // Make sure all code is generated
3854 masm->flush();
3856 // Fill-out other meta info
3857 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3858 }
3860 //
3861 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3862 //
3863 // Generate a stub that calls into vm to find out the proper destination
3864 // of a java call. All the argument registers are live at this point
3865 // but since this is generic code we don't know what they are and the caller
3866 // must do any gc of the args.
3867 //
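// (The destination is typically one of the SharedRuntime::resolve_*_call_C
// entry points; the resolved code entry comes back in rax and the Method*
// via the thread's vm_result_2, both of which are picked up below.)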
3868 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3869 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3871 // allocate space for the code
3872 ResourceMark rm;
3874 CodeBuffer buffer(name, 1000, 512);
3875 MacroAssembler* masm = new MacroAssembler(&buffer);
3877 int frame_size_in_words;
3879 OopMapSet *oop_maps = new OopMapSet();
3880 OopMap* map = NULL;
3882 int start = __ offset();
3884 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
3886 int frame_complete = __ offset();
3888 __ set_last_Java_frame(noreg, noreg, NULL);
3890 __ mov(c_rarg0, r15_thread);
3892 __ call(RuntimeAddress(destination));
3895 // Set an oopmap for the call site.
3896 // We need this not only for callee-saved registers, but also for volatile
3897 // registers that the compiler might be keeping live across a safepoint.
3899 oop_maps->add_gc_map( __ offset() - start, map);
3901 // rax contains the address we are going to jump to assuming no exception got installed
3903 // clear last_Java_sp
3904 __ reset_last_Java_frame(false, false);
3905 // check for pending exceptions
3906 Label pending;
3907 __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
3908 __ jcc(Assembler::notEqual, pending);
3910 // get the returned Method*
3911 __ get_vm_result_2(rbx, r15_thread);
3912 __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
3914 __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
3916 RegisterSaver::restore_live_registers(masm);
3918 // We are back to the original state on entry and ready to go.
3920 __ jmp(rax);
3922 // Pending exception after the safepoint
3924 __ bind(pending);
3926 RegisterSaver::restore_live_registers(masm);
3928 // exception pending => remove activation and forward to exception handler
3930 __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
3932 __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
3933 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
3935 // -------------
3936 // make sure all code is generated
3937 masm->flush();
3939 // return the blob
3940 // frame_size_words or bytes??
3941 return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
3942 }
3945 #ifdef COMPILER2
3946 // This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
3947 //
3948 //------------------------------generate_exception_blob---------------------------
3949 // Creates the exception blob at the end.
3950 // This code is reached by a jump from a compiled method
3951 // (see emit_exception_handler in x86_64.ad file)
3952 //
3953 // Given an exception pc at a call, we call into the runtime for the
3954 // handler in this method. This handler might merely restore state
3955 // (i.e. callee save registers), unwind the frame, and jump to the
3956 // exception handler for the nmethod if there is no Java level handler
3957 // for the nmethod.
3958 //
3959 // This code is entered with a jmp.
3960 //
3961 // Arguments:
3962 // rax: exception oop
3963 // rdx: exception pc
3964 //
3965 // Results:
3966 // rax: exception oop
3967 // rdx: exception pc in caller or ???
3968 // destination: exception handler of caller
3969 //
3970 // Note: the exception pc MUST be at a call (precise debug information)
3971 // Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
3972 //
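// Roughly, as pseudo-code (a sketch only; the real sequence follows):
//   thread->exception_oop = rax;  thread->exception_pc = rdx;
//   handler = handle_exception_C(thread);
//   rax = thread->exception_oop;  rdx = thread->exception_pc;
//   jump(handler);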
3974 void OptoRuntime::generate_exception_blob() {
3975 assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
3976 assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
3977 assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");
3979 assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");
3981 // Allocate space for the code
3982 ResourceMark rm;
3983 // Setup code generation tools
3984 CodeBuffer buffer("exception_blob", 2048, 1024);
3985 MacroAssembler* masm = new MacroAssembler(&buffer);
3988 address start = __ pc();
3990 // Exception pc is 'return address' for stack walker
3991 __ push(rdx);
3992 __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
3994 // Save callee-saved registers. See x86_64.ad.
3996 // rbp is an implicitly saved callee saved register (i.e. the calling
3997 // convention will save/restore it in prolog/epilog). Other than that
3998 // there are no callee save registers now that adapter frames are gone.
4000 __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
4002 // Store exception in Thread object. We cannot pass any arguments to the
4003 // handle_exception call, since we do not want to make any assumption
4004 // about the size of the frame in which the exception happened.
4005 // c_rarg0 is either rdi (Linux) or rcx (Windows).
4006 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
4007 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
4009 // This call does all the hard work. It checks if an exception handler
4010 // exists in the method.
4011 // If so, it returns the handler address.
4012 // If not, it prepares for stack-unwinding, restoring the callee-save
4013 // registers of the frame being removed.
4014 //
4015 // address OptoRuntime::handle_exception_C(JavaThread* thread)
4017 // At a method handle call, the stack may not be properly aligned
4018 // when returning with an exception.
4019 address the_pc = __ pc();
4020 __ set_last_Java_frame(noreg, noreg, the_pc);
4021 __ mov(c_rarg0, r15_thread);
4022 __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
4023 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
4025 // Set an oopmap for the call site. This oopmap will only be used if we
4026 // are unwinding the stack. Hence, all locations will be dead.
4027 // Callee-saved registers will be the same as the frame above (i.e.,
4028 // handle_exception_stub), since they were restored when we got the
4029 // exception.
4031 OopMapSet* oop_maps = new OopMapSet();
4033 oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));
4035 __ reset_last_Java_frame(false, true);
4037 // Restore callee-saved registers
4039 // rbp is an implicitly saved callee saved register (i.e. the calling
4040 // convention will save/restore it in prolog/epilog). Other than that
4041 // there are no callee save registers now that adapter frames are gone.
4043 __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
4045 __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
4046 __ pop(rdx); // No need for exception pc anymore
4048 // rax: exception handler
4050 // Restore SP from BP if the exception PC is a MethodHandle call site.
4051 __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
4052 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save);
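// (The cmov only replaces rsp when is_method_handle_return is set; per the
// comment above, the MH call site kept the caller's SP in rbp_mh_SP_save.)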
4054 // We have a handler in rax (could be deopt blob).
4055 __ mov(r8, rax);
4057 // Get the exception oop
4058 __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
4059 // Get the exception pc in case we are deoptimized
4060 __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
4061 #ifdef ASSERT
4062 __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
4063 __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
4064 #endif
4065 // Clear the exception oop so GC no longer processes it as a root.
4066 __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);
4068 // rax: exception oop
4069 // r8: exception handler
4070 // rdx: exception pc
4071 // Jump to handler
4073 __ jmp(r8);
4075 // Make sure all code is generated
4076 masm->flush();
4078 // Set exception blob
4079 _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
4080 }
4081 #endif // COMPILER2