Tue, 04 Sep 2018 21:25:12 +0800
#7517 mRegP match a0_RegP
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2018, Loongson Technology. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_mips.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  enum { FPU_regs_live = 32 };
  // Capture info about frame layout
  enum layout {
#define DEF_LAYOUT_OFFS(regname) regname ## _off, regname ## H_off,
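    // Each DEF_LAYOUT_OFFS(regname) expands to two enumerators,
    //   regname_off, regnameH_off,
    // i.e. every saved register occupies two consecutive 32-bit (jint)
    // slots, enough for one 64-bit value.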
    DEF_LAYOUT_OFFS(for_16_bytes_aligned)
    DEF_LAYOUT_OFFS(fpr0)
    DEF_LAYOUT_OFFS(fpr1)
    DEF_LAYOUT_OFFS(fpr2)
    DEF_LAYOUT_OFFS(fpr3)
    DEF_LAYOUT_OFFS(fpr4)
    DEF_LAYOUT_OFFS(fpr5)
    DEF_LAYOUT_OFFS(fpr6)
    DEF_LAYOUT_OFFS(fpr7)
    DEF_LAYOUT_OFFS(fpr8)
    DEF_LAYOUT_OFFS(fpr9)
    DEF_LAYOUT_OFFS(fpr10)
    DEF_LAYOUT_OFFS(fpr11)
    DEF_LAYOUT_OFFS(fpr12)
    DEF_LAYOUT_OFFS(fpr13)
    DEF_LAYOUT_OFFS(fpr14)
    DEF_LAYOUT_OFFS(fpr15)
    DEF_LAYOUT_OFFS(fpr16)
    DEF_LAYOUT_OFFS(fpr17)
    DEF_LAYOUT_OFFS(fpr18)
    DEF_LAYOUT_OFFS(fpr19)
    DEF_LAYOUT_OFFS(fpr20)
    DEF_LAYOUT_OFFS(fpr21)
    DEF_LAYOUT_OFFS(fpr22)
    DEF_LAYOUT_OFFS(fpr23)
    DEF_LAYOUT_OFFS(fpr24)
    DEF_LAYOUT_OFFS(fpr25)
    DEF_LAYOUT_OFFS(fpr26)
    DEF_LAYOUT_OFFS(fpr27)
    DEF_LAYOUT_OFFS(fpr28)
    DEF_LAYOUT_OFFS(fpr29)
    DEF_LAYOUT_OFFS(fpr30)
    DEF_LAYOUT_OFFS(fpr31)

    DEF_LAYOUT_OFFS(v0)
    DEF_LAYOUT_OFFS(v1)
    DEF_LAYOUT_OFFS(a0)
    DEF_LAYOUT_OFFS(a1)
    DEF_LAYOUT_OFFS(a2)
    DEF_LAYOUT_OFFS(a3)
    DEF_LAYOUT_OFFS(a4)
    DEF_LAYOUT_OFFS(a5)
    DEF_LAYOUT_OFFS(a6)
    DEF_LAYOUT_OFFS(a7)
    DEF_LAYOUT_OFFS(t0)
    DEF_LAYOUT_OFFS(t1)
    DEF_LAYOUT_OFFS(t2)
    DEF_LAYOUT_OFFS(t3)
    DEF_LAYOUT_OFFS(s0)
    DEF_LAYOUT_OFFS(s1)
    DEF_LAYOUT_OFFS(s2)
    DEF_LAYOUT_OFFS(s3)
    DEF_LAYOUT_OFFS(s4)
    DEF_LAYOUT_OFFS(s5)
    DEF_LAYOUT_OFFS(s6)
    DEF_LAYOUT_OFFS(s7)
    DEF_LAYOUT_OFFS(t8)
    DEF_LAYOUT_OFFS(t9)

    DEF_LAYOUT_OFFS(gp)
    DEF_LAYOUT_OFFS(fp)
    DEF_LAYOUT_OFFS(return)
    reg_save_size
  };

 public:

  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
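  // The layout offsets above are in 32-bit (jint) slots; dividing by 2
  // converts them to 64-bit word offsets for the accessors below.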
  static int raOffset(void) { return return_off / 2; }
  // Rmethod
  static int methodOffset(void) { return s3_off / 2; }

  static int v0Offset(void) { return v0_off / 2; }
  static int v1Offset(void) { return v1_off / 2; }

  static int fpResultOffset(void) { return fpr0_off / 2; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {

  /*
  int frame_words = reg_save_size + additional_frame_words;
  int frame_size_in_bytes = frame_words * wordSize;
  *total_frame_words = frame_words;
  */
  // Always make the frame size 16-byte aligned
  int frame_size_in_bytes = round_to(additional_frame_words * wordSize +
                                     reg_save_size * BytesPerInt, 16);
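  // For example, with additional_frame_words == 0 this is
  // round_to(reg_save_size * 4, 16): the save area is counted in 4-byte
  // slots and rounded up to the next 16-byte boundary.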
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // The caller will allocate additional_frame_words
  int additional_frame_slots = additional_frame_words * wordSize / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller already has the return address slot on the stack.
  // We push ebp twice in this sequence because we want the real ebp
  // to be under the return address like a normal enter and we want to use pushad.
  // We push by hand instead of using push.

  __ daddiu(SP, SP, -reg_save_size * jintSize);
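  // Each *_off value below counts 4-byte (jint) slots, so every store
  // lands at off * jintSize bytes above the new SP.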

  __ sdc1(F0, SP, fpr0_off * jintSize);   __ sdc1(F1, SP, fpr1_off * jintSize);
  __ sdc1(F2, SP, fpr2_off * jintSize);   __ sdc1(F3, SP, fpr3_off * jintSize);
  __ sdc1(F4, SP, fpr4_off * jintSize);   __ sdc1(F5, SP, fpr5_off * jintSize);
  __ sdc1(F6, SP, fpr6_off * jintSize);   __ sdc1(F7, SP, fpr7_off * jintSize);
  __ sdc1(F8, SP, fpr8_off * jintSize);   __ sdc1(F9, SP, fpr9_off * jintSize);
  __ sdc1(F10, SP, fpr10_off * jintSize); __ sdc1(F11, SP, fpr11_off * jintSize);
  __ sdc1(F12, SP, fpr12_off * jintSize); __ sdc1(F13, SP, fpr13_off * jintSize);
  __ sdc1(F14, SP, fpr14_off * jintSize); __ sdc1(F15, SP, fpr15_off * jintSize);
  __ sdc1(F16, SP, fpr16_off * jintSize); __ sdc1(F17, SP, fpr17_off * jintSize);
  __ sdc1(F18, SP, fpr18_off * jintSize); __ sdc1(F19, SP, fpr19_off * jintSize);
  __ sdc1(F20, SP, fpr20_off * jintSize); __ sdc1(F21, SP, fpr21_off * jintSize);
  __ sdc1(F22, SP, fpr22_off * jintSize); __ sdc1(F23, SP, fpr23_off * jintSize);
  __ sdc1(F24, SP, fpr24_off * jintSize); __ sdc1(F25, SP, fpr25_off * jintSize);
  __ sdc1(F26, SP, fpr26_off * jintSize); __ sdc1(F27, SP, fpr27_off * jintSize);
  __ sdc1(F28, SP, fpr28_off * jintSize); __ sdc1(F29, SP, fpr29_off * jintSize);
  __ sdc1(F30, SP, fpr30_off * jintSize); __ sdc1(F31, SP, fpr31_off * jintSize);

  __ sd(V0, SP, v0_off * jintSize); __ sd(V1, SP, v1_off * jintSize);
  __ sd(A0, SP, a0_off * jintSize); __ sd(A1, SP, a1_off * jintSize);
  __ sd(A2, SP, a2_off * jintSize); __ sd(A3, SP, a3_off * jintSize);
  __ sd(A4, SP, a4_off * jintSize); __ sd(A5, SP, a5_off * jintSize);
  __ sd(A6, SP, a6_off * jintSize); __ sd(A7, SP, a7_off * jintSize);
  __ sd(T0, SP, t0_off * jintSize);
  __ sd(T1, SP, t1_off * jintSize);
  __ sd(T2, SP, t2_off * jintSize);
  __ sd(T3, SP, t3_off * jintSize);
  __ sd(S0, SP, s0_off * jintSize);
  __ sd(S1, SP, s1_off * jintSize);
  __ sd(S2, SP, s2_off * jintSize);
  __ sd(S3, SP, s3_off * jintSize);
  __ sd(S4, SP, s4_off * jintSize);
  __ sd(S5, SP, s5_off * jintSize);
  __ sd(S6, SP, s6_off * jintSize);
  __ sd(S7, SP, s7_off * jintSize);

  __ sd(T8, SP, t8_off * jintSize);
  __ sd(T9, SP, t9_off * jintSize);

  __ sd(GP, SP, gp_off * jintSize);
  __ sd(FP, SP, fp_off * jintSize);
  __ sd(RA, SP, return_off * jintSize);
  __ daddi(FP, SP, fp_off * jintSize);

  OopMapSet *oop_maps = new OopMapSet();
  //OopMap* map = new OopMap( frame_words, 0 );
  OopMap* map = new OopMap(frame_size_in_slots, 0);

  //#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
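  // STACK_OFFSET biases a save-area slot index by the additional_frame_slots
  // the caller will allocate, so the OopMap describes each saved register
  // relative to the bottom of the completed frame.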
  map->set_callee_saved(STACK_OFFSET(v0_off), V0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(v1_off), V1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a0_off), A0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a1_off), A1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a2_off), A2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a3_off), A3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a4_off), A4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a5_off), A5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a6_off), A6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(a7_off), A7->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(t0_off), T0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(t1_off), T1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(t2_off), T2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(t3_off), T3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s0_off), S0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s1_off), S1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s2_off), S2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s3_off), S3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s4_off), S4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s5_off), S5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s6_off), S6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(s7_off), S7->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(t8_off), T8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(t9_off), T9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(gp_off), GP->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fp_off), FP->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(return_off), RA->as_VMReg());

  map->set_callee_saved(STACK_OFFSET(fpr0_off), F0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr1_off), F1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr2_off), F2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr3_off), F3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr4_off), F4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr5_off), F5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr6_off), F6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr7_off), F7->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr8_off), F8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr9_off), F9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr10_off), F10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr11_off), F11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr12_off), F12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr13_off), F13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr14_off), F14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr15_off), F15->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr16_off), F16->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr17_off), F17->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr18_off), F18->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr19_off), F19->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr20_off), F20->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr21_off), F21->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr22_off), F22->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr23_off), F23->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr24_off), F24->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr25_off), F25->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr26_off), F26->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr27_off), F27->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr28_off), F28->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr29_off), F29->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr30_off), F30->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(fpr31_off), F31->as_VMReg());

#undef STACK_OFFSET
  return map;
}

// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  __ ldc1(F0, SP, fpr0_off * jintSize);   __ ldc1(F1, SP, fpr1_off * jintSize);
  __ ldc1(F2, SP, fpr2_off * jintSize);   __ ldc1(F3, SP, fpr3_off * jintSize);
  __ ldc1(F4, SP, fpr4_off * jintSize);   __ ldc1(F5, SP, fpr5_off * jintSize);
  __ ldc1(F6, SP, fpr6_off * jintSize);   __ ldc1(F7, SP, fpr7_off * jintSize);
  __ ldc1(F8, SP, fpr8_off * jintSize);   __ ldc1(F9, SP, fpr9_off * jintSize);
  __ ldc1(F10, SP, fpr10_off * jintSize); __ ldc1(F11, SP, fpr11_off * jintSize);
  __ ldc1(F12, SP, fpr12_off * jintSize); __ ldc1(F13, SP, fpr13_off * jintSize);
  __ ldc1(F14, SP, fpr14_off * jintSize); __ ldc1(F15, SP, fpr15_off * jintSize);
  __ ldc1(F16, SP, fpr16_off * jintSize); __ ldc1(F17, SP, fpr17_off * jintSize);
  __ ldc1(F18, SP, fpr18_off * jintSize); __ ldc1(F19, SP, fpr19_off * jintSize);
  __ ldc1(F20, SP, fpr20_off * jintSize); __ ldc1(F21, SP, fpr21_off * jintSize);
  __ ldc1(F22, SP, fpr22_off * jintSize); __ ldc1(F23, SP, fpr23_off * jintSize);
  __ ldc1(F24, SP, fpr24_off * jintSize); __ ldc1(F25, SP, fpr25_off * jintSize);
  __ ldc1(F26, SP, fpr26_off * jintSize); __ ldc1(F27, SP, fpr27_off * jintSize);
  __ ldc1(F28, SP, fpr28_off * jintSize); __ ldc1(F29, SP, fpr29_off * jintSize);
  __ ldc1(F30, SP, fpr30_off * jintSize); __ ldc1(F31, SP, fpr31_off * jintSize);

  __ ld(V0, SP, v0_off * jintSize); __ ld(V1, SP, v1_off * jintSize);
  __ ld(A0, SP, a0_off * jintSize); __ ld(A1, SP, a1_off * jintSize);
  __ ld(A2, SP, a2_off * jintSize); __ ld(A3, SP, a3_off * jintSize);
  __ ld(A4, SP, a4_off * jintSize); __ ld(A5, SP, a5_off * jintSize);
  __ ld(A6, SP, a6_off * jintSize); __ ld(A7, SP, a7_off * jintSize);
  __ ld(T0, SP, t0_off * jintSize);
  __ ld(T1, SP, t1_off * jintSize);
  __ ld(T2, SP, t2_off * jintSize);
  __ ld(T3, SP, t3_off * jintSize);
  __ ld(S0, SP, s0_off * jintSize);
  __ ld(S1, SP, s1_off * jintSize);
  __ ld(S2, SP, s2_off * jintSize);
  __ ld(S3, SP, s3_off * jintSize);
  __ ld(S4, SP, s4_off * jintSize);
  __ ld(S5, SP, s5_off * jintSize);
  __ ld(S6, SP, s6_off * jintSize);
  __ ld(S7, SP, s7_off * jintSize);

  __ ld(T8, SP, t8_off * jintSize);
  __ ld(T9, SP, t9_off * jintSize);

  __ ld(GP, SP, gp_off * jintSize);
  __ ld(FP, SP, fp_off * jintSize);
  __ ld(RA, SP, return_off * jintSize);

  __ addiu(SP, SP, reg_save_size * jintSize);
}

// Pop the current frame and restore the registers that might be holding
// a result.
// FIXME, what if the result is a float?
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-saved register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration so only result registers need to be restored here.

  __ ld(V0, SP, v0_off * jintSize);
  __ ld(V1, SP, v1_off * jintSize);
  __ addiu(SP, SP, return_off * jintSize);
}

// Is vector's size (in bytes) bigger than a size saved by default?
// 16 bytes XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.

static int reg2offset_in(VMReg r) {
  // Account for the saved FP and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 2 * VMRegImpl::slots_per_word) * VMRegImpl::stack_slot_size; // + 2 * VMRegImpl::stack_slot_size);
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// quantities. Values less than SharedInfo::stack0 are registers, those above
// refer to 4-byte stack slots. All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp),
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher. Registers
// up to RegisterImpl::number_of_registers are the 32-bit
// integer registers.

// Pass the first nine oop/int/long args in registers T0, A0 - A7.
// Pass the first eight float/double args in registers F12 - F19.
// Remaining arguments are passed on the stack.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64 bit build.

// ---------------------------------------------------------------------------
// The compiled Java calling convention.
// Pass the first nine oop/int/long args in registers T0, A0 - A7.
// Pass the first eight float/double args in registers F12 - F19.

int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  //static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
  static const Register INT_ArgReg[Argument::n_register_parameters + 1] = {
    T0, A0, A1, A2, A3, A4, A5, A6, A7
  };
  //static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters] = {
    F12, F13, F14, F15, F16, F17, F18, F19
  };

  uint args = 0;
  uint stk_args = 0; // inc by 2 each time
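  // Note that the single args counter indexes both INT_ArgReg and
  // FP_ArgReg, so an int argument also consumes a float register
  // position and vice versa: registers are used strictly in
  // signature order.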

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_VOID:
        // halves of T_LONG or T_DOUBLE
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (args < Argument::n_register_parameters) {
          regs[i].set1(INT_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
        if (args < Argument::n_register_parameters) {
          regs[i].set2(INT_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (args < Argument::n_float_register_parameters) {
          regs[i].set1(FP_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        if (args < Argument::n_float_register_parameters) {
          regs[i].set2(FP_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  return round_to(stk_args, 2);
}

// Helper class mostly to avoid passing masm everywhere, and handle store
// displacement overflow logic for LP64
class AdapterGenerator {
  MacroAssembler *masm;
#ifdef _LP64
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }
#endif // _LP64

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

#ifdef _LP64
  // On _LP64 argument slot values are loaded first into a register
  // because they might not fit into displacement.
  Register arg_slot(const int st_off);
  Register next_arg_slot(const int st_off);
#else
  int arg_slot(const int st_off)      { return arg_offset(st_off); }
  int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
#endif // _LP64

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  //void tag_stack(const BasicType sig, int st_off);
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};

// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ verify_oop(Rmethod);
  __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset()));
  __ beq(AT, R0, L);
  __ delayed()->nop();
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee.
  // V0 isn't live, so capture the return address while we easily can.
  __ move(V0, RA);

  __ pushad();
#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  __ empty_FPU_stack();
#endif /* COMPILER2 */

  // VM needs caller's callsite
  // VM needs target method

  __ move(A0, Rmethod);
  __ move(A1, V0);
  // we should preserve the return address
  __ verify_oop(Rmethod);
  __ move(S0, SP);
  __ move(AT, -(StackAlignmentInBytes)); // align the stack
  __ andr(SP, SP, AT);
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite),
          relocInfo::runtime_call_type);

  __ delayed()->nop();
  __ move(SP, S0);
  __ popad();
  __ bind(L);
}

#ifdef _LP64
Register AdapterGenerator::arg_slot(const int st_off) {
  Unimplemented();
}

Register AdapterGenerator::next_arg_slot(const int st_off) {
  Unimplemented();
}
#endif // _LP64

// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
  Unimplemented();
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  Unimplemented();
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  Unimplemented();
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
  Unimplemented();
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  Unimplemented();
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

#ifdef COMPILER2
  __ empty_FPU_stack();
#endif /* COMPILER2 */
  // this is for native ?
  // Since all args are passed on the stack,
  // total_args_passed * Interpreter::stackElementSize is the space we need.
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2 * wordSize);
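  // A worked example (assuming the 8-byte interpreter stack element used
  // on this 64-bit port): 3 args need 24 bytes, which rounds up to 32 so
  // SP stays 16-byte aligned.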

  // Get return address
  __ move(V0, RA);
  // set senderSP value
  // refer to interpreter_mips.cpp:generate_asm_entry
  __ move(Rsender, SP);
  __ addi(SP, SP, -extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // st_off points to lowest address on stack.
    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
    // Say 4 args:
    //   i   st_off
    //   0   12     T_LONG
    //   1    8     T_VOID
    //   2    4     T_OBJECT
    //   3    0     T_BOOL
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory use fpu stack top
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        __ ld_ptr(AT, SP, ld_off);
        __ st_ptr(AT, SP, st_off);
      } else {
        int next_off = st_off - Interpreter::stackElementSize;
        __ ld_ptr(AT, SP, ld_off);
        __ st_ptr(AT, SP, st_off);

        /* Ref to is_Register condition */
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
          __ st_ptr(AT, SP, st_off - 8);
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ sd(r, SP, st_off);
      } else {
        //FIXME, mips will not enter here
        // long/double in gpr
        __ sd(r, SP, st_off);
        /* Jin: In [java/util/zip/ZipFile.java]

            private static native long open(String name, int mode, long lastModified);
            private static native int getTotal(long jzfile);
         *
         * We need to transfer T_LONG parameters from a compiled method to a native method.
         * It's a complex process:
         *
         * Caller -> lir_static_call -> gen_resolve_stub
              -> -- resolve_static_call_C
                 `- gen_c2i_adapter()  [*]
                      |
                      `- AdapterHandlerLibrary::get_create_adapter_index
              -> generate_native_entry
                 -> InterpreterRuntime::SignatureHandlerGenerator::pass_long [**]

         * In [**], the T_LONG parameter is stored in the stack as:

              (high)
               |         |
               -----------
               | 8 bytes |
               | (void)  |
               -----------
               | 8 bytes |
               | (long)  |
               -----------
               |         |
              (low)
         *
         * However, the sequence is reversed here:
         *
              (high)
               |         |
               -----------
               | 8 bytes |
               | (long)  |
               -----------
               | 8 bytes |
               | (void)  |
               -----------
               |         |
              (low)
         *
         * So I stored another 8 bytes in the T_VOID slot. It then can be accessed from generate_native_entry().
         */
        if (sig_bt[i] == T_LONG)
          __ sd(r, SP, st_off - 8);
      }
    } else if (r_1->is_FloatRegister()) {
      assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register");

      FloatRegister fr = r_1->as_FloatRegister();
      if (sig_bt[i] == T_FLOAT)
        __ swc1(fr, SP, st_off);
      else {
        __ sdc1(fr, SP, st_off);
        __ sdc1(fr, SP, st_off - 8);  /* T_DOUBLE needs two slots */
      }
    }
  }

  // Schedule the branch target address early.
  __ ld_ptr(AT, Rmethod, in_bytes(Method::interpreter_entry_offset()));
  // And repush original return address
  __ move(RA, V0);
  __ jr(AT);
  __ delayed()->nop();
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the mips side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  __ move(T9, SP);

  // Cut-out for having no stack args. Since up to nine int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0. Those in
    // registers are below. By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.
    // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
    // Convert 4-byte stack slots to words.
    // did mips need round? FIXME aoqi
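    // Each compiled arg slot is 4 bytes; convert the slot count to 8-byte
    // words, then round up to an even number of words so SP stays 16-byte
    // aligned after the adjustment below.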
    comp_words_on_stack = round_to(comp_args_on_stack * 4, wordSize) >> LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ daddi(SP, SP, -comp_words_on_stack * wordSize);
  }

  // Align the outgoing SP
  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);
  // push the return address on the stack (note that pushing, rather
  // than storing it, yields the correct frame alignment for the callee)
  // Put saved SP in another register
  const Register saved_sp = V0;
  __ move(saved_sp, T9);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld(T9, Rmethod, in_bytes(Method::from_compiled_offset()));

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    //FIXME. aoqi. just delete the assert
    //assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - 1 - i) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to
      // account for return address)
      // NOTICE HERE!!!! I sub a wordSize here
      int st_off = regs[i].first()->reg2stack() * VMRegImpl::stack_slot_size;
      //+ wordSize;

      // We can use esi as a temp here because compiled code doesn't need
      // esi as an input, and if we end up going thru a c2i because of a
      // miss a reasonable value of esi will be generated.
      if (!r_2->is_valid()) {
        __ ld(AT, saved_sp, ld_off);
        __ sd(AT, SP, st_off);
      } else {
        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        // st_off is LSW (i.e. reg.first())
        /*
        __ ld(AT, saved_sp, next_off);
        __ sd(AT, SP, st_off);
        __ ld(AT, saved_sp, ld_off);
        __ sd(AT, SP, st_off + wordSize);
        */

        /* 2012/4/9 Jin
         * [./org/eclipse/swt/graphics/GC.java]
         * void drawImageXRender(Image srcImage, int srcX, int srcY, int srcWidth, int srcHeight,
                int destX, int destY, int destWidth, int destHeight,
                boolean simple,
                int imgWidth, int imgHeight,
                long maskPixmap,    <-- Pass T_LONG in stack
                int maskType);
         * Before this modification, Eclipse displays icons with solid black background.
         */
        __ ld(AT, saved_sp, ld_off);
        if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
          __ ld(AT, saved_sp, ld_off - 8);
        __ sd(AT, SP, st_off);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      // assert(r != eax, "must be different");
      if (r_2->is_valid()) {
        // assert(r_2->as_Register() != eax, "need another temporary register");
        // Remember r_1 is low address (and LSB on mips)
        // So r_2 gets loaded from high address regardless of the platform
        assert(r_2->as_Register() == r_1->as_Register(), "");
        __ ld(r, saved_sp, ld_off);

        /* Jin:
         *
         * For T_LONG type, the real layout is as below:

             (high)
              |         |
              -----------
              | 8 bytes |
              | (void)  |
              -----------
              | 8 bytes |
              | (long)  |
              -----------
              |         |
             (low)
         *
         * We should load the low 8 bytes.
         */
        if (sig_bt[i] == T_LONG)
          __ ld(r, saved_sp, ld_off - 8);
      } else {
        __ lw(r, saved_sp, ld_off);
      }
    } else if (r_1->is_FloatRegister()) {  // Float Register
      assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register");

      FloatRegister fr = r_1->as_FloatRegister();
      if (sig_bt[i] == T_FLOAT)
        __ lwc1(fr, saved_sp, ld_off);
      else {
        __ ldc1(fr, saved_sp, ld_off);
        __ ldc1(fr, saved_sp, ld_off - 8);
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  __ get_thread(T8);
  __ sd(Rmethod, T8, in_bytes(JavaThread::callee_target_offset()));

  // move methodOop to V0 in case we end up in an c2i adapter.
  // the c2i adapters expect methodOop in V0 (c2) because c2's
  // resolve stubs return the result (the method) in V0.
  // I'd love to fix this.
  __ move(V0, Rmethod);
  __ jr(T9);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter. On entry we know Rmethod holds the methodOop. The
  // args start out packed in the compiled layout. They need to be unpacked
  // into the interpreter layout. This will almost always require some stack
  // space. We grow the current (compiled) stack, then repack the args. We
  // finally end in a jump to the generic interpreter entry point. On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  {
    Register holder = T1;
    Register receiver = T0;
    Register temp = T8;
    address ic_miss = SharedRuntime::get_ic_miss_stub();

    Label missed;

    __ verify_oop(holder);
    // add for compressed oops
    __ load_klass(temp, receiver);
    __ verify_oop(temp);

    __ ld_ptr(AT, holder, CompiledICHolder::holder_klass_offset());
    __ ld_ptr(Rmethod, holder, CompiledICHolder::holder_metadata_offset());
    __ bne(AT, temp, missed);
    __ delayed()->nop();
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset()));
    __ beq(AT, R0, skip_fixup);
    __ delayed()->nop();
    __ bind(missed);

    __ jmp(ic_miss, relocInfo::runtime_call_type);
    __ delayed()->nop();
  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on MIPS");
  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  static const Register INT_ArgReg[Argument::n_register_parameters] = {
    A0, A1, A2, A3, A4, A5, A6, A7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters] = {
    F12, F13, F14, F15, F16, F17, F18, F19
  };
  uint args = 0;
  uint stk_args = 0; // inc by 2 each time

  /* Example:
  --- n java.lang.UNIXProcess::forkAndExec
  private native int forkAndExec(byte[] prog,
                                 byte[] argBlock, int argc,
                                 byte[] envBlock, int envc,
                                 byte[] dir,
                                 boolean redirectErrorStream,
                                 FileDescriptor stdin_fd,
                                 FileDescriptor stdout_fd,
                                 FileDescriptor stderr_fd)

  JNIEXPORT jint JNICALL
  Java_java_lang_UNIXProcess_forkAndExec(JNIEnv *env,
                                         jobject process,
                                         jbyteArray prog,
                                         jbyteArray argBlock, jint argc,
                                         jbyteArray envBlock, jint envc,
                                         jbyteArray dir,
                                         jboolean redirectErrorStream,
                                         jobject stdin_fd,
                                         jobject stdout_fd,
                                         jobject stderr_fd)

  ::c_calling_convention
   0:    // env                  <-- a0
   1: L  // klass/obj            <-- t0 => a1
   2: [  // prog[]               <-- a0 => a2
   3: [  // argBlock[]           <-- a1 => a3
   4: I  // argc
   5: [  // envBlock[]           <-- a3 => a5
   6: I  // envc
   7: [  // dir[]                <-- a5 => a7
   8: Z  // redirectErrorStream      a6 => sp[0]
   9: L  // stdin                    a7 => sp[8]
  10: L  // stdout                   fp[16] => sp[16]
  11: L  // stderr                   fp[24] => sp[24]
  */
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (args < Argument::n_register_parameters) {
          regs[i].set1(INT_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (args < Argument::n_register_parameters) {
          regs[i].set2(INT_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (args < Argument::n_float_register_parameters) {
          regs[i].set1(FP_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        if (args < Argument::n_float_register_parameters) {
          regs[i].set2(FP_ArgReg[args++]->as_VMReg());
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }

  return round_to(stk_args, 2);
}

// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ swc1(FSF, FP, -wordSize);
      break;
    case T_DOUBLE:
      __ sdc1(FSF, FP, -wordSize);
      break;
    case T_VOID:
      break;
    case T_LONG:
      __ sd(V0, FP, -wordSize);
      break;
    case T_OBJECT:
    case T_ARRAY:
      __ sd(V0, FP, -wordSize);
      break;
    default:
      __ sw(V0, FP, -wordSize);
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below frame pointer
  // which by this time is free to use
  switch (ret_type) {
    case T_FLOAT:
      __ lwc1(FSF, FP, -wordSize);
      break;
    case T_DOUBLE:
      __ ldc1(FSF, FP, -wordSize);
      break;
    case T_LONG:
      __ ld(V0, FP, -wordSize);
      break;
    case T_VOID:
      break;
    case T_OBJECT:
    case T_ARRAY:
      __ ld(V0, FP, -wordSize);
      break;
    default:
      __ lw(V0, FP, -wordSize);
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for (int i = first_arg; i < arg_count; i++) {
    if (args[i].first()->is_Register()) {
      __ push(args[i].first()->as_Register());
    } else if (args[i].first()->is_FloatRegister()) {
      __ push(args[i].first()->as_FloatRegister());
    }
  }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
  for (int i = arg_count - 1; i >= first_arg; i--) {
    if (args[i].first()->is_Register()) {
      __ pop(args[i].first()->as_Register());
    } else if (args[i].first()->is_FloatRegister()) {
      __ pop(args[i].first()->as_FloatRegister());
    }
  }
}

// A simple move of an integer-like type
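// (On MIPS64, lw sign-extends the loaded 32-bit value to 64 bits, and the
// full word is then stored back with sd.)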
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ lw(AT, FP, reg2offset_in(src.first()));
      __ sd(AT, SP, reg2offset_out(dst.first()));
    } else {
      // stack to reg
      __ lw(dst.first()->as_Register(), FP, reg2offset_in(src.first()));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ sd(src.first()->as_Register(), SP, reg2offset_out(dst.first()));
  } else {
    if (dst.first() != src.first()) {
      __ move(dst.first()->as_Register(), src.first()->as_Register()); // fujie error:dst.first()
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle
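  // A handle here is just the address of the stack slot holding the oop;
  // a NULL oop is passed as a NULL handle rather than as a pointer to a
  // slot containing NULL, which is what the beq/lea pairs below implement.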

  //FIXME, for mips, dst can be register
  if (src.first()->is_stack()) {
    // Oop is already on the stack as an argument
    Register rHandle = V0;
    Label nil;
    __ xorr(rHandle, rHandle, rHandle);
    __ ld(AT, FP, reg2offset_in(src.first()));
    __ beq(AT, R0, nil);
    __ delayed()->nop();
    __ lea(rHandle, Address(FP, reg2offset_in(src.first())));
    __ bind(nil);
    if (dst.first()->is_stack())
      __ sd(rHandle, SP, reg2offset_out(dst.first()));
    else
      __ move(dst.first()->as_Register(), rHandle);
    // if dst is register
    //FIXME, do mips need out preserve stack slots?
    int offset_in_older_frame = src.first()->reg2stack()
      + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame
        + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
  } else {
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles
    const Register rOop = src.first()->as_Register();
    assert((rOop->encoding() >= A0->encoding()) && (rOop->encoding() <= T0->encoding()), "wrong register");
    const Register rHandle = V0;
    // Important: refer to java_calling_convention
    int oop_slot = (rOop->encoding() - A0->encoding()) * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    Label skip;
    __ sd(rOop, SP, offset);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ xorr(rHandle, rHandle, rHandle);
    __ beq(rOop, R0, skip);
    __ delayed()->nop();
    __ lea(rHandle, Address(SP, offset));
    __ bind(skip);
    // Store the handle parameter
    if (dst.first()->is_stack())
      __ sd(rHandle, SP, reg2offset_out(dst.first()));
    else
      __ move(dst.first()->as_Register(), rHandle);
    // if dst is register

    if (is_receiver) {
      *receiver_offset = offset;
    }
  }
}

// A float arg may have to do float reg int reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ lwc1(F12, FP, reg2offset_in(src.first()));
      __ swc1(F12, SP, reg2offset_out(dst.first()));
    } else {
      __ lwc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first()));
    }
  } else {
    // reg to stack
    if (dst.first()->is_stack()) {
      __ swc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
    } else {
      __ mov_s(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // A long_move VMRegPair is either a single 64-bit register or
  // two stack slots (possibly unaligned); the code below handles
  // both the register and the stack cases for src and dst.

  if (src.first()->is_stack()) {
    assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
    if (dst.first()->is_stack()) {
      __ ld(AT, FP, reg2offset_in(src.first()));
      __ sd(AT, SP, reg2offset_out(dst.first()));
    } else {
      __ ld(dst.first()->as_Register(), FP, reg2offset_in(src.first()));
    }
  } else {
    if (dst.first()->is_stack()) {
      __ sd(src.first()->as_Register(), SP, reg2offset_out(dst.first()));
    } else {
      __ move(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // Because of the calling convention, src is either
  //   1: a single physical register, or
  //   2: two stack slots (possibly unaligned),
  // and dst can likewise be a register or a pair of stack slots.

  if (src.first()->is_stack()) {
    // source is all stack
    if (dst.first()->is_stack()) {
      __ ldc1(F12, FP, reg2offset_in(src.first()));
      __ sdc1(F12, SP, reg2offset_out(dst.first()));
    } else {
      __ ldc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first()));
    }
  } else {
    // reg to stack
    // No worries about stack alignment
    if (dst.first()->is_stack()) {
      __ sdc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
    } else {
      __ mov_d(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    }
  }
}

static void verify_oop_args(MacroAssembler* masm,
                            methodHandle method,
                            const BasicType* sig_bt,
                            const VMRegPair* regs) {
  Register temp_reg = T9;  // not part of any compiled calling seq
  if (VerifyOops) {
    for (int i = 0; i < method->size_of_parameters(); i++) {
      if (sig_bt[i] == T_OBJECT ||
          sig_bt[i] == T_ARRAY) {
        VMReg r = regs[i].first();
        assert(r->is_valid(), "bad oop arg");
        if (r->is_stack()) {
          __ ld(temp_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
          __ verify_oop(temp_reg);
        } else {
          __ verify_oop(r->as_Register());
        }
      }
    }
  }
}

static void gen_special_dispatch(MacroAssembler* masm,
                                 methodHandle method,
                                 const BasicType* sig_bt,
                                 const VMRegPair* regs) {
  verify_oop_args(masm, method, sig_bt, regs);
  vmIntrinsics::ID iid = method->intrinsic_id();

  // Now write the args into the outgoing interpreter space
  bool has_receiver = false;
  Register receiver_reg = noreg;
  int member_arg_pos = -1;
  Register member_reg = noreg;
  int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
  if (ref_kind != 0) {
    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
    member_reg = S3;  // known to be free at this point
    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
  } else if (iid == vmIntrinsics::_invokeBasic) {
    has_receiver = true;
  } else {
    fatal(err_msg_res("unexpected intrinsic id %d", iid));
  }

  if (member_reg != noreg) {
    // Load the member_arg into register, if necessary.
    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
    VMReg r = regs[member_arg_pos].first();
    if (r->is_stack()) {
      __ ld(member_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      member_reg = r->as_Register();
    }
  }

  if (has_receiver) {
    // Make sure the receiver is loaded into a register.
    assert(method->size_of_parameters() > 0, "oob");
    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
    VMReg r = regs[0].first();
    assert(r->is_valid(), "bad receiver arg");
    if (r->is_stack()) {
      // Porting note: This assumes that compiled calling conventions always
      // pass the receiver oop in a register. If this is not true on some
      // platform, pick a temp and load the receiver from stack.
      fatal("receiver always in a register");
      receiver_reg = SSR;  // known to be free at this point
      __ ld(receiver_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
    } else {
      // no data motion is needed
      receiver_reg = r->as_Register();
    }
  }

  // Figure out which address we are really jumping to:
  MethodHandles::generate_method_handle_dispatch(masm, iid,
                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method. The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                                                methodHandle method,
                                                int compile_id,
                                                BasicType* in_sig_bt,
                                                VMRegPair* in_regs,
                                                BasicType ret_type) {
  if (method->is_method_handle_intrinsic()) {
    vmIntrinsics::ID iid = method->intrinsic_id();
    intptr_t start = (intptr_t)__ pc();
    int vep_offset = ((intptr_t)__ pc()) - start;
    gen_special_dispatch(masm,
                         method,
                         in_sig_bt,
                         in_regs);
    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
    __ flush();
    int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
    return nmethod::new_native_nmethod(method,
                                       compile_id,
                                       masm->code(),
                                       vep_offset,
                                       frame_complete,
                                       stack_slots / VMRegImpl::slots_per_word,
                                       in_ByteSize(-1),
                                       in_ByteSize(-1),
                                       (OopMapSet*)NULL);
  }
  bool is_critical_native = true;
  address native_func = method->critical_native_function();
  if (native_func == NULL) {
    native_func = method->native_function();
    is_critical_native = false;
  }
  assert(native_func != NULL, "must have function");

  // Native nmethod wrappers never take possession of the oop arguments.
  // So the caller will gc the arguments. The only thing we need an
  // oopMap for is if the call is static.
  //
  // An OopMap for lock (and class if static), and one for the VM call itself
  OopMapSet *oop_maps = new OopMapSet();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method).

  const int total_in_args = method->size_of_parameters();
  int total_c_args = total_in_args;
  if (!is_critical_native) {
    total_c_args += 1;
    if (method->is_static()) {
      total_c_args++;
    }
  } else {
    for (int i = 0; i < total_in_args; i++) {
      if (in_sig_bt[i] == T_ARRAY) {
        total_c_args++;
      }
    }
  }
1521 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1522 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1523 BasicType* in_elem_bt = NULL;
1525 int argc = 0;
1526 if (!is_critical_native) {
1527 out_sig_bt[argc++] = T_ADDRESS;
1528 if (method->is_static()) {
1529 out_sig_bt[argc++] = T_OBJECT;
1530 }
1532 for (int i = 0; i < total_in_args ; i++ ) {
1533 out_sig_bt[argc++] = in_sig_bt[i];
1534 }
1535 } else {
1536 Thread* THREAD = Thread::current();
1537 in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1538 SignatureStream ss(method->signature());
1539 for (int i = 0; i < total_in_args ; i++ ) {
1540 if (in_sig_bt[i] == T_ARRAY) {
1541 // Arrays are passed as int, elem* pair
1542 out_sig_bt[argc++] = T_INT;
1543 out_sig_bt[argc++] = T_ADDRESS;
1544 Symbol* atype = ss.as_symbol(CHECK_NULL);
1545 const char* at = atype->as_C_string();
1546 if (strlen(at) == 2) {
1547 assert(at[0] == '[', "must be");
1548 switch (at[1]) {
1549 case 'B': in_elem_bt[i] = T_BYTE; break;
1550 case 'C': in_elem_bt[i] = T_CHAR; break;
1551 case 'D': in_elem_bt[i] = T_DOUBLE; break;
1552 case 'F': in_elem_bt[i] = T_FLOAT; break;
1553 case 'I': in_elem_bt[i] = T_INT; break;
1554 case 'J': in_elem_bt[i] = T_LONG; break;
1555 case 'S': in_elem_bt[i] = T_SHORT; break;
1556 case 'Z': in_elem_bt[i] = T_BOOLEAN; break;
1557 default: ShouldNotReachHere();
1558 }
1559 }
1560 } else {
1561 out_sig_bt[argc++] = in_sig_bt[i];
1562 in_elem_bt[i] = T_VOID;
1563 }
1564 if (in_sig_bt[i] != T_VOID) {
1565 assert(in_sig_bt[i] == ss.type(), "must match");
1566 ss.next();
1567 }
1568 }
1569 }
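// Worked example (hypothetical signature): a critical native taking a byte[]
// sees its single T_ARRAY in-arg expand to the pair { T_INT /* length */,
// T_ADDRESS /* elements */ } in out_sig_bt, while in_elem_bt[i] records
// T_BYTE decoded from the "[B" signature symbol above.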
1571 // Now figure out where the args must be stored and how much stack space
1572 // they require (neglecting out_preserve_stack_slots but counting space for
1573 // storing the 1st six register arguments). It's weird; see int_stk_helper.
1574 //
1575 int out_arg_slots;
1576 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1578 // Compute framesize for the wrapper. We need to handlize all oops in
1579 // registers. We must create space for them here that is disjoint from
1580 // the windowed save area because we have no control over when we might
1581 // flush the window again and overwrite values that gc has since modified.
1582 // (The live window race)
1583 //
1584 // We always just allocate 6 words for storing these objects. This allows
1585 // us to simply record the base and use the Ireg number to decide which
1586 // slot to use. (Note that the reg number is the inbound number not the
1587 // outbound number).
1588 // We must shuffle args to match the native convention, and include var-args space.
1590 // Calculate the total number of stack slots we will need.
1592 // First count the abi requirement plus all of the outgoing args
1593 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1595 // Now the space for the inbound oop handle area
1596 int total_save_slots = 9 * VMRegImpl::slots_per_word; // 9 arguments passed in registers
1597 if (is_critical_native) {
1598 // Critical natives may have to call out so they need a save area
1599 // for register arguments.
1600 int double_slots = 0;
1601 int single_slots = 0;
1602 for ( int i = 0; i < total_in_args; i++) {
1603 if (in_regs[i].first()->is_Register()) {
1604 const Register reg = in_regs[i].first()->as_Register();
1605 switch (in_sig_bt[i]) {
1606 case T_BOOLEAN:
1607 case T_BYTE:
1608 case T_SHORT:
1609 case T_CHAR:
1610 case T_INT: single_slots++; break;
1611 case T_ARRAY: // specific to LP64 (7145024)
1612 case T_LONG: double_slots++; break;
1613 default: ShouldNotReachHere();
1614 }
1615 } else if (in_regs[i].first()->is_FloatRegister()) {
1616 switch (in_sig_bt[i]) {
1617 case T_FLOAT: single_slots++; break;
1618 case T_DOUBLE: double_slots++; break;
1619 default: ShouldNotReachHere();
1620 }
1621 }
1622 }
1623 total_save_slots = double_slots * 2 + single_slots;
1624 // align the save area
1625 if (double_slots != 0) {
1626 stack_slots = round_to(stack_slots, 2);
1627 }
1628 }
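// Sketch of the save-area sizing above (illustrative values only): three
// T_INT args and two T_LONG args in registers give single_slots = 3 and
// double_slots = 2, so total_save_slots = 2 * 2 + 3 = 7, and stack_slots
// is rounded to an even value because doubles were present.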
1630 int oop_handle_offset = stack_slots;
1631 stack_slots += total_save_slots;
1633 // Now any space we need for handlizing a klass if this is a static method
1635 int klass_slot_offset = 0;
1636 int klass_offset = -1;
1637 int lock_slot_offset = 0;
1638 bool is_static = false;
1640 if (method->is_static()) {
1641 klass_slot_offset = stack_slots;
1642 stack_slots += VMRegImpl::slots_per_word;
1643 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1644 is_static = true;
1645 }
1647 // Plus a lock if needed
1649 if (method->is_synchronized()) {
1650 lock_slot_offset = stack_slots;
1651 stack_slots += VMRegImpl::slots_per_word;
1652 }
1654 // Now a place to save the return value, or to use as a temporary for any gpr -> fpr moves
1655 // + 2 for return address (which we own) and saved ebp
1656 stack_slots += 2 + 9 * VMRegImpl::slots_per_word; // (T0, A0, A1, A2, A3, A4, A5, A6, A7)
1658 // Ok The space we have allocated will look like:
1659 //
1660 //
1661 // FP-> | |
1662 // |---------------------|
1663 // | 2 slots for moves |
1664 // |---------------------|
1665 // | lock box (if sync) |
1666 // |---------------------| <- lock_slot_offset
1667 // | klass (if static) |
1668 // |---------------------| <- klass_slot_offset
1669 // | oopHandle area |
1670 // |---------------------| <- oop_handle_offset
1671 // | outbound memory |
1672 // | based arguments |
1673 // | |
1674 // |---------------------|
1675 // | vararg area |
1676 // |---------------------|
1677 // | |
1678 // SP-> | out_preserved_slots |
1679 //
1680 //
1683 // Now compute the actual number of stack words we need, rounding to keep
1684 // the stack properly aligned.
1685 stack_slots = round_to(stack_slots, StackAlignmentInSlots);
1687 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
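// Illustrative arithmetic (assuming 4-byte stack slots and 16-byte stack
// alignment, so StackAlignmentInSlots == 4): 45 accumulated slots round up
// to 48, giving stack_size == 48 * 4 == 192 bytes.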
1689 intptr_t start = (intptr_t)__ pc();
1693 // First thing make an ic check to see if we should even be here
1694 address ic_miss = SharedRuntime::get_ic_miss_stub();
1696 // We are free to use all registers as temps without saving them and
1697 // restoring them except ebp. ebp is the only callee save register
1698 // as far as the interpreter and the compiler(s) are concerned.
1700 // refer to register_mips.hpp:IC_Klass
1701 const Register ic_reg = T1;
1702 const Register receiver = T0;
1704 Label hit;
1705 Label exception_pending;
1707 __ verify_oop(receiver);
1708 // add for compressed oops
1709 __ load_klass(T9, receiver);
1710 __ beq(T9, ic_reg, hit);
1711 __ delayed()->nop();
1712 __ jmp(ic_miss, relocInfo::runtime_call_type);
1713 __ delayed()->nop();
1714 // The verified entry must be aligned for code patching,
1715 // and the first 5 bytes must be in the same cache line;
1716 // if we align at 8 then we can be sure those 5 bytes share a line.
1717 __ align(8);
1719 __ bind(hit);
1722 int vep_offset = ((intptr_t)__ pc()) - start;
1723 #ifdef COMPILER1
1724 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1725 // Object.hashCode can pull the hashCode from the header word
1726 // instead of doing a full VM transition once it's been computed.
1727 // Since hashCode is usually polymorphic at call sites we can't do
1728 // this optimization at the call site without a lot of work.
1729 Label slowCase;
1730 Register receiver = T0;
1731 Register result = V0;
1732 __ ld(result, receiver, oopDesc::mark_offset_in_bytes());
1733 // check if locked
1734 __ andi(AT, result, markOopDesc::unlocked_value);
1735 __ beq(AT, R0, slowCase);
1736 __ delayed()->nop();
1737 if (UseBiasedLocking) {
1738 // Check if biased and fall through to runtime if so
1739 __ andi(AT, result, markOopDesc::biased_lock_bit_in_place);
1740 __ bne(AT,R0, slowCase);
1741 __ delayed()->nop();
1742 }
1743 // get hash
1744 __ li(AT, markOopDesc::hash_mask_in_place);
1745 __ andr(AT, result, AT);
1746 // test if hashCode exists
1747 __ beq(AT, R0, slowCase);
1748 __ delayed()->nop();
1749 __ shr(result, markOopDesc::hash_shift);
1750 __ jr(RA);
1751 __ delayed()->nop();
1752 __ bind(slowCase);
1753 }
1754 #endif // COMPILER1
1756 // The instruction at the verified entry point must be 5 bytes or longer
1757 // because it can be patched on the fly by make_non_entrant. The stack bang
1758 // instruction fits that requirement.
1760 // Generate stack overflow check
1762 if (UseStackBanging) {
1763 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1764 } else {
1765 // need a 5 byte instruction to allow MT safe patching to non-entrant
1766 __ nop();
1767 __ nop();
1768 __ nop();
1769 __ nop();
1770 __ nop();
1771 }
1772 // Generate a new frame for the wrapper.
1773 // does mips need this?
1774 #ifndef OPT_THREAD
1775 __ get_thread(TREG);
1776 #endif
1777 //FIXME here
1778 __ st_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
1779 __ move(AT, -(StackAlignmentInBytes));
1780 __ andr(SP, SP, AT);
1782 __ enter();
1783 // -2 because return address is already present and so is saved ebp
1784 __ addiu(SP, SP, -1 * (stack_size - 2*wordSize));
1786 // Frame is now completed as far as size and linkage.
1788 int frame_complete = ((intptr_t)__ pc()) - start;
1790 // Calculate the difference between esp and ebp. We need to know it
1791 // after the native call because on windows Java Natives will pop
1792 // the arguments and it is painful to do esp relative addressing
1793 // in a platform independent way. So after the call we switch to
1794 // ebp relative addressing.
1795 // FIXME: actually, the fp_adjustment may not be right, because andr(sp, sp, at)
1796 // may change the SP
1797 int fp_adjustment = stack_size - 2*wordSize;
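// With the illustrative 192-byte frame from above, fp_adjustment would be
// 192 - 2 * wordSize == 176 on LP64 (assumed wordSize of 8).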
1799 #ifdef COMPILER2
1800 // C2 may leave the stack dirty if not in SSE2+ mode
1801 __ empty_FPU_stack();
1802 #endif /* COMPILER2 */
1804 // Compute the ebp offset for any slots used after the jni call
1806 int lock_slot_ebp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1807 // We use edi as a thread pointer because it is callee save and
1808 // if we load it once it is usable thru the entire wrapper
1809 // const Register thread = edi;
1810 const Register thread = TREG;
1812 // We use esi as the oop handle for the receiver/klass
1813 // It is callee save so it survives the call to native
1815 // const Register oop_handle_reg = esi;
1816 const Register oop_handle_reg = S4;
1817 if (is_critical_native) {
1818 __ stop("generate_native_wrapper in sharedRuntime <2>");
1819 //TODO:Fu
1820 /*
1821 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
1822 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
1823 */
1824 }
1826 #ifndef OPT_THREAD
1827 __ get_thread(thread);
1828 #endif
1830 //
1831 // We immediately shuffle the arguments so that any vm call we have to
1832 // make from here on out (sync slow path, jvmpi, etc.) we will have
1833 // captured the oops from our caller and have a valid oopMap for
1834 // them.
1836 // -----------------
1837 // The Grand Shuffle
1838 //
1839 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1840 // and, if static, the class mirror instead of a receiver. This pretty much
1841 // guarantees that register layout will not match (and mips doesn't use reg
1842 // parms though amd does). Since the native abi doesn't use register args
1843 // and the java convention does, we don't have to worry about collisions.
1844 // All of our moves are reg->stack or stack->stack.
1845 // We ignore the extra arguments during the shuffle and handle them at the
1846 // last moment. The shuffle is described by the two calling convention
1847 // vectors we have in our possession. We simply walk the java vector to
1848 // get the source locations and the c vector to get the destinations.
1850 int c_arg = method->is_static() ? 2 : 1 ;
1852 // Record esp-based slot for receiver on stack for non-static methods
1853 int receiver_offset = -1;
1855 // This is a trick. We double the stack slots so we can claim
1856 // the oops in the caller's frame. Since we are sure to have
1857 // more args than the caller, doubling is enough to make
1858 // sure we can capture all the incoming oop args from the
1859 // caller.
1860 //
1861 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1863 // Mark location of rbp (someday)
1864 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
1866 // Use eax, ebx as temporaries during any memory-memory moves we have to do
1867 // All inbound args are referenced based on rbp and all outbound args via rsp.
1871 #ifdef ASSERT
1872 bool reg_destroyed[RegisterImpl::number_of_registers];
1873 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
1874 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
1875 reg_destroyed[r] = false;
1876 }
1877 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
1878 freg_destroyed[f] = false;
1879 }
1881 #endif /* ASSERT */
1883 // We know that we only have args in at most two integer registers (ecx, edx). So eax, ebx
1884 // are free to use as temporaries if we have to do stack to stack moves.
1885 // All inbound args are referenced based on ebp and all outbound args via esp.
1887 // This may iterate in two different directions depending on the
1888 // kind of native it is. The reason is that for regular JNI natives
1889 // the incoming and outgoing registers are offset upwards and for
1890 // critical natives they are offset down.
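// For instance (hypothetical shape): an instance method with two Java args
// has total_in_args == 2 and total_c_args == 3 (JNIEnv* prepended), so the
// non-critical branch below pushes the pairs (1, 2) and (0, 1) and the
// shuffle proceeds from the last argument back to the first.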
1891 GrowableArray<int> arg_order(2 * total_in_args);
1892 VMRegPair tmp_vmreg;
1893 tmp_vmreg.set1(T8->as_VMReg());
1895 if (!is_critical_native) {
1896 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1897 arg_order.push(i);
1898 arg_order.push(c_arg);
1899 }
1900 } else {
1901 // Compute a valid move order, using tmp_vmreg to break any cycles
1902 __ stop("generate_native_wrapper in sharedRuntime <2>");
1903 //TODO:Fu
1904 // ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
1905 }
1907 int temploc = -1;
1908 for (int ai = 0; ai < arg_order.length(); ai += 2) {
1909 int i = arg_order.at(ai);
1910 int c_arg = arg_order.at(ai + 1);
1911 __ block_comment(err_msg("move %d -> %d", i, c_arg));
1912 if (c_arg == -1) {
1913 assert(is_critical_native, "should only be required for critical natives");
1914 // This arg needs to be moved to a temporary
1915 __ move(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
1916 in_regs[i] = tmp_vmreg;
1917 temploc = i;
1918 continue;
1919 } else if (i == -1) {
1920 assert(is_critical_native, "should only be required for critical natives");
1921 // Read from the temporary location
1922 assert(temploc != -1, "must be valid");
1923 i = temploc;
1924 temploc = -1;
1925 }
1926 #ifdef ASSERT
1927 if (in_regs[i].first()->is_Register()) {
1928 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
1929 } else if (in_regs[i].first()->is_FloatRegister()) {
1930 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding()], "destroyed reg!");
1931 }
1932 if (out_regs[c_arg].first()->is_Register()) {
1933 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1934 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1935 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1936 }
1937 #endif /* ASSERT */
1938 switch (in_sig_bt[i]) {
1939 case T_ARRAY:
1940 if (is_critical_native) {
1941 __ stop("generate_native_wrapper in sharedRuntime <2>");
1942 //TODO:Fu
1943 // unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
1944 c_arg++;
1945 #ifdef ASSERT
1946 if (out_regs[c_arg].first()->is_Register()) {
1947 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
1948 } else if (out_regs[c_arg].first()->is_FloatRegister()) {
1949 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding()] = true;
1950 }
1951 #endif
1952 break;
1953 }
1954 case T_OBJECT:
1955 assert(!is_critical_native, "no oop arguments");
1956 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1957 ((i == 0) && (!is_static)),
1958 &receiver_offset);
1959 break;
1960 case T_VOID:
1961 break;
1963 case T_FLOAT:
1964 float_move(masm, in_regs[i], out_regs[c_arg]);
1965 break;
1967 case T_DOUBLE:
1968 assert( i + 1 < total_in_args &&
1969 in_sig_bt[i + 1] == T_VOID &&
1970 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1971 double_move(masm, in_regs[i], out_regs[c_arg]);
1972 break;
1974 case T_LONG :
1975 long_move(masm, in_regs[i], out_regs[c_arg]);
1976 break;
1978 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1980 default:
1981 simple_move32(masm, in_regs[i], out_regs[c_arg]);
1982 }
1983 }
1985 // point c_arg at the first arg that is already loaded in case we
1986 // need to spill before we call out
1987 c_arg = total_c_args - total_in_args;
1988 // Pre-load a static method's oop into esi. Used both by locking code and
1989 // the normal JNI call code.
1991 __ move(oop_handle_reg, A1);
1993 if (method->is_static() && !is_critical_native) {
1995 // load oop into a register
1996 int oop_index = __ oop_recorder()->find_index(JNIHandles::make_local(
1997 (method->method_holder())->java_mirror()));
2000 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2001 __ relocate(rspec);
2002 __ patchable_set48(oop_handle_reg, (long)JNIHandles::make_local((method->method_holder())->java_mirror()));
2003 // Now handlize the static class mirror; it's known not-null.
2004 __ sd( oop_handle_reg, SP, klass_offset);
2005 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2007 // Now get the handle
2008 __ lea(oop_handle_reg, Address(SP, klass_offset));
2009 // store the klass handle as second argument
2010 __ move(A1, oop_handle_reg);
2011 // and protect the arg if we must spill
2012 c_arg--;
2013 }
2015 // Change state to native (we save the return address in the thread, since it might not
2016 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2017 // points into the right code segment. It does not have to be the correct return pc.
2018 // We use the same pc/oopMap repeatedly when we call out
2020 intptr_t the_pc = (intptr_t) __ pc();
2021 oop_maps->add_gc_map(the_pc - start, map);
2023 __ set_last_Java_frame(SP, noreg, NULL);
2024 __ relocate(relocInfo::internal_pc_type);
2025 {
2026 intptr_t save_pc = (intptr_t)the_pc ;
2027 __ patchable_set48(AT, save_pc);
2028 }
2029 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
2032 // We have all of the arguments set up at this point. We must not touch any
2033 // argument registers at this point (what if we save/restore them? there are no oops).
2034 {
2035 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2036 int metadata_index = __ oop_recorder()->find_index(method());
2037 RelocationHolder rspec = metadata_Relocation::spec(metadata_index);
2038 __ relocate(rspec);
2039 __ patchable_set48(AT, (long)(method()));
2041 __ call_VM_leaf(
2042 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2043 thread, AT);
2045 }
2047 // These are register definitions we need for locking/unlocking
2048 const Register swap_reg = T8; // Must use eax for cmpxchg instruction
2049 const Register obj_reg = T9; // Will contain the oop
2050 //const Register lock_reg = T6; // Address of compiler lock object (BasicLock)
2051 const Register lock_reg = c_rarg0; // Address of compiler lock object (BasicLock)
2055 Label slow_path_lock;
2056 Label lock_done;
2058 // Lock a synchronized method
2059 if (method->is_synchronized()) {
2060 assert(!is_critical_native, "unhandled");
2062 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2064 // Get the handle (the 2nd argument)
2065 __ move(oop_handle_reg, A1);
2067 // Get address of the box
2068 __ lea(lock_reg, Address(FP, lock_slot_ebp_offset));
2070 // Load the oop from the handle
2071 __ ld(obj_reg, oop_handle_reg, 0);
2073 if (UseBiasedLocking) {
2074 // Note that oop_handle_reg is trashed during this call
2075 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, A1, false, lock_done, &slow_path_lock);
2076 }
2078 // Load immediate 1 into swap_reg %eax
2079 __ move(swap_reg, 1);
2081 __ ld(AT, obj_reg, 0);
2082 __ orr(swap_reg, swap_reg, AT);
2084 __ sd(swap_reg, lock_reg, mark_word_offset);
2085 __ cmpxchg(lock_reg, Address(obj_reg, 0), swap_reg);
2086 __ bne(AT, R0, lock_done);
2087 __ delayed()->nop();
2088 // Test if the oopMark is an obvious stack pointer, i.e.,
2089 // 1) (mark & 3) == 0, and
2090 // 2) esp <= mark < esp + os::pagesize()
2091 // These 3 tests can be done by evaluating the following
2092 // expression: ((mark - esp) & (3 - os::vm_page_size())),
2093 // assuming both stack pointer and pagesize have their
2094 // least significant 2 bits clear.
2095 // NOTE: the oopMark is in swap_reg %eax as the result of cmpxchg
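// Worked example (assuming a 4K page): 3 - 4096 == -4093, whose two's
// complement form keeps only bits 0-1 and bits 12 and up. The AND below is
// therefore zero exactly when (mark & 3) == 0 and 0 <= mark - sp < 4096,
// i.e. the mark points into our own stack page: a recursive lock.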
2097 __ dsub(swap_reg, swap_reg, SP);
2098 __ move(AT, 3 - os::vm_page_size());
2099 __ andr(swap_reg, swap_reg, AT);
2100 // Save the test result, for recursive case, the result is zero
2101 __ sd(swap_reg, lock_reg, mark_word_offset);
2102 //FIXME here, Why notEqual?
2103 __ bne(swap_reg, R0, slow_path_lock);
2104 __ delayed()->nop();
2105 // Slow path will re-enter here
2106 __ bind(lock_done);
2108 if (UseBiasedLocking) {
2109 // Re-fetch oop_handle_reg as we trashed it above
2110 __ move(A1, oop_handle_reg);
2111 }
2112 }
2115 // Finally just about ready to make the JNI call
2118 // get JNIEnv* which is first argument to native
2119 if (!is_critical_native) {
2120 __ addi(A0, thread, in_bytes(JavaThread::jni_environment_offset()));
2121 }
2123 // Example: Java_java_lang_ref_Finalizer_invokeFinalizeMethod(JNIEnv *env, jclass clazz, jobject ob)
2124 /* Load the second argument into A1 */
2125 //__ ld(A1, SP , wordSize ); // klass
2127 // Now set thread in native
2128 __ addi(AT, R0, _thread_in_native);
2129 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));
2130 /* Jin: do the call */
2131 __ call(method->native_function(), relocInfo::runtime_call_type);
2132 __ delayed()->nop();
2133 // WARNING - on Windows Java Natives use pascal calling convention and pop the
2134 // arguments off of the stack. We could just re-adjust the stack pointer here
2135 // and continue to do SP relative addressing but we instead switch to FP
2136 // relative addressing.
2138 // Unpack native results.
2139 switch (ret_type) {
2140 case T_BOOLEAN: __ c2bool(V0); break;
2141 case T_CHAR : __ andi(V0,V0, 0xFFFF); break;
2142 case T_BYTE : __ sign_extend_byte (V0); break;
2143 case T_SHORT : __ sign_extend_short(V0); break;
2144 case T_INT : break; // nothing to do
2145 case T_DOUBLE :
2146 case T_FLOAT :
2147 // Result is in st0; we'll save as needed
2148 break;
2149 case T_ARRAY: // Really a handle
2150 case T_OBJECT: // Really a handle
2151 break; // can't de-handlize until after safepoint check
2152 case T_VOID: break;
2153 case T_LONG: break;
2154 default : ShouldNotReachHere();
2155 }
2156 // Switch thread to "native transition" state before reading the synchronization state.
2157 // This additional state is necessary because reading and testing the synchronization
2158 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2159 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2160 // VM thread changes sync state to synchronizing and suspends threads for GC.
2161 // Thread A is resumed to finish this native method, but doesn't block here since it
2162 // didn't see any synchronization in progress, and escapes.
2163 __ addi(AT, R0, _thread_in_native_trans);
2164 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));
2166 //if(os::is_MP()) {}
2168 Label after_transition;
2170 // check for safepoint operation in progress and/or pending suspend requests
2171 {
2172 Label Continue;
2173 __ li(AT, SafepointSynchronize::address_of_state());
2174 __ lw(A0, AT, 0);
2175 __ addi(AT, A0, -SafepointSynchronize::_not_synchronized);
2176 Label L;
2177 __ bne(AT,R0, L);
2178 __ delayed()->nop();
2179 __ lw(AT, thread, in_bytes(JavaThread::suspend_flags_offset()));
2180 __ beq(AT, R0, Continue);
2181 __ delayed()->nop();
2182 __ bind(L);
2184 // Don't use call_VM as it will see a possible pending exception and forward it
2185 // and never return here preventing us from clearing _last_native_pc down below.
2186 // Also can't use call_VM_leaf either as it will check to see if esi & edi are
2187 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2188 // by hand.
2189 //
2190 save_native_result(masm, ret_type, stack_slots);
2191 __ move(A0, thread);
2192 __ addi(SP, SP, -wordSize);
2193 __ push(S2);
2194 __ move(AT, -(StackAlignmentInBytes));
2195 __ move(S2, SP); // use S2 as a sender SP holder
2196 __ andr(SP, SP, AT); // align stack as required by ABI
2197 if (!is_critical_native) {
2198 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::runtime_call_type);
2199 __ delayed()->nop();
2200 } else {
2201 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition), relocInfo::runtime_call_type);
2202 __ delayed()->nop();
2203 }
2204 __ move(SP, S2); // use S2 as a sender SP holder
2205 __ pop(S2);
2206 __ addi(SP,SP, wordSize);
2207 // add for compressed oops
2208 __ reinit_heapbase();
2209 // Restore any method result value
2210 restore_native_result(masm, ret_type, stack_slots);
2212 if (is_critical_native) {
2213 // The call above performed the transition to thread_in_Java so
2214 // skip the transition logic below.
2215 __ beq(R0, R0, after_transition);
2216 __ delayed()->nop();
2217 }
2219 __ bind(Continue);
2220 }
2222 // change thread state
2223 __ addi(AT, R0, _thread_in_Java);
2224 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));
2225 __ bind(after_transition);
2226 Label reguard;
2227 Label reguard_done;
2228 __ lw(AT, thread, in_bytes(JavaThread::stack_guard_state_offset()));
2229 __ addi(AT, AT, -JavaThread::stack_guard_yellow_disabled);
2230 __ beq(AT, R0, reguard);
2231 __ delayed()->nop();
2232 // slow path reguard re-enters here
2233 __ bind(reguard_done);
2235 // Handle possible exception (will unlock if necessary)
2237 // native result if any is live
2239 // Unlock
2240 Label slow_path_unlock;
2241 Label unlock_done;
2242 if (method->is_synchronized()) {
2244 Label done;
2246 // Get locked oop from the handle we passed to jni
2247 __ ld(obj_reg, oop_handle_reg, 0);
2248 //FIXME
2249 if (UseBiasedLocking) {
2250 __ biased_locking_exit(obj_reg, T8, done);
2252 }
2254 // Simple recursive lock?
2256 __ ld(AT, FP, lock_slot_ebp_offset);
2257 __ beq(AT, R0, done);
2258 __ delayed()->nop();
2259 // Must save eax if it is live now because cmpxchg must use it
2260 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2261 save_native_result(masm, ret_type, stack_slots);
2262 }
2264 // get old displaced header
2265 __ ld(T8, FP, lock_slot_ebp_offset);
2266 // get address of the stack lock
2267 __ addi(c_rarg0, FP, lock_slot_ebp_offset);
2268 // Atomic swap old header if oop still contains the stack lock
2269 __ cmpxchg(T8, Address(obj_reg, 0), c_rarg0);
2271 __ beq(AT, R0, slow_path_unlock);
2272 __ delayed()->nop();
2273 // slow path re-enters here
2274 __ bind(unlock_done);
2275 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2276 restore_native_result(masm, ret_type, stack_slots);
2277 }
2279 __ bind(done);
2281 }
2282 {
2283 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2284 // Tell dtrace about this method exit
2285 save_native_result(masm, ret_type, stack_slots);
2286 int metadata_index = __ oop_recorder()->find_index( (method()));
2287 RelocationHolder rspec = metadata_Relocation::spec(metadata_index);
2288 __ relocate(rspec);
2289 __ patchable_set48(AT, (long)(method()));
2291 __ call_VM_leaf(
2292 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2293 thread, AT);
2294 restore_native_result(masm, ret_type, stack_slots);
2295 }
2297 // We can finally stop using that last_Java_frame we setup ages ago
2299 __ reset_last_Java_frame(false);
2301 // Unpack oop result
2302 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2303 Label L;
2304 __ beq(V0, R0, L);
2305 __ delayed()->nop();
2306 __ ld(V0, V0, 0);
2307 __ bind(L);
2308 __ verify_oop(V0);
2309 }
2311 if (!is_critical_native) {
2312 // reset handle block
2313 __ ld(AT, thread, in_bytes(JavaThread::active_handles_offset()));
2314 __ sw(R0, AT, JNIHandleBlock::top_offset_in_bytes());
2315 }
2317 if (!is_critical_native) {
2318 // Any exception pending?
2319 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2320 __ bne(AT, R0, exception_pending);
2321 __ delayed()->nop();
2322 }
2323 // no exception, we're almost done
2325 // check that only result value is on FPU stack
2326 __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2328 // Return
2329 #ifndef OPT_THREAD
2330 __ get_thread(TREG);
2331 #endif
2332 //__ ld_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
2333 __ leave();
2335 __ jr(RA);
2336 __ delayed()->nop();
2337 // Unexpected paths are out of line and go here
2338 /*
2339 if (!is_critical_native) {
2340 // forward the exception
2341 __ bind(exception_pending);
2343 // and forward the exception
2344 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2345 }
2346 */
2347 // Slow path locking & unlocking
2348 if (method->is_synchronized()) {
2350 // BEGIN Slow path lock
2351 __ bind(slow_path_lock);
2353 // protect the args we've loaded
2354 save_args(masm, total_c_args, c_arg, out_regs);
2356 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2357 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2359 __ move(A0, obj_reg);
2360 __ move(A1, lock_reg);
2361 __ move(A2, thread);
2362 __ addi(SP, SP, - 3*wordSize);
2364 __ move(AT, -(StackAlignmentInBytes));
2365 __ move(S2, SP); // use S2 as a sender SP holder
2366 __ andr(SP, SP, AT); // align stack as required by ABI
2368 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2369 __ delayed()->nop();
2370 __ move(SP, S2);
2371 __ addi(SP, SP, 3*wordSize);
2373 restore_args(masm, total_c_args, c_arg, out_regs);
2375 #ifdef ASSERT
2376 { Label L;
2377 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2378 __ beq(AT, R0, L);
2379 __ delayed()->nop();
2380 __ stop("no pending exception allowed on exit from monitorenter");
2381 __ bind(L);
2382 }
2383 #endif
2384 __ b(lock_done);
2385 __ delayed()->nop();
2386 // END Slow path lock
2388 // BEGIN Slow path unlock
2389 __ bind(slow_path_unlock);
2391 // Slow path unlock
2393 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2394 save_native_result(masm, ret_type, stack_slots);
2395 }
2396 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2398 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2399 __ push(AT);
2400 __ sd(R0, thread, in_bytes(Thread::pending_exception_offset()));
2402 __ move(AT, -(StackAlignmentInBytes));
2403 __ move(S2, SP); // use S2 as a sender SP holder
2404 __ andr(SP, SP, AT); // align stack as required by ABI
2406 // should be a peal
2407 // +wordSize because of the push above
2408 __ addi(A1, FP, lock_slot_ebp_offset);
2410 __ move(A0, obj_reg);
2411 __ addi(SP,SP, -2*wordSize);
2412 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2413 relocInfo::runtime_call_type);
2414 __ delayed()->nop();
2415 __ addi(SP,SP, 2*wordSize);
2416 __ move(SP, S2);
2417 // add for compressed oops
2418 __ reinit_heapbase();
2419 #ifdef ASSERT
2420 {
2421 Label L;
2422 __ lw(AT, thread, in_bytes(Thread::pending_exception_offset()));
2423 __ beq(AT, R0, L);
2424 __ delayed()->nop();
2425 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2426 __ bind(L);
2427 }
2428 #endif /* ASSERT */
2430 __ pop(AT);
2431 __ sd(AT, thread, in_bytes(Thread::pending_exception_offset()));
2432 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2433 restore_native_result(masm, ret_type, stack_slots);
2434 }
2435 __ b(unlock_done);
2436 __ delayed()->nop();
2437 // END Slow path unlock
2439 }
2441 // SLOW PATH Reguard the stack if needed
2443 __ bind(reguard);
2444 save_native_result(masm, ret_type, stack_slots);
2445 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages),
2446 relocInfo::runtime_call_type);
2447 __ delayed()->nop();
2448 // add for compressed oops
2449 __ reinit_heapbase();
2450 restore_native_result(masm, ret_type, stack_slots);
2451 __ b(reguard_done);
2452 __ delayed()->nop();
2454 // BEGIN EXCEPTION PROCESSING
2455 if (!is_critical_native) {
2456 // Forward the exception
2457 __ bind(exception_pending);
2459 // remove possible return value from FPU register stack
2460 __ empty_FPU_stack();
2462 // pop our frame
2463 // forward_exception_entry needs the return address on the stack
2464 __ addiu(SP, FP, wordSize);
2465 __ ld(FP, SP, (-1) * wordSize);
2467 // and forward the exception
2468 __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
2469 __ delayed()->nop();
2470 }
2471 __ flush();
2473 nmethod *nm = nmethod::new_native_nmethod(method,
2474 compile_id,
2475 masm->code(),
2476 vep_offset,
2477 frame_complete,
2478 stack_slots / VMRegImpl::slots_per_word,
2479 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2480 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2481 oop_maps);
2483 if (is_critical_native) {
2484 nm->set_lazy_critical_native(true);
2485 }
2487 return nm;
2489 }
2491 #ifdef HAVE_DTRACE_H
2492 // ---------------------------------------------------------------------------
2493 // Generate a dtrace nmethod for a given signature. The method takes arguments
2494 // in the Java compiled code convention, marshals them to the native
2495 // abi and then leaves nops at the position you would expect to call a native
2496 // function. When the probe is enabled the nops are replaced with a trap
2497 // instruction that dtrace inserts and the trace will cause a notification
2498 // to dtrace.
2499 //
2500 // The probes are only able to take primitive types and java/lang/String as
2501 // arguments. No other java types are allowed. Strings are converted to utf8
2502 // strings so that from dtrace's point of view java strings are converted to C
2503 // strings. There is an arbitrary fixed limit on the total space that a method
2504 // can use for converting the strings. (256 chars per string in the signature).
2505 // So any java string larger than this is truncated.
2507 static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
2508 static bool offsets_initialized = false;
2510 static VMRegPair reg64_to_VMRegPair(Register r) {
2511 VMRegPair ret;
2512 if (wordSize == 8) {
2513 ret.set2(r->as_VMReg());
2514 } else {
2515 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
2516 }
2517 return ret;
2518 }
2521 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
2522 methodHandle method) {
2525 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
2526 // be single threaded in this method.
2527 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
2529 // Fill in the signature array, for the calling-convention call.
2530 int total_args_passed = method->size_of_parameters();
2532 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2533 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2535 // The signature we are going to use for the trap that dtrace will see
2536 // java/lang/String is converted. We drop "this", and any other object
2537 // is converted to NULL. (A one-slot java/lang/Long object reference
2538 // is converted to a two-slot long, which is why we double the allocation).
2539 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
2540 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
2542 int i=0;
2543 int total_strings = 0;
2544 int first_arg_to_pass = 0;
2545 int total_c_args = 0;
2547 // Skip the receiver as dtrace doesn't want to see it
2548 if( !method->is_static() ) {
2549 in_sig_bt[i++] = T_OBJECT;
2550 first_arg_to_pass = 1;
2551 }
2553 SignatureStream ss(method->signature());
2554 for ( ; !ss.at_return_type(); ss.next()) {
2555 BasicType bt = ss.type();
2556 in_sig_bt[i++] = bt; // Collect remaining bits of signature
2557 out_sig_bt[total_c_args++] = bt;
2558 if( bt == T_OBJECT) {
2559 symbolOop s = ss.as_symbol_or_null();
2560 if (s == vmSymbols::java_lang_String()) {
2561 total_strings++;
2562 out_sig_bt[total_c_args-1] = T_ADDRESS;
2563 } else if (s == vmSymbols::java_lang_Boolean() ||
2564 s == vmSymbols::java_lang_Byte()) {
2565 out_sig_bt[total_c_args-1] = T_BYTE;
2566 } else if (s == vmSymbols::java_lang_Character() ||
2567 s == vmSymbols::java_lang_Short()) {
2568 out_sig_bt[total_c_args-1] = T_SHORT;
2569 } else if (s == vmSymbols::java_lang_Integer() ||
2570 s == vmSymbols::java_lang_Float()) {
2571 out_sig_bt[total_c_args-1] = T_INT;
2572 } else if (s == vmSymbols::java_lang_Long() ||
2573 s == vmSymbols::java_lang_Double()) {
2574 out_sig_bt[total_c_args-1] = T_LONG;
2575 out_sig_bt[total_c_args++] = T_VOID;
2576 }
2577 } else if ( bt == T_LONG || bt == T_DOUBLE ) {
2578 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2579 // We convert double to long
2580 out_sig_bt[total_c_args-1] = T_LONG;
2581 out_sig_bt[total_c_args++] = T_VOID;
2582 } else if ( bt == T_FLOAT) {
2583 // We convert float to int
2584 out_sig_bt[total_c_args-1] = T_INT;
2585 }
2586 }
2588 assert(i==total_args_passed, "validly parsed signature");
2590 // Now get the compiled-Java layout as input arguments
2591 int comp_args_on_stack;
2592 comp_args_on_stack = SharedRuntime::java_calling_convention(
2593 in_sig_bt, in_regs, total_args_passed, false);
2595 // We have received a description of where all the java args are located
2596 // on entry to the wrapper. We need to convert these args to where
2597 // a native (non-jni) function would expect them. To figure out
2598 // where they go we convert the java signature to a C signature and remove
2599 // T_VOID for any long/double we might have received.
2602 // Now figure out where the args must be stored and how much stack space
2603 // they require (neglecting out_preserve_stack_slots but counting space for
2604 // storing the 1st six register arguments). It's weird; see int_stk_helper.
2606 int out_arg_slots;
2607 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2609 // Calculate the total number of stack slots we will need.
2611 // First count the abi requirement plus all of the outgoing args
2612 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2614 // Plus a temp for possible conversion of float/double/long register args
2616 int conversion_temp = stack_slots;
2617 stack_slots += 2;
2620 // Now space for the string(s) we must convert
2622 int string_locs = stack_slots;
2623 stack_slots += total_strings *
2624 (max_dtrace_string_size / VMRegImpl::stack_slot_size);
2626 // Ok The space we have allocated will look like:
2627 //
2628 //
2629 // FP-> | |
2630 // |---------------------|
2631 // | string[n] |
2632 // |---------------------| <- string_locs[n]
2633 // | string[n-1] |
2634 // |---------------------| <- string_locs[n-1]
2635 // | ... |
2636 // | ... |
2637 // |---------------------| <- string_locs[1]
2638 // | string[0] |
2639 // |---------------------| <- string_locs[0]
2640 // | temp |
2641 // |---------------------| <- conversion_temp
2642 // | outbound memory |
2643 // | based arguments |
2644 // | |
2645 // |---------------------|
2646 // | |
2647 // SP-> | out_preserved_slots |
2648 //
2649 //
2651 // Now compute the actual number of stack words we need, rounding to keep
2652 // the stack properly aligned.
2653 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word);
2655 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2657 intptr_t start = (intptr_t)__ pc();
2659 // First thing make an ic check to see if we should even be here
2661 {
2662 Label L;
2663 const Register temp_reg = G3_scratch;
2664 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub());
2665 __ verify_oop(O0);
2666 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg);
2667 __ cmp(temp_reg, G5_inline_cache_reg);
2668 __ brx(Assembler::equal, true, Assembler::pt, L);
2669 __ delayed()->nop();
2671 __ jump_to(ic_miss, 0);
2672 __ delayed()->nop();
2673 __ align(CodeEntryAlignment);
2674 __ bind(L);
2675 }
2677 int vep_offset = ((intptr_t)__ pc()) - start;
2680 // The instruction at the verified entry point must be 5 bytes or longer
2681 // because it can be patched on the fly by make_non_entrant. The stack bang
2682 // instruction fits that requirement.
2684 // Generate stack overflow check before creating frame
2685 __ generate_stack_overflow_check(stack_size);
2687 assert(((intptr_t)__ pc() - start - vep_offset) >= 5,
2688 "valid size for make_non_entrant");
2690 // Generate a new frame for the wrapper.
2691 __ save(SP, -stack_size, SP);
2693 // Frame is now completed as far as size and linkage.
2695 int frame_complete = ((intptr_t)__ pc()) - start;
2697 #ifdef ASSERT
2698 bool reg_destroyed[RegisterImpl::number_of_registers];
2699 bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2700 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2701 reg_destroyed[r] = false;
2702 }
2703 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2704 freg_destroyed[f] = false;
2705 }
2707 #endif /* ASSERT */
2709 VMRegPair zero;
2710 const Register g0 = G0; // without this we get a compiler warning (why??)
2711 zero.set2(g0->as_VMReg());
2713 int c_arg, j_arg;
2715 Register conversion_off = noreg;
2717 for (j_arg = first_arg_to_pass, c_arg = 0 ;
2718 j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2720 VMRegPair src = in_regs[j_arg];
2721 VMRegPair dst = out_regs[c_arg];
2723 #ifdef ASSERT
2724 if (src.first()->is_Register()) {
2725 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!");
2726 } else if (src.first()->is_FloatRegister()) {
2727 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding(
2728 FloatRegisterImpl::S)], "ack!");
2729 }
2730 if (dst.first()->is_Register()) {
2731 reg_destroyed[dst.first()->as_Register()->encoding()] = true;
2732 } else if (dst.first()->is_FloatRegister()) {
2733 freg_destroyed[dst.first()->as_FloatRegister()->encoding(
2734 FloatRegisterImpl::S)] = true;
2735 }
2736 #endif /* ASSERT */
2738 switch (in_sig_bt[j_arg]) {
2739 case T_ARRAY:
2740 case T_OBJECT:
2741 {
2742 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT ||
2743 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2744 // need to unbox a one-slot value
2745 Register in_reg = L0;
2746 Register tmp = L2;
2747 if ( src.first()->is_reg() ) {
2748 in_reg = src.first()->as_Register();
2749 } else {
2750 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS),
2751 "must be");
2752 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg);
2753 }
2754 // If the final destination is an acceptable register
2755 if ( dst.first()->is_reg() ) {
2756 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) {
2757 tmp = dst.first()->as_Register();
2758 }
2759 }
2761 Label skipUnbox;
2762 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) {
2763 __ mov(G0, tmp->successor());
2764 }
2765 __ br_null(in_reg, true, Assembler::pn, skipUnbox);
2766 __ delayed()->mov(G0, tmp);
2768 BasicType bt = out_sig_bt[c_arg];
2769 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2770 switch (bt) {
2771 case T_BYTE:
2772 __ ldub(in_reg, box_offset, tmp); break;
2773 case T_SHORT:
2774 __ lduh(in_reg, box_offset, tmp); break;
2775 case T_INT:
2776 __ ld(in_reg, box_offset, tmp); break;
2777 case T_LONG:
2778 __ ld_long(in_reg, box_offset, tmp); break;
2779 default: ShouldNotReachHere();
2780 }
2782 __ bind(skipUnbox);
2783 // If tmp wasn't the final destination, copy to the final destination
2784 if (tmp == L2) {
2785 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2);
2786 if (out_sig_bt[c_arg] == T_LONG) {
2787 long_move(masm, tmp_as_VM, dst);
2788 } else {
2789 move32_64(masm, tmp_as_VM, out_regs[c_arg]);
2790 }
2791 }
2792 if (out_sig_bt[c_arg] == T_LONG) {
2793 assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2794 ++c_arg; // move over the T_VOID to keep the loop indices in sync
2795 }
2796 } else if (out_sig_bt[c_arg] == T_ADDRESS) {
2797 Register s =
2798 src.first()->is_reg() ? src.first()->as_Register() : L2;
2799 Register d =
2800 dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2802 // We store the oop now so that the conversion pass can reach it
2803 // while in the inner frame. This will be the only store if
2804 // the oop is NULL.
2805 if (s != L2) {
2806 // src is register
2807 if (d != L2) {
2808 // dst is register
2809 __ mov(s, d);
2810 } else {
2811 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2812 STACK_BIAS), "must be");
2813 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS);
2814 }
2815 } else {
2816 // src not a register
2817 assert(Assembler::is_simm13(reg2offset(src.first()) +
2818 STACK_BIAS), "must be");
2819 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d);
2820 if (d == L2) {
2821 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2822 STACK_BIAS), "must be");
2823 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS);
2824 }
2825 }
2826 } else if (out_sig_bt[c_arg] != T_VOID) {
2827 // Convert the arg to NULL
2828 if (dst.first()->is_reg()) {
2829 __ mov(G0, dst.first()->as_Register());
2830 } else {
2831 assert(Assembler::is_simm13(reg2offset(dst.first()) +
2832 STACK_BIAS), "must be");
2833 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS);
2834 }
2835 }
2836 }
2837 break;
2838 case T_VOID:
2839 break;
2841 case T_FLOAT:
2842 if (src.first()->is_stack()) {
2843 // Stack to stack/reg is simple
2844 move32_64(masm, src, dst);
2845 } else {
2846 if (dst.first()->is_reg()) {
2847 // freg -> reg
2848 int off =
2849 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2850 Register d = dst.first()->as_Register();
2851 if (Assembler::is_simm13(off)) {
2852 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2853 SP, off);
2854 __ ld(SP, off, d);
2855 } else {
2856 if (conversion_off == noreg) {
2857 __ set(off, L6);
2858 conversion_off = L6;
2859 }
2860 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2861 SP, conversion_off);
2862 __ ld(SP, conversion_off , d);
2863 }
2864 } else {
2865 // freg -> mem
2866 int off = STACK_BIAS + reg2offset(dst.first());
2867 if (Assembler::is_simm13(off)) {
2868 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2869 SP, off);
2870 } else {
2871 if (conversion_off == noreg) {
2872 __ set(off, L6);
2873 conversion_off = L6;
2874 }
2875 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(),
2876 SP, conversion_off);
2877 }
2878 }
2879 }
2880 break;
2882 case T_DOUBLE:
2883 assert( j_arg + 1 < total_args_passed &&
2884 in_sig_bt[j_arg + 1] == T_VOID &&
2885 out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2886 if (src.first()->is_stack()) {
2887 // Stack to stack/reg is simple
2888 long_move(masm, src, dst);
2889 } else {
2890 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2;
2892 // Destination could be an odd reg on 32bit in which case
2893 // we can't load direct to the destination.
2895 if (!d->is_even() && wordSize == 4) {
2896 d = L2;
2897 }
2898 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2899 if (Assembler::is_simm13(off)) {
2900 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2901 SP, off);
2902 __ ld_long(SP, off, d);
2903 } else {
2904 if (conversion_off == noreg) {
2905 __ set(off, L6);
2906 conversion_off = L6;
2907 }
2908 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(),
2909 SP, conversion_off);
2910 __ ld_long(SP, conversion_off, d);
2911 }
2912 if (d == L2) {
2913 long_move(masm, reg64_to_VMRegPair(L2), dst);
2914 }
2915 }
2916 break;
2918 case T_LONG :
2919 // 32bit can't do a split move of something like g1 -> O0, O1
2920 // so use a memory temp
2921 if (src.is_single_phys_reg() && wordSize == 4) {
2922 Register tmp = L2;
2923 if (dst.first()->is_reg() &&
2924 (wordSize == 8 || dst.first()->as_Register()->is_even())) {
2925 tmp = dst.first()->as_Register();
2926 }
2928 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size;
2929 if (Assembler::is_simm13(off)) {
2930 __ stx(src.first()->as_Register(), SP, off);
2931 __ ld_long(SP, off, tmp);
2932 } else {
2933 if (conversion_off == noreg) {
2934 __ set(off, L6);
2935 conversion_off = L6;
2936 }
2937 __ stx(src.first()->as_Register(), SP, conversion_off);
2938 __ ld_long(SP, conversion_off, tmp);
2939 }
2941 if (tmp == L2) {
2942 long_move(masm, reg64_to_VMRegPair(L2), dst);
2943 }
2944 } else {
2945 long_move(masm, src, dst);
2946 }
2947 break;
2949 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2951 default:
2952 move32_64(masm, src, dst);
2953 }
2954 }
2957 // If we have any strings we must store any register based arg to the stack
2958 // This includes any still live xmm registers too.
2960 if (total_strings > 0 ) {
2962 // protect all the arg registers
2963 __ save_frame(0);
2964 __ mov(G2_thread, L7_thread_cache);
2965 const Register L2_string_off = L2;
2967 // Get first string offset
2968 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off);
2970 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) {
2971 if (out_sig_bt[c_arg] == T_ADDRESS) {
2973 VMRegPair dst = out_regs[c_arg];
2974 const Register d = dst.first()->is_reg() ?
2975 dst.first()->as_Register()->after_save() : noreg;
2977 // It's a string; the oop was already copied to the out arg
2978 // position
2979 if (d != noreg) {
2980 __ mov(d, O0);
2981 } else {
2982 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2983 "must be");
2984 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0);
2985 }
2986 Label skip;
2988 __ br_null(O0, false, Assembler::pn, skip);
2989 __ delayed()->add(FP, L2_string_off, O1);
2991 if (d != noreg) {
2992 __ mov(O1, d);
2993 } else {
2994 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS),
2995 "must be");
2996 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS);
2997 }
2999 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf),
3000 relocInfo::runtime_call_type);
3001 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off);
3003 __ bind(skip);
3005 }
3007 }
3008 __ mov(L7_thread_cache, G2_thread);
3009 __ restore();
3011 }
3014 // Ok now we are done. Need to place the nop that dtrace wants in order to
3015 // patch in the trap
3017 int patch_offset = ((intptr_t)__ pc()) - start;
3019 __ nop();
3022 // Return
3024 __ ret();
3025 __ delayed()->restore();
3027 __ flush();
3029 nmethod *nm = nmethod::new_dtrace_nmethod(
3030 method, masm->code(), vep_offset, patch_offset, frame_complete,
3031 stack_slots / VMRegImpl::slots_per_word);
3032 return nm;
3034 }
3036 #endif // HAVE_DTRACE_H
3038 // this function returns the adjustment (in number of words) to a c2i adapter
3039 // activation for use during deoptimization
3040 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3041 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3042 }
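// Illustrative use (assumed numbers): a callee with 5 locals and 2
// parameters yields (5 - 2) * Interpreter::stackElementWords extra words,
// i.e. 3 words where stackElementWords == 1 (LP64).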
3044 // "Top of Stack" slots that may be unused by the calling convention but must
3045 // otherwise be preserved.
3046 // On Intel these are not necessary and the value can be zero.
3047 // On Sparc this describes the words reserved for storing a register window
3048 // when an interrupt occurs.
3049 uint SharedRuntime::out_preserve_stack_slots() {
3050 //return frame::register_save_words * VMRegImpl::slots_per_word;
3051 return 0;
3052 }
3054 //------------------------------generate_deopt_blob----------------------------
3055 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3056 // instead.
3057 void SharedRuntime::generate_deopt_blob() {
3058 // allocate space for the code
3059 ResourceMark rm;
3060 // setup code generation tools
3061 //CodeBuffer buffer ("deopt_blob", 4000, 2048);
3062 CodeBuffer buffer ("deopt_blob", 8000, 2048);//aoqi FIXME for debug
3063 MacroAssembler* masm = new MacroAssembler( & buffer);
3064 int frame_size_in_words;
3065 OopMap* map = NULL;
3066 // Account for the extra args we place on the stack
3067 // by the time we call fetch_unroll_info
3068 const int additional_words = 2; // deopt kind, thread
3070 OopMapSet *oop_maps = new OopMapSet();
3072 address start = __ pc();
3073 Label cont;
3074 // we use S3 for DeOpt reason register
3075 Register reason = S3;
3076 // use S6 for thread register
3077 Register thread = TREG;
3078 // use S7 for fetch_unroll_info returned UnrollBlock
3079 Register unroll = S7;
3080 // Prolog for the non-exception case!
3081 // Correct the return address we were given.
3082 // FIXME: is the return address on the tos or in RA?
3083 __ addi(RA, RA, - (NativeCall::return_address_offset_long));
3084 // Save everything in sight.
3085 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
3086 // Normal deoptimization
3087 __ move(reason, Deoptimization::Unpack_deopt);
3088 __ b(cont);
3089 __ delayed()->nop();
3091 int reexecute_offset = __ pc() - start;
3093 // Reexecute case
3094 // return address is the pc that describes what bci to re-execute at
3096 // No need to update map as each call to save_live_registers will produce identical oopmap
3097 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
3098 __ move(reason, Deoptimization::Unpack_reexecute);
3099 __ b(cont);
3100 __ delayed()->nop();
3102 int exception_offset = __ pc() - start;
3103 // Prolog for exception case
3105 // all registers are dead at this entry point, except for V0 and
3106 // V1 which contain the exception oop and exception pc
3107 // respectively. Set them in TLS and fall thru to the
3108 // unpack_with_exception_in_tls entry point.
3110 __ get_thread(thread);
3111 __ st_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset()));
3112 __ st_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset()));
3113 int exception_in_tls_offset = __ pc() - start;
3114 // new implementation because exception oop is now passed in JavaThread
3116 // Prolog for exception case
3117 // All registers must be preserved because they might be used by LinearScan
3118 // Exception oop and throwing PC are passed in JavaThread
3119 // tos: stack at point of call to method that threw the exception (i.e. only
3120 // args are on the stack, no return address)
3122 // Return address will be patched later with the throwing pc. The correct value is not
3123 // available now because loading it from memory would destroy registers.
3124 // Save everything in sight.
3125 // No need to update map as each call to save_live_registers will produce identical oopmap
3126 __ addi(RA, RA, - (NativeCall::return_address_offset_long));
3127 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
3129 // Now it is safe to overwrite any register
3130 // store the correct deoptimization type
3131 __ move(reason, Deoptimization::Unpack_exception);
3132 // load throwing pc from JavaThread and patch it as the return address
3133 // of the current frame. Then clear the field in JavaThread
3134 __ get_thread(thread);
3135 __ ld_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset()));
3136 __ st_ptr(V1, SP, RegisterSaver::raOffset() * wordSize); //save ra
3137 __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset()));
3140 #ifdef ASSERT
3141 // verify that there is really an exception oop in JavaThread
3142 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset()));
3143 __ verify_oop(AT);
3144 // verify that there is no pending exception
3145 Label no_pending_exception;
3146 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
3147 __ beq(AT, R0, no_pending_exception);
3148 __ delayed()->nop();
3149 __ stop("must not have pending exception here");
3150 __ bind(no_pending_exception);
3151 #endif
3152 __ bind(cont);
3153 // Compiled code leaves the floating point stack dirty, empty it.
3154 __ empty_FPU_stack();
3157 // Call C code. Need thread and this frame, but NOT official VM entry
3158 // crud. We cannot block on this call, no GC can happen.
3159 #ifndef OPT_THREAD
3160 __ get_thread(thread);
3161 #endif
3163 __ move(A0, thread);
3164 __ addi(SP, SP, -additional_words * wordSize);
3166 __ set_last_Java_frame(NOREG, NOREG, NULL);
3168 // Call fetch_unroll_info(). Need thread and this frame, but NOT official VM entry - cannot block on
3169 // this call, no GC can happen. Call should capture return values.
3171 __ relocate(relocInfo::internal_pc_type);
3172 {
3173 intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 28;
3174 __ patchable_set48(AT, save_pc);
3175 }
3176 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
3178 __ call((address)Deoptimization::fetch_unroll_info);
3180 __ delayed()->nop();
3181 oop_maps->add_gc_map(__ pc() - start, map);
3182 __ addiu(SP, SP, additional_words * wordSize);
3183 __ get_thread(thread);
3184 __ reset_last_Java_frame(false);
3186 // Load UnrollBlock into S7
3187 __ move(unroll, V0);
3190 // Move the unpack kind to a safe place in the UnrollBlock because
3191 // we are very short of registers
3193 Address unpack_kind(unroll, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
3194 __ sw(reason, unpack_kind);
3195 // save the unpack_kind value
3196 // Retrieve the possible live values (return values)
3197 // All callee save registers representing jvm state
3198 // are now in the vframeArray.
3200 Label noException;
3201 __ move(AT, Deoptimization::Unpack_exception);
3202 __ bne(AT, reason, noException);// Was exception pending?
3203 __ delayed()->nop();
3204 __ ld_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset()));
3205 __ ld_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset()));
3206 __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset()));
3207 __ st_ptr(R0, thread, in_bytes(JavaThread::exception_oop_offset()));
3209 __ verify_oop(V0);
3211 // Overwrite the result registers with the exception results.
3212 __ st_ptr(V0, SP, RegisterSaver::v0Offset()*wordSize);
3213 __ st_ptr(V1, SP, RegisterSaver::v1Offset()*wordSize);
3215 __ bind(noException);
3218 // Stack is back to only having register save data on the stack.
3219 // Now restore the result registers. Everything else is either dead or captured
3220 // in the vframeArray.
3222 RegisterSaver::restore_result_registers(masm);
3223 // All of the register save area has been popped off the stack. Only the
3224 // return address remains.
3225 // Pop all the frames we must move/replace.
3226 // Frame picture (youngest to oldest)
3227 // 1: self-frame (no frame link)
3228 // 2: deopting frame (no frame link)
3229 // 3: caller of deopting frame (could be compiled/interpreted).
3230 //
3231 // Note: by leaving the return address of self-frame on the stack
3232 // and using the size of frame 2 to adjust the stack
3233 // when we are done the return to frame 3 will still be on the stack.
3235 // register for the sender's sp
3236 Register sender_sp = Rsender;
3237 // register for frame pcs
3238 Register pcs = T0;
3239 // register for frame sizes
3240 Register sizes = T1;
3241 // register for frame count
3242 Register count = T3;
3244 // Pop deoptimized frame
3245 __ lw(AT, unroll, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes());
3246 __ add(SP, SP, AT);
3247 // sp should be pointing at the return address to the caller (3)
3249 // Load array of frame pcs into pcs
3250 __ ld_ptr(pcs, unroll, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes());
3251 __ addi(SP, SP, wordSize); // trash the old pc
3252 // Load array of frame sizes into sizes (T1)
3253 __ ld_ptr(sizes, unroll, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes());
3257 // Load count of frames into count (T3)
3258 __ lw(count, unroll, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes());
3259 // Pick up the initial fp we should save
3260 __ ld(FP, unroll, Deoptimization::UnrollBlock::initial_info_offset_in_bytes());
3261 // Now adjust the caller's stack to make up for the extra locals
3262 // but record the original sp so that we can save it in the skeletal interpreter
3263 // frame and the stack walking of interpreter_sender will get the unextended sp
3264 // value and not the "real" sp value.
3265 __ move(sender_sp, SP);
3266 __ lw(AT, unroll, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes());
3267 __ sub(SP, SP, AT);
3269 // Push interpreter frames in a loop
3270 /*
3271 * Example disassembly of this loop from an earlier, buggy build:
3272 Loop:
3273 0x000000555bd82d18: lw t2, 0x0(t1) ; lw sizes[i] <--- bug: must be ld, entries are 8 bytes
3274 0x000000555bd82d1c: ld at, 0x0(t0) ; ld pcs[i]
3275 0x000000555bd82d20: daddi t2, t2, 0xfffffff0 ; t2 -= 16
3276 0x000000555bd82d24: daddi sp, sp, 0xfffffff0
3277 0x000000555bd82d28: sd fp, 0x0(sp) ; push fp
3278 0x000000555bd82d2c: sd at, 0x8(sp) ; push at
3279 0x000000555bd82d30: dadd fp, sp, zero ; fp <- sp
3280 0x000000555bd82d34: dsub sp, sp, t2 ; sp -= t2
3281 0x000000555bd82d38: sd zero, 0xfffffff0(fp) ; __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
3282 0x000000555bd82d3c: sd s4, 0xfffffff8(fp) ; __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);
3283 0x000000555bd82d40: dadd s4, sp, zero ; move(sender_sp, SP);
3284 0x000000555bd82d44: daddi t3, t3, 0xffffffff ; count--
3285 0x000000555bd82d48: daddi t1, t1, 0x4 ; sizes += 4
3286 0x000000555bd82d4c: bne t3, zero, 0x000000555bd82d18
3287 0x000000555bd82d50: daddi t0, t0, 0x4 ; <--- bug: must be t0 += 8
3288 */
3290 // pcs[0] = frame_pcs[0] = deopt_sender.raw_pc();
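// Each loop iteration builds one skeletal interpreter frame: push the saved pc and
// the previous fp, point FP at the new frame, then extend SP by the frame size
// (minus the two words just pushed).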
3291 Label loop;
3292 __ bind(loop);
3293 __ ld(T2, sizes, 0); // Load frame size
3294 __ ld_ptr(AT, pcs, 0); // save return address
3295 __ addi(T2, T2, -2*wordSize); // we'll push pc and fp by hand
3296 __ push2(AT, FP);
3297 __ move(FP, SP);
3298 __ sub(SP, SP, T2); // Prolog!
3299 // This value is corrected by layout_activation_impl
3300 __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
3301 __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);// Make it walkable
3302 __ move(sender_sp, SP); // pass to next frame
3303 __ addi(count, count, -1); // decrement counter
3304 __ addi(sizes, sizes, wordSize); // Bump array pointer (sizes)
3305 __ bne(count, R0, loop);
3306 __ delayed()->addi(pcs, pcs, wordSize); // Bump array pointer (pcs)
3307 __ ld(AT, pcs, 0); // frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
3308 // Re-push self-frame
3309 __ push2(AT, FP);
3310 __ move(FP, SP);
3311 __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
3312 __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);
3313 __ addi(SP, SP, -(frame_size_in_words - 2 - additional_words) * wordSize);
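// frame_size_in_words covers the whole register-save frame; the two words just
// pushed (pc/fp) and the additional_words scratch slots are excluded here.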
3315 // Restore frame locals after moving the frame
3316 __ sd(V0, SP, RegisterSaver::v0Offset() * wordSize);
3317 __ sd(V1, SP, RegisterSaver::v1Offset() * wordSize);
3318 __ sdc1(F0, SP, RegisterSaver::fpResultOffset() * wordSize); // save the float/double result
3319 __ sdc1(F1, SP, (RegisterSaver::fpResultOffset() + 1) * wordSize);
3322 // Call unpack_frames(). Need thread and this frame, but NOT official VM entry - cannot block on
3323 // this call, no GC can happen.
3324 __ move(A1, reason); // exec_mode
3325 __ get_thread(thread);
3326 __ move(A0, thread); // thread
3327 __ addi(SP, SP, (-additional_words) *wordSize);
3329 // set last_Java_sp, last_Java_fp
3330 __ set_last_Java_frame(NOREG, FP, NULL);
3332 __ move(AT, -(StackAlignmentInBytes));
3333 __ andr(SP, SP, AT); // Fix stack alignment as required by ABI
3335 __ relocate(relocInfo::internal_pc_type);
3336 {
3337 intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 28;
3338 __ patchable_set48(AT, save_pc);
3339 }
3340 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
3342 __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), relocInfo::runtime_call_type);
3343 __ delayed()->nop();
3344 // Revert SP alignment after call since we're going to do some SP relative addressing below
3345 __ ld(SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
3346 // Set an oopmap for the call site
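// (an empty map presumably suffices here: the saved registers were already
// described by the first map, and everything live now sits in the new frames)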
3347 oop_maps->add_gc_map(__ offset(), new OopMap( frame_size_in_words , 0));
3349 __ push(V0);
3351 __ get_thread(thread);
3352 __ reset_last_Java_frame(true);
3354 // Collect return values
3355 __ ld(V0, SP, (RegisterSaver::v0Offset() + additional_words +1) * wordSize);
3356 __ ld(V1, SP, (RegisterSaver::v1Offset() + additional_words +1) * wordSize);
3357 __ ldc1(F0, SP, RegisterSaver::fpResultOffset() * wordSize); // restore the float/double result
3358 __ ldc1(F1, SP, (RegisterSaver::fpResultOffset() + 1) * wordSize);
3360 // Clear the floating point stack before returning to the interpreter
3361 __ empty_FPU_stack();
3362 // FIXME: float and double return values may need extra handling here.
3363 // Push a float or double return value if necessary.
3364 __ leave();
3366 // Jump to interpreter
3367 __ jr(RA);
3368 __ delayed()->nop();
3370 masm->flush();
3371 _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
3372 _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3373 }
3375 #ifdef COMPILER2
3377 //------------------------------generate_uncommon_trap_blob--------------------
3378 // Ought to generate an ideal graph & compile, but here's some hand-written
3379 // MIPS assembly instead.
3380 void SharedRuntime::generate_uncommon_trap_blob() {
3381 // allocate space for the code
3382 ResourceMark rm;
3383 // setup code generation tools
3384 CodeBuffer buffer ("uncommon_trap_blob", 512*80 , 512*40 );
3385 MacroAssembler* masm = new MacroAssembler(&buffer);
3387 enum frame_layout {
3388 s0_off, s0_off2,
3389 s1_off, s1_off2,
3390 s2_off, s2_off2,
3391 s3_off, s3_off2,
3392 s4_off, s4_off2,
3393 s5_off, s5_off2,
3394 s6_off, s6_off2,
3395 s7_off, s7_off2,
3396 fp_off, fp_off2,
3397 return_off, return_off2, // slot for return address sp + 9
3398 framesize
3399 };
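// Slots are 4 bytes (BytesPerInt), so framesize % 4 == 0 keeps SP 16-byte aligned.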
3400 assert(framesize % 4 == 0, "sp not 16-byte aligned");
3402 address start = __ pc();
3404 // Push self-frame.
3405 __ daddiu(SP, SP, -framesize * BytesPerInt);
3407 __ sd(RA, SP, return_off * BytesPerInt);
3408 __ sd(FP, SP, fp_off * BytesPerInt);
3410 // Save callee saved registers. None for UseSSE=0,
3411 // floats-only for UseSSE=1, and doubles for UseSSE=2.
3412 __ sd(S0, SP, s0_off * BytesPerInt);
3413 __ sd(S1, SP, s1_off * BytesPerInt);
3414 __ sd(S2, SP, s2_off * BytesPerInt);
3415 __ sd(S3, SP, s3_off * BytesPerInt);
3416 __ sd(S4, SP, s4_off * BytesPerInt);
3417 __ sd(S5, SP, s5_off * BytesPerInt);
3418 __ sd(S6, SP, s6_off * BytesPerInt);
3419 __ sd(S7, SP, s7_off * BytesPerInt);
3421 __ daddi(FP, SP, fp_off * BytesPerInt);
3423 // Clear the floating point exception stack
3424 __ empty_FPU_stack();
3426 Register thread = TREG;
3428 #ifndef OPT_THREAD
3429 __ get_thread(thread);
3430 #endif
3431 // set last_Java_sp
3432 __ set_last_Java_frame(NOREG, FP, NULL);
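// Record last_Java_pc by hand: save_pc is 52 bytes (13 instructions) past the
// current pc, which is assumed to be the return address of the patchable_call below.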
3433 __ relocate(relocInfo::internal_pc_type);
3434 {
3435 long save_pc = (long)__ pc() + 52;
3436 __ patchable_set48(AT, (long)save_pc);
3437 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
3438 }
3439 // Call C code. Need thread but NOT official VM entry
3440 // crud. We cannot block on this call, no GC can happen. Call should
3441 // capture callee-saved registers as well as return values.
3442 __ move(A0, thread);
3443 // argument already in T0
3444 __ move(A1, T0);
3445 __ patchable_call((address)Deoptimization::uncommon_trap);
3447 // Set an oopmap for the call site
3448 OopMapSet *oop_maps = new OopMapSet();
3449 OopMap* map = new OopMap( framesize, 0 );
3451 map->set_callee_saved( VMRegImpl::stack2reg(s0_off ), S0->as_VMReg() );
3452 map->set_callee_saved( VMRegImpl::stack2reg(s1_off ), S1->as_VMReg() );
3453 map->set_callee_saved( VMRegImpl::stack2reg(s2_off ), S2->as_VMReg() );
3454 map->set_callee_saved( VMRegImpl::stack2reg(s3_off ), S3->as_VMReg() );
3455 map->set_callee_saved( VMRegImpl::stack2reg(s4_off ), S4->as_VMReg() );
3456 map->set_callee_saved( VMRegImpl::stack2reg(s5_off ), S5->as_VMReg() );
3457 map->set_callee_saved( VMRegImpl::stack2reg(s6_off ), S6->as_VMReg() );
3458 map->set_callee_saved( VMRegImpl::stack2reg(s7_off ), S7->as_VMReg() );
3461 oop_maps->add_gc_map( __ offset(), map);
3463 #ifndef OPT_THREAD
3464 __ get_thread(thread);
3465 #endif
3466 __ reset_last_Java_frame(false);
3468 // Load UnrollBlock into S7
3469 Register unroll = S7;
3470 __ move(unroll, V0);
3472 // Pop all the frames we must move/replace.
3473 //
3474 // Frame picture (youngest to oldest)
3475 // 1: self-frame (no frame link)
3476 // 2: deopting frame (no frame link)
3477 // 3: possible-i2c-adapter-frame
3478 // 4: caller of deopting frame (could be compiled/interpreted; if interpreted
3479 // we will create a c2i adapter here)
3481 // Pop self-frame. We have no frame, and must rely only on V0 and SP.
3482 __ daddiu(SP, SP, framesize * BytesPerInt);
3484 // Pop deoptimized frame
3485 __ lw(AT, unroll, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes());
3486 __ dadd(SP, SP, AT);
3488 // register for frame pcs
3489 Register pcs = T8;
3490 // register for frame sizes
3491 Register sizes = T9;
3492 // register for frame count
3493 Register count = T3;
3494 // register for the sender's sp
3495 Register sender_sp = T1;
3497 // sp should be pointing at the return address to the caller (4)
3498 // Load array of frame pcs into pcs (T8)
3499 __ ld(pcs, unroll, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes());
3501 // Load array of frame sizes into sizes (T9)
3502 __ ld(sizes, unroll, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes());
3503 __ lwu(count, unroll, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes());
3505 // Pick up the initial fp we should save
3506 __ ld(FP, unroll, Deoptimization::UnrollBlock::initial_info_offset_in_bytes());
3507 // Now adjust the caller's stack to make up for the extra locals
3508 // but record the original sp so that we can save it in the skeletal interpreter
3509 // frame and the stack walking of interpreter_sender will get the unextended sp
3510 // value and not the "real" sp value.
3512 __ move(sender_sp, SP);
3513 __ lw(AT, unroll, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes());
3514 __ dsub(SP, SP, AT);
3515 // Push interpreter frames in a loop
3516 Label loop;
3517 __ bind(loop);
3518 __ ld(T2, sizes, 0); // Load frame size
3519 __ ld(AT, pcs, 0); // save return address
3520 __ daddi(T2, T2, -2*wordSize); // we'll push pc and fp by hand
3521 __ push2(AT, FP);
3522 __ move(FP, SP);
3523 __ dsub(SP, SP, T2); // Prolog!
3524 // This value is corrected by layout_activation_impl
3525 __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
3526 __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);// Make it walkable
3527 __ move(sender_sp, SP); // pass to next frame
3528 __ daddi(count, count, -1); // decrement counter
3529 __ daddi(sizes, sizes, wordSize); // Bump array pointer (sizes)
3530 __ addi(pcs, pcs, wordSize); // Bump array pointer (pcs)
3531 __ bne(count, R0, loop);
3532 __ delayed()->nop();
3534 __ ld(RA, pcs, 0);
3536 // Re-push self-frame
3537 __ daddi(SP, SP, -2 * wordSize); // make room for old FP and the return address
3538 __ sd(FP, SP, 0 * wordSize); // save old FP
3539 __ sd(RA, SP, 1 * wordSize); // save final return address
3540 __ move(FP, SP);
3541 __ daddi(SP, SP, -(framesize / 2 - 2) * wordSize);
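// framesize is in 4-byte slots, so framesize / 2 is the size in 8-byte words;
// the two words (FP/RA) pushed above are excluded.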
3543 // set last_Java_sp, last_Java_fp
3544 __ set_last_Java_frame(NOREG, FP, NULL);
3546 __ move(AT, -(StackAlignmentInBytes));
3547 __ andr(SP, SP, AT); // Fix stack alignment as required by ABI
3549 __ relocate(relocInfo::internal_pc_type);
3550 {
3551 long save_pc = (long)__ pc() + 52;
3552 __ patchable_set48(AT, (long)save_pc);
3553 }
3554 __ sd(AT, thread,in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
3556 // Call C code. Need thread but NOT official VM entry
3557 // crud. We cannot block on this call, no GC can happen. Call should
3558 // restore return values to their stack-slots with the new SP.
3559 __ move(A0, thread);
3560 __ move(A1, Deoptimization::Unpack_uncommon_trap);
3561 __ patchable_call((address)Deoptimization::unpack_frames);
3562 // Set an oopmap for the call site
3564 oop_maps->add_gc_map( __ offset(), new OopMap( framesize, 0 ) );
3566 __ reset_last_Java_frame(true);
3568 // Pop self-frame.
3569 __ leave(); // Epilog!
3571 // Jump to interpreter
3572 __ jr(RA);
3573 __ delayed()->nop();
3574 // -------------
3575 // make sure all code is generated
3576 masm->flush();
3578 _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize / 2);
3579 }
3581 #endif // COMPILER2
3583 //------------------------------generate_handler_blob-------------------
3584 //
3585 // Generate a special Compile2Runtime blob that saves all registers, and sets
3586 // up an OopMap and calls safepoint code to stop the compiled code for
3587 // a safepoint.
3588 //
3589 // This blob is jumped to (via a breakpoint and the signal handler) from a
3590 // safepoint in compiled code.
3592 SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int pool_type) {
3594 // Account for thread arg in our frame
3595 const int additional_words = 0;
3596 int frame_size_in_words;
3598 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3600 ResourceMark rm;
3601 OopMapSet *oop_maps = new OopMapSet();
3602 OopMap* map;
3604 // allocate space for the code
3605 // setup code generation tools
3606 CodeBuffer buffer ("handler_blob", 2048, 512);
3607 MacroAssembler* masm = new MacroAssembler( &buffer);
3609 const Register thread = TREG;
3610 address start = __ pc();
3611 address call_pc = NULL;
3612 bool cause_return = (pool_type == POLL_AT_RETURN);
3613 bool save_vectors = (pool_type == POLL_AT_VECTOR_LOOP);
3615 // If cause_return is true we are at a poll_return: RA already holds the
3616 // return address into the caller of the nmethod being safepointed, so we
3617 // can leave it in RA and effectively complete the return and safepoint in
3618 // the caller.
3619 // Otherwise we load the saved exception pc into RA.
3620 __ push(thread);
3621 #ifndef OPT_THREAD
3622 __ get_thread(thread);
3623 #endif
3625 if(!cause_return) {
3626 __ ld_ptr(RA, Address(thread, JavaThread::saved_exception_pc_offset()));
3627 }
3629 __ pop(thread);
3630 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, save_vectors);
3632 #ifndef OPT_THREAD
3633 __ get_thread(thread);
3634 #endif
3635 // The following is basically a call_VM. However, we need the precise
3636 // address of the call in order to generate an oopmap. Hence, we do all the
3637 // work ourselves.
3639 __ move(A0, thread);
3640 __ set_last_Java_frame(NOREG, NOREG, NULL);
3643 // do the call
3646 __ call(call_ptr);
3647 __ delayed()->nop();
3649 // Set an oopmap for the call site. This oopmap will map all
3650 // oop-registers and debug-info registers as callee-saved. This
3651 // will allow deoptimization at this safepoint to find all possible
3652 // debug-info recordings, as well as let GC find all oops.
3653 oop_maps->add_gc_map(__ offset(), map);
3655 Label noException;
3657 // Clear last_Java_sp again
3658 __ reset_last_Java_frame(false);
3660 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
3661 __ beq(AT, R0, noException);
3662 __ delayed()->nop();
3664 // Exception pending
3666 RegisterSaver::restore_live_registers(masm, save_vectors);
3667 // forward_exception_entry needs the return address on the stack
3668 __ push(RA);
3669 __ patchable_jump((address)StubRoutines::forward_exception_entry());
3671 // No exception case
3672 __ bind(noException);
3673 // Normal exit, register restoring and exit
3674 RegisterSaver::restore_live_registers(masm, save_vectors);
3675 __ jr(RA);
3676 __ delayed()->nop();
3678 masm->flush();
3680 // Fill-out other meta info
3681 return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
3682 }
3684 //
3685 // generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss
3686 //
3687 // Generate a stub that calls into vm to find out the proper destination
3688 // of a java call. All the argument registers are live at this point
3689 // but since this is generic code we don't know what they are and the caller
3690 // must do any gc of the args.
3691 //
3692 RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3693 assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3695 // allocate space for the code
3696 ResourceMark rm;
3699 // FIXME (aoqi): verify the required code size.
3700 CodeBuffer buffer(name, 2000, 2048);
3701 MacroAssembler* masm = new MacroAssembler(&buffer);
3703 int frame_size_words;
3704 //we put the thread in A0
3706 OopMapSet *oop_maps = new OopMapSet();
3707 OopMap* map = NULL;
3709 int start = __ offset();
3710 map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3713 int frame_complete = __ offset();
3715 const Register thread = T8;
3716 __ get_thread(thread);
3718 __ move(A0, thread);
3719 __ set_last_Java_frame(noreg, FP, NULL);
3720 // align the stack before invoking native code
3721 __ move(AT, -(StackAlignmentInBytes));
3722 __ andr(SP, SP, AT);
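// Record last_Java_pc by hand; the offset (the set48 sequence plus 24 bytes plus
// one instruction word) is assumed to land on the return address of the call below.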
3723 __ relocate(relocInfo::internal_pc_type);
3724 {
3725 intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 24 + 1 * BytesPerInstWord;
3726 __ patchable_set48(AT, save_pc);
3727 }
3728 __ sd(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));
3730 __ call(destination);
3731 __ delayed()->nop();
3733 // Set an oopmap for the call site.
3734 // We need this not only for callee-saved registers, but also for volatile
3735 // registers that the compiler might be keeping live across a safepoint.
3736 oop_maps->add_gc_map( __ offset() - start, map);
3737 // V0 contains the address we are going to jump to assuming no exception got installed
3738 __ get_thread(thread);
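// Restore SP from the frame anchor: the stack was realigned before the call.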
3739 __ ld_ptr(SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
3740 // clear last_Java_sp
3741 __ reset_last_Java_frame(true);
3742 // check for pending exceptions
3743 Label pending;
3744 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
3745 __ bne(AT, R0, pending);
3746 __ delayed()->nop();
3747 // get the returned Method*
3748 // FIXME: does MIPS need this?
3749 __ get_vm_result_2(Rmethod, thread); // Refer to OpenJDK8
3750 __ st_ptr(Rmethod, SP, RegisterSaver::methodOffset() * wordSize);
3751 __ st_ptr(V0, SP, RegisterSaver::v0Offset() * wordSize);
3752 RegisterSaver::restore_live_registers(masm);
3754 // We are back to the original state on entry and ready to go to the callee method.
3755 __ jr(V0);
3756 __ delayed()->nop();
3757 // Pending exception after the safepoint
3759 __ bind(pending);
3761 RegisterSaver::restore_live_registers(masm);
3763 // exception pending => remove activation and forward to exception handler
3764 // forward_exception_entry needs the return address on the stack
3765 __ push(RA);
3766 __ get_thread(thread);
3767 __ st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
3768 __ ld_ptr(V0, thread, in_bytes(Thread::pending_exception_offset()));
3769 __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
3770 __ delayed()->nop();
3771 // -------------
3772 // make sure all code is generated
3773 masm->flush();
3775 RuntimeStub* tmp = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3776 return tmp;
3777 }
3779 extern "C" int SpinPause() {return 0;}
3780 // extern "C" int SafeFetch32 (int * adr, int errValue) {return 0;} ;
3781 // extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) {return *adr; } ;