src/cpu/mips/vm/sharedRuntime_mips_64.cpp

changeset 6880:52ea28d233d2
parent 410:63bcd8487c2a
child 7997:6cbff0651f1a

comparison (equal / deleted / inserted / replaced): 6879:11d997b1e656 vs. 6880:52ea28d233d2
41 #ifdef COMPILER2 41 #ifdef COMPILER2
42 #include "opto/runtime.hpp" 42 #include "opto/runtime.hpp"
43 #endif 43 #endif
44 44
45 #define __ masm-> 45 #define __ masm->
46
46 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size; 47 const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;
47 48
48 class RegisterSaver { 49 class RegisterSaver {
49 enum { FPU_regs_live = 32 }; 50 enum { FPU_regs_live = 32 };
50 // Capture info about frame layout 51 // Capture info about frame layout
51 enum layout { 52 enum layout {
52 #define DEF_LAYOUT_OFFS(regname) regname ## _off, regname ## H_off, 53 #define DEF_LAYOUT_OFFS(regname) regname ## _off, regname ## H_off,
53 DEF_LAYOUT_OFFS(for_16_bytes_aligned) 54 DEF_LAYOUT_OFFS(for_16_bytes_aligned)
54 DEF_LAYOUT_OFFS(fpr0) 55 DEF_LAYOUT_OFFS(fpr0)
55 DEF_LAYOUT_OFFS(fpr1) 56 DEF_LAYOUT_OFFS(fpr1)
56 DEF_LAYOUT_OFFS(fpr2) 57 DEF_LAYOUT_OFFS(fpr2)
57 DEF_LAYOUT_OFFS(fpr3) 58 DEF_LAYOUT_OFFS(fpr3)
58 DEF_LAYOUT_OFFS(fpr4) 59 DEF_LAYOUT_OFFS(fpr4)
59 DEF_LAYOUT_OFFS(fpr5) 60 DEF_LAYOUT_OFFS(fpr5)
60 DEF_LAYOUT_OFFS(fpr6) 61 DEF_LAYOUT_OFFS(fpr6)
61 DEF_LAYOUT_OFFS(fpr7) 62 DEF_LAYOUT_OFFS(fpr7)
62 DEF_LAYOUT_OFFS(fpr8) 63 DEF_LAYOUT_OFFS(fpr8)
63 DEF_LAYOUT_OFFS(fpr9) 64 DEF_LAYOUT_OFFS(fpr9)
64 DEF_LAYOUT_OFFS(fpr10) 65 DEF_LAYOUT_OFFS(fpr10)
65 DEF_LAYOUT_OFFS(fpr11) 66 DEF_LAYOUT_OFFS(fpr11)
66 DEF_LAYOUT_OFFS(fpr12) 67 DEF_LAYOUT_OFFS(fpr12)
67 DEF_LAYOUT_OFFS(fpr13) 68 DEF_LAYOUT_OFFS(fpr13)
68 DEF_LAYOUT_OFFS(fpr14) 69 DEF_LAYOUT_OFFS(fpr14)
69 DEF_LAYOUT_OFFS(fpr15) 70 DEF_LAYOUT_OFFS(fpr15)
70 DEF_LAYOUT_OFFS(fpr16) 71 DEF_LAYOUT_OFFS(fpr16)
71 DEF_LAYOUT_OFFS(fpr17) 72 DEF_LAYOUT_OFFS(fpr17)
72 DEF_LAYOUT_OFFS(fpr18) 73 DEF_LAYOUT_OFFS(fpr18)
73 DEF_LAYOUT_OFFS(fpr19) 74 DEF_LAYOUT_OFFS(fpr19)
74 DEF_LAYOUT_OFFS(fpr20) 75 DEF_LAYOUT_OFFS(fpr20)
75 DEF_LAYOUT_OFFS(fpr21) 76 DEF_LAYOUT_OFFS(fpr21)
76 DEF_LAYOUT_OFFS(fpr22) 77 DEF_LAYOUT_OFFS(fpr22)
77 DEF_LAYOUT_OFFS(fpr23) 78 DEF_LAYOUT_OFFS(fpr23)
78 DEF_LAYOUT_OFFS(fpr24) 79 DEF_LAYOUT_OFFS(fpr24)
79 DEF_LAYOUT_OFFS(fpr25) 80 DEF_LAYOUT_OFFS(fpr25)
80 DEF_LAYOUT_OFFS(fpr26) 81 DEF_LAYOUT_OFFS(fpr26)
81 DEF_LAYOUT_OFFS(fpr27) 82 DEF_LAYOUT_OFFS(fpr27)
82 DEF_LAYOUT_OFFS(fpr28) 83 DEF_LAYOUT_OFFS(fpr28)
83 DEF_LAYOUT_OFFS(fpr29) 84 DEF_LAYOUT_OFFS(fpr29)
84 DEF_LAYOUT_OFFS(fpr30) 85 DEF_LAYOUT_OFFS(fpr30)
85 DEF_LAYOUT_OFFS(fpr31) 86 DEF_LAYOUT_OFFS(fpr31)
86 87
87 DEF_LAYOUT_OFFS(v0) 88 DEF_LAYOUT_OFFS(v0)
88 DEF_LAYOUT_OFFS(v1) 89 DEF_LAYOUT_OFFS(v1)
89 DEF_LAYOUT_OFFS(a0) 90 DEF_LAYOUT_OFFS(a0)
90 DEF_LAYOUT_OFFS(a1) 91 DEF_LAYOUT_OFFS(a1)
91 DEF_LAYOUT_OFFS(a2) 92 DEF_LAYOUT_OFFS(a2)
92 DEF_LAYOUT_OFFS(a3) 93 DEF_LAYOUT_OFFS(a3)
93 DEF_LAYOUT_OFFS(a4) 94 DEF_LAYOUT_OFFS(a4)
94 DEF_LAYOUT_OFFS(a5) 95 DEF_LAYOUT_OFFS(a5)
95 DEF_LAYOUT_OFFS(a6) 96 DEF_LAYOUT_OFFS(a6)
96 DEF_LAYOUT_OFFS(a7) 97 DEF_LAYOUT_OFFS(a7)
97 DEF_LAYOUT_OFFS(t0) 98 DEF_LAYOUT_OFFS(t0)
98 DEF_LAYOUT_OFFS(t1) 99 DEF_LAYOUT_OFFS(t1)
99 DEF_LAYOUT_OFFS(t2) 100 DEF_LAYOUT_OFFS(t2)
100 DEF_LAYOUT_OFFS(t3) 101 DEF_LAYOUT_OFFS(t3)
101 DEF_LAYOUT_OFFS(s0) 102 DEF_LAYOUT_OFFS(s0)
102 DEF_LAYOUT_OFFS(s1) 103 DEF_LAYOUT_OFFS(s1)
103 DEF_LAYOUT_OFFS(s2) 104 DEF_LAYOUT_OFFS(s2)
104 DEF_LAYOUT_OFFS(s3) 105 DEF_LAYOUT_OFFS(s3)
105 DEF_LAYOUT_OFFS(s4) 106 DEF_LAYOUT_OFFS(s4)
106 DEF_LAYOUT_OFFS(s5) 107 DEF_LAYOUT_OFFS(s5)
107 DEF_LAYOUT_OFFS(s6) 108 DEF_LAYOUT_OFFS(s6)
108 DEF_LAYOUT_OFFS(s7) 109 DEF_LAYOUT_OFFS(s7)
109 DEF_LAYOUT_OFFS(t8) 110 DEF_LAYOUT_OFFS(t8)
110 DEF_LAYOUT_OFFS(t9) 111 DEF_LAYOUT_OFFS(t9)
111 112
112 DEF_LAYOUT_OFFS(gp) 113 DEF_LAYOUT_OFFS(gp)
113 DEF_LAYOUT_OFFS(fp) 114 DEF_LAYOUT_OFFS(fp)
114 DEF_LAYOUT_OFFS(return) 115 DEF_LAYOUT_OFFS(return)
115 /* 116 reg_save_size
116 fpr0_off, fpr1_off, 117 };
117 fpr2_off, fpr3_off,
118 fpr4_off, fpr5_off,
119 fpr6_off, fpr7_off,
120 fpr8_off, fpr9_off,
121 fpr10_off, fpr11_off,
122 fpr12_off, fpr13_off,
123 fpr14_off, fpr15_off,
124 fpr16_off, fpr17_off,
125 fpr18_off, fpr19_off,
126 fpr20_off, fpr21_off,
127 fpr22_off, fpr23_off,
128 fpr24_off, fpr25_off,
129 fpr26_off, fpr27_off,
130 fpr28_off, fpr29_off,
131 fpr30_off, fpr31_off,
132
133 v0_off, v1_off,
134 a0_off, a1_off,
135 a2_off, a3_off,
136 a4_off, a5_off,
137 a6_off, a7_off,
138 t0_off, t1_off, t2_off, t3_off,
139 s0_off, s1_off, s2_off, s3_off, s4_off, s5_off, s6_off, s7_off,
140 t8_off, t9_off,
141
142 gp_off, fp_off,
143 return_off,
144 */
145 reg_save_size
146 };
147 118
148 public: 119 public:
149 120
150 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false); 121 static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
151 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false); 122 static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);
152 //FIXME, I have no idea which register to use 123 static int raOffset(void) { return return_off / 2; }
153 static int raOffset(void) { return return_off / 2; } 124 //Rmethod
154 //Rmethod 125 static int methodOffset(void) { return s3_off / 2; }
155 static int methodOffset(void) { return s3_off / 2; } 126
156 127 static int v0Offset(void) { return v0_off / 2; }
157 static int v0Offset(void) { return v0_off / 2; } 128 static int v1Offset(void) { return v1_off / 2; }
158 static int v1Offset(void) { return v1_off / 2; } 129
159 130 static int fpResultOffset(void) { return fpr0_off / 2; }
160 static int fpResultOffset(void) { return fpr0_off / 2; } 131
161 132 // During deoptimization only the result registers need to be restored;
162 // During deoptimization only the result registers need to be restored; 133 // all the other values have already been extracted.
163 // all the other values have already been extracted. 134 static void restore_result_registers(MacroAssembler* masm);
164
165 static void restore_result_registers(MacroAssembler* masm);
166 }; 135 };
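A minimal sketch of what DEF_LAYOUT_OFFS expands to and why the accessors above divide by 2 (the demo names below are invented; only the macro shape is taken from this file): each register gets two consecutive 32-bit slot enumerators, so slot / 2 recovers the 64-bit word index that raOffset(), methodOffset(), v0Offset() and fpResultOffset() return.

    // Illustrative expansion, not part of the source: two enumerators are
    // pasted per register, so the enum counts 32-bit VMReg slots.
    #define DEMO_LAYOUT_OFFS(regname) regname ## _off, regname ## H_off,
    enum demo_layout {
      DEMO_LAYOUT_OFFS(x0)   // x0_off = 0, x0H_off = 1
      DEMO_LAYOUT_OFFS(x1)   // x1_off = 2, x1H_off = 3
      demo_save_size         // 4 slots == 2 saved 64-bit registers
    };
    static_assert(x1_off / 2 == 1, "x1 is the second 64-bit word");  // C++11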
167 136
168 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors ) { 137 OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors ) {
169 138
170 /* 139 /*
181 int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt; 150 int additional_frame_slots = additional_frame_words*wordSize / BytesPerInt;
182 // CodeBlob frame size is in words. 151 // CodeBlob frame size is in words.
183 int frame_size_in_words = frame_size_in_bytes / wordSize; 152 int frame_size_in_words = frame_size_in_bytes / wordSize;
184 *total_frame_words = frame_size_in_words; 153 *total_frame_words = frame_size_in_words;
185 154
186 // save registers, fpu state, and flags 155 // save registers, fpu state, and flags
187 // We assume the caller already has a return address slot on the stack 156 // We assume the caller already has a return address slot on the stack
188 // We push ebp twice in this sequence because we want the real ebp 157 // We push ebp twice in this sequence because we want the real ebp
189 // to be under the return address like a normal enter and we want to use pushad 158 // to be under the return address like a normal enter and we want to use pushad
190 // We push by hand instead of using push 159 // We push by hand instead of using push
191 160
192 __ daddiu(SP, SP, - reg_save_size * jintSize); 161 __ daddiu(SP, SP, - reg_save_size * jintSize);
193 162
194 __ sdc1(F0, SP, fpr0_off * jintSize); __ sdc1(F1, SP, fpr1_off * jintSize); 163 __ sdc1(F0, SP, fpr0_off * jintSize); __ sdc1(F1, SP, fpr1_off * jintSize);
195 __ sdc1(F2, SP, fpr2_off * jintSize); __ sdc1(F3, SP, fpr3_off * jintSize); 164 __ sdc1(F2, SP, fpr2_off * jintSize); __ sdc1(F3, SP, fpr3_off * jintSize);
196 __ sdc1(F4, SP, fpr4_off * jintSize); __ sdc1(F5, SP, fpr5_off * jintSize); 165 __ sdc1(F4, SP, fpr4_off * jintSize); __ sdc1(F5, SP, fpr5_off * jintSize);
197 __ sdc1(F6, SP, fpr6_off * jintSize); __ sdc1(F7, SP, fpr7_off * jintSize); 166 __ sdc1(F6, SP, fpr6_off * jintSize); __ sdc1(F7, SP, fpr7_off * jintSize);
198 __ sdc1(F8, SP, fpr8_off * jintSize); __ sdc1(F9, SP, fpr9_off * jintSize); 167 __ sdc1(F8, SP, fpr8_off * jintSize); __ sdc1(F9, SP, fpr9_off * jintSize);
199 __ sdc1(F10, SP, fpr10_off * jintSize); __ sdc1(F11, SP, fpr11_off * jintSize); 168 __ sdc1(F10, SP, fpr10_off * jintSize); __ sdc1(F11, SP, fpr11_off * jintSize);
200 __ sdc1(F12, SP, fpr12_off * jintSize); __ sdc1(F13, SP, fpr13_off * jintSize); 169 __ sdc1(F12, SP, fpr12_off * jintSize); __ sdc1(F13, SP, fpr13_off * jintSize);
201 __ sdc1(F14, SP, fpr14_off * jintSize); __ sdc1(F15, SP, fpr15_off * jintSize); 170 __ sdc1(F14, SP, fpr14_off * jintSize); __ sdc1(F15, SP, fpr15_off * jintSize);
202 __ sdc1(F16, SP, fpr16_off * jintSize); __ sdc1(F17, SP, fpr17_off * jintSize); 171 __ sdc1(F16, SP, fpr16_off * jintSize); __ sdc1(F17, SP, fpr17_off * jintSize);
203 __ sdc1(F18, SP, fpr18_off * jintSize); __ sdc1(F19, SP, fpr19_off * jintSize); 172 __ sdc1(F18, SP, fpr18_off * jintSize); __ sdc1(F19, SP, fpr19_off * jintSize);
204 __ sdc1(F20, SP, fpr20_off * jintSize); __ sdc1(F21, SP, fpr21_off * jintSize); 173 __ sdc1(F20, SP, fpr20_off * jintSize); __ sdc1(F21, SP, fpr21_off * jintSize);
205 __ sdc1(F22, SP, fpr22_off * jintSize); __ sdc1(F23, SP, fpr23_off * jintSize); 174 __ sdc1(F22, SP, fpr22_off * jintSize); __ sdc1(F23, SP, fpr23_off * jintSize);
206 __ sdc1(F24, SP, fpr24_off * jintSize); __ sdc1(F25, SP, fpr25_off * jintSize); 175 __ sdc1(F24, SP, fpr24_off * jintSize); __ sdc1(F25, SP, fpr25_off * jintSize);
207 __ sdc1(F26, SP, fpr26_off * jintSize); __ sdc1(F27, SP, fpr27_off * jintSize); 176 __ sdc1(F26, SP, fpr26_off * jintSize); __ sdc1(F27, SP, fpr27_off * jintSize);
208 __ sdc1(F28, SP, fpr28_off * jintSize); __ sdc1(F29, SP, fpr29_off * jintSize); 177 __ sdc1(F28, SP, fpr28_off * jintSize); __ sdc1(F29, SP, fpr29_off * jintSize);
209 __ sdc1(F30, SP, fpr30_off * jintSize); __ sdc1(F31, SP, fpr31_off * jintSize); 178 __ sdc1(F30, SP, fpr30_off * jintSize); __ sdc1(F31, SP, fpr31_off * jintSize);
210 __ sd(V0, SP, v0_off * jintSize); __ sd(V1, SP, v1_off * jintSize); 179 __ sd(V0, SP, v0_off * jintSize); __ sd(V1, SP, v1_off * jintSize);
211 __ sd(A0, SP, a0_off * jintSize); __ sd(A1, SP, a1_off * jintSize); 180 __ sd(A0, SP, a0_off * jintSize); __ sd(A1, SP, a1_off * jintSize);
212 __ sd(A2, SP, a2_off * jintSize); __ sd(A3, SP, a3_off * jintSize); 181 __ sd(A2, SP, a2_off * jintSize); __ sd(A3, SP, a3_off * jintSize);
213 __ sd(A4, SP, a4_off * jintSize); __ sd(A5, SP, a5_off * jintSize); 182 __ sd(A4, SP, a4_off * jintSize); __ sd(A5, SP, a5_off * jintSize);
214 __ sd(A6, SP, a6_off * jintSize); __ sd(A7, SP, a7_off * jintSize); 183 __ sd(A6, SP, a6_off * jintSize); __ sd(A7, SP, a7_off * jintSize);
215 __ sd(T0, SP, t0_off * jintSize); 184 __ sd(T0, SP, t0_off * jintSize);
216 __ sd(T1, SP, t1_off * jintSize); 185 __ sd(T1, SP, t1_off * jintSize);
217 __ sd(T2, SP, t2_off * jintSize); 186 __ sd(T2, SP, t2_off * jintSize);
218 __ sd(T3, SP, t3_off * jintSize); 187 __ sd(T3, SP, t3_off * jintSize);
219 __ sd(S0, SP, s0_off * jintSize); 188 __ sd(S0, SP, s0_off * jintSize);
232 __ sd(FP, SP, fp_off * jintSize); 201 __ sd(FP, SP, fp_off * jintSize);
233 __ sd(RA, SP, return_off * jintSize); 202 __ sd(RA, SP, return_off * jintSize);
234 __ daddi(FP, SP, fp_off * jintSize); 203 __ daddi(FP, SP, fp_off * jintSize);
235 204
236 OopMapSet *oop_maps = new OopMapSet(); 205 OopMapSet *oop_maps = new OopMapSet();
237 //OopMap* map = new OopMap( frame_words, 0 ); 206 //OopMap* map = new OopMap( frame_words, 0 );
238 OopMap* map = new OopMap( frame_size_in_slots, 0 ); 207 OopMap* map = new OopMap( frame_size_in_slots, 0 );
239 208
240 209
241 //#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words) 210 //#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)
242 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots) 211 #define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_slots)
243 map->set_callee_saved(STACK_OFFSET( v0_off), V0->as_VMReg()); 212 map->set_callee_saved(STACK_OFFSET( v0_off), V0->as_VMReg());
299 map->set_callee_saved(STACK_OFFSET( fpr28_off), F28->as_VMReg()); 268 map->set_callee_saved(STACK_OFFSET( fpr28_off), F28->as_VMReg());
300 map->set_callee_saved(STACK_OFFSET( fpr29_off), F29->as_VMReg()); 269 map->set_callee_saved(STACK_OFFSET( fpr29_off), F29->as_VMReg());
301 map->set_callee_saved(STACK_OFFSET( fpr30_off), F30->as_VMReg()); 270 map->set_callee_saved(STACK_OFFSET( fpr30_off), F30->as_VMReg());
302 map->set_callee_saved(STACK_OFFSET( fpr31_off), F31->as_VMReg()); 271 map->set_callee_saved(STACK_OFFSET( fpr31_off), F31->as_VMReg());
303 272
304 /*
305 if (true) {
306 map->set_callee_saved(STACK_OFFSET( v0H_off), V0->as_VMReg()->next());
307 map->set_callee_saved(STACK_OFFSET( v1H_off), V1->as_VMReg()->next());
308 map->set_callee_saved(STACK_OFFSET( a0H_off), A0->as_VMReg()->next());
309 map->set_callee_saved(STACK_OFFSET( a1H_off), A1->as_VMReg()->next());
310 map->set_callee_saved(STACK_OFFSET( a2H_off), A2->as_VMReg()->next());
311 map->set_callee_saved(STACK_OFFSET( a3H_off), A3->as_VMReg()->next());
312 map->set_callee_saved(STACK_OFFSET( a4H_off), A4->as_VMReg()->next());
313 map->set_callee_saved(STACK_OFFSET( a5H_off), A5->as_VMReg()->next());
314 map->set_callee_saved(STACK_OFFSET( a6H_off), A6->as_VMReg()->next());
315 map->set_callee_saved(STACK_OFFSET( a7H_off), A7->as_VMReg()->next());
316 map->set_callee_saved(STACK_OFFSET( t0H_off), T0->as_VMReg()->next());
317 map->set_callee_saved(STACK_OFFSET( t1H_off), T1->as_VMReg()->next());
318 map->set_callee_saved(STACK_OFFSET( t2H_off), T2->as_VMReg()->next());
319 map->set_callee_saved(STACK_OFFSET( t3H_off), T3->as_VMReg()->next());
320 map->set_callee_saved(STACK_OFFSET( s0H_off), S0->as_VMReg()->next());
321 map->set_callee_saved(STACK_OFFSET( s1H_off), S1->as_VMReg()->next());
322 map->set_callee_saved(STACK_OFFSET( s2H_off), S2->as_VMReg()->next());
323 map->set_callee_saved(STACK_OFFSET( s3H_off), S3->as_VMReg()->next());
324 map->set_callee_saved(STACK_OFFSET( s4H_off), S4->as_VMReg()->next());
325 map->set_callee_saved(STACK_OFFSET( s5H_off), S5->as_VMReg()->next());
326 map->set_callee_saved(STACK_OFFSET( s6H_off), S6->as_VMReg()->next());
327 map->set_callee_saved(STACK_OFFSET( s7H_off), S7->as_VMReg()->next());
328 map->set_callee_saved(STACK_OFFSET( t8H_off), T8->as_VMReg()->next());
329 map->set_callee_saved(STACK_OFFSET( t9H_off), T9->as_VMReg()->next());
330 map->set_callee_saved(STACK_OFFSET( gpH_off), GP->as_VMReg()->next());
331 map->set_callee_saved(STACK_OFFSET( fpH_off), FP->as_VMReg()->next());
332 map->set_callee_saved(STACK_OFFSET( returnH_off), RA->as_VMReg()->next());
333
334 map->set_callee_saved(STACK_OFFSET( fpr0H_off), F0->as_VMReg()->next());
335 map->set_callee_saved(STACK_OFFSET( fpr2H_off), F2->as_VMReg()->next());
336 map->set_callee_saved(STACK_OFFSET( fpr4H_off), F4->as_VMReg()->next());
337 map->set_callee_saved(STACK_OFFSET( fpr6H_off), F6->as_VMReg()->next());
338 map->set_callee_saved(STACK_OFFSET( fpr8H_off), F8->as_VMReg()->next());
339 map->set_callee_saved(STACK_OFFSET( fpr10H_off), F10->as_VMReg()->next());
340 map->set_callee_saved(STACK_OFFSET( fpr12H_off), F12->as_VMReg()->next());
341 map->set_callee_saved(STACK_OFFSET( fpr14H_off), F14->as_VMReg()->next());
342 map->set_callee_saved(STACK_OFFSET( fpr16H_off), F16->as_VMReg()->next());
343 map->set_callee_saved(STACK_OFFSET( fpr18H_off), F18->as_VMReg()->next());
344 map->set_callee_saved(STACK_OFFSET( fpr20H_off), F20->as_VMReg()->next());
345 map->set_callee_saved(STACK_OFFSET( fpr22H_off), F22->as_VMReg()->next());
346 map->set_callee_saved(STACK_OFFSET( fpr24H_off), F24->as_VMReg()->next());
347 map->set_callee_saved(STACK_OFFSET( fpr26H_off), F26->as_VMReg()->next());
348 map->set_callee_saved(STACK_OFFSET( fpr28H_off), F28->as_VMReg()->next());
349 map->set_callee_saved(STACK_OFFSET( fpr30H_off), F30->as_VMReg()->next());
350 }
351 */
352 #undef STACK_OFFSET 273 #undef STACK_OFFSET
353 return map; 274 return map;
354 } 275 }
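As a hedged reading of the STACK_OFFSET macro used above: it biases a layout-enum slot index by the caller-requested extra slots before handing it to VMRegImpl::stack2reg(). A tiny sketch (the helper name is invented):

    // STACK_OFFSET(x) == VMRegImpl::stack2reg((x) + additional_frame_slots),
    // so with additional_frame_slots == 4 the v0 save slot v0_off == k is
    // reported to the OopMap as stack slot k + 4.
    static int demo_biased_slot(int layout_off, int additional_frame_slots) {
      return layout_off + additional_frame_slots;  // then wrapped by stack2reg()
    }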
355 276
356 277
358 // saved. 279 // saved.
359 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) { 280 void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
360 __ ldc1(F0, SP, fpr0_off * jintSize); __ ldc1(F1, SP, fpr1_off * jintSize); 281 __ ldc1(F0, SP, fpr0_off * jintSize); __ ldc1(F1, SP, fpr1_off * jintSize);
361 __ ldc1(F2, SP, fpr2_off * jintSize); __ ldc1(F3, SP, fpr3_off * jintSize); 282 __ ldc1(F2, SP, fpr2_off * jintSize); __ ldc1(F3, SP, fpr3_off * jintSize);
362 __ ldc1(F4, SP, fpr4_off * jintSize); __ ldc1(F5, SP, fpr5_off * jintSize); 283 __ ldc1(F4, SP, fpr4_off * jintSize); __ ldc1(F5, SP, fpr5_off * jintSize);
363 __ ldc1(F6, SP, fpr6_off * jintSize); __ ldc1(F7, SP, fpr7_off * jintSize); 284 __ ldc1(F6, SP, fpr6_off * jintSize); __ ldc1(F7, SP, fpr7_off * jintSize);
364 __ ldc1(F8, SP, fpr8_off * jintSize); __ ldc1(F9, SP, fpr9_off * jintSize); 285 __ ldc1(F8, SP, fpr8_off * jintSize); __ ldc1(F9, SP, fpr9_off * jintSize);
365 __ ldc1(F10, SP, fpr10_off * jintSize); __ ldc1(F11, SP, fpr11_off * jintSize); 286 __ ldc1(F10, SP, fpr10_off * jintSize); __ ldc1(F11, SP, fpr11_off * jintSize);
366 __ ldc1(F12, SP, fpr12_off * jintSize); __ ldc1(F13, SP, fpr13_off * jintSize); 287 __ ldc1(F12, SP, fpr12_off * jintSize); __ ldc1(F13, SP, fpr13_off * jintSize);
367 __ ldc1(F14, SP, fpr14_off * jintSize); __ ldc1(F15, SP, fpr15_off * jintSize); 288 __ ldc1(F14, SP, fpr14_off * jintSize); __ ldc1(F15, SP, fpr15_off * jintSize);
368 __ ldc1(F16, SP, fpr16_off * jintSize); __ ldc1(F17, SP, fpr17_off * jintSize); 289 __ ldc1(F16, SP, fpr16_off * jintSize); __ ldc1(F17, SP, fpr17_off * jintSize);
369 __ ldc1(F18, SP, fpr18_off * jintSize); __ ldc1(F19, SP, fpr19_off * jintSize); 290 __ ldc1(F18, SP, fpr18_off * jintSize); __ ldc1(F19, SP, fpr19_off * jintSize);
370 __ ldc1(F20, SP, fpr20_off * jintSize); __ ldc1(F21, SP, fpr21_off * jintSize); 291 __ ldc1(F20, SP, fpr20_off * jintSize); __ ldc1(F21, SP, fpr21_off * jintSize);
371 __ ldc1(F22, SP, fpr22_off * jintSize); __ ldc1(F23, SP, fpr23_off * jintSize); 292 __ ldc1(F22, SP, fpr22_off * jintSize); __ ldc1(F23, SP, fpr23_off * jintSize);
372 __ ldc1(F24, SP, fpr24_off * jintSize); __ ldc1(F25, SP, fpr25_off * jintSize); 293 __ ldc1(F24, SP, fpr24_off * jintSize); __ ldc1(F25, SP, fpr25_off * jintSize);
373 __ ldc1(F26, SP, fpr26_off * jintSize); __ ldc1(F27, SP, fpr27_off * jintSize); 294 __ ldc1(F26, SP, fpr26_off * jintSize); __ ldc1(F27, SP, fpr27_off * jintSize);
374 __ ldc1(F28, SP, fpr28_off * jintSize); __ ldc1(F29, SP, fpr29_off * jintSize); 295 __ ldc1(F28, SP, fpr28_off * jintSize); __ ldc1(F29, SP, fpr29_off * jintSize);
375 __ ldc1(F30, SP, fpr30_off * jintSize); __ ldc1(F31, SP, fpr31_off * jintSize); 296 __ ldc1(F30, SP, fpr30_off * jintSize); __ ldc1(F31, SP, fpr31_off * jintSize);
376 297
377 __ ld(V0, SP, v0_off * jintSize); __ ld(V1, SP, v1_off * jintSize); 298 __ ld(V0, SP, v0_off * jintSize); __ ld(V1, SP, v1_off * jintSize);
378 __ ld(A0, SP, a0_off * jintSize); __ ld(A1, SP, a1_off * jintSize); 299 __ ld(A0, SP, a0_off * jintSize); __ ld(A1, SP, a1_off * jintSize);
379 __ ld(A2, SP, a2_off * jintSize); __ ld(A3, SP, a3_off * jintSize); 300 __ ld(A2, SP, a2_off * jintSize); __ ld(A3, SP, a3_off * jintSize);
380 __ ld(A4, SP, a4_off * jintSize); __ ld(A5, SP, a5_off * jintSize); 301 __ ld(A4, SP, a4_off * jintSize); __ ld(A5, SP, a5_off * jintSize);
381 __ ld(A6, SP, a6_off * jintSize); __ ld(A7, SP, a7_off * jintSize); 302 __ ld(A6, SP, a6_off * jintSize); __ ld(A7, SP, a7_off * jintSize);
382 __ ld(T0, SP, t0_off * jintSize); 303 __ ld(T0, SP, t0_off * jintSize);
383 __ ld(T1, SP, t1_off * jintSize); 304 __ ld(T1, SP, t1_off * jintSize);
384 __ ld(T2, SP, t2_off * jintSize); 305 __ ld(T2, SP, t2_off * jintSize);
385 __ ld(T3, SP, t3_off * jintSize); 306 __ ld(T3, SP, t3_off * jintSize);
386 __ ld(S0, SP, s0_off * jintSize); 307 __ ld(S0, SP, s0_off * jintSize);
404 325
405 // Pop the current frame and restore the registers that might be holding 326 // Pop the current frame and restore the registers that might be holding
406 // a result. 327 // a result.
407 // FIXME: what if the result is a float? 328 // FIXME: what if the result is a float?
408 void RegisterSaver::restore_result_registers(MacroAssembler* masm) { 329 void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
330
409 // Just restore result register. Only used by deoptimization. By 331 // Just restore result register. Only used by deoptimization. By
410 // now any callee save register that needs to be restored to a c2 332 // now any callee save register that needs to be restored to a c2
411 // caller of the deoptee has been extracted into the vframeArray 333 // caller of the deoptee has been extracted into the vframeArray
412 // and will be stuffed into the c2i adapter we create for later 334 // and will be stuffed into the c2i adapter we create for later
413 // restoration so only result registers need to be restored here. 335 // restoration so only result registers need to be restored here.
414 // 336
415 __ ld(V0, SP, v0_off * jintSize); 337 __ ld(V0, SP, v0_off * jintSize);
416 __ ld(V1, SP, v1_off * jintSize); 338 __ ld(V1, SP, v1_off * jintSize);
417 __ addiu(SP, SP, return_off * jintSize); 339 __ addiu(SP, SP, return_off * jintSize);
418 } 340 }
419 341
420 // Is the vector's size (in bytes) bigger than the size saved by default? 342 // Is the vector's size (in bytes) bigger than the size saved by default?
421 // 16-byte XMM registers are saved by default using fxsave/fxrstor instructions. 343 // 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
422 bool SharedRuntime::is_wide_vector(int size) { 344 bool SharedRuntime::is_wide_vector(int size) {
423 return size > 16; 345 return size > 16;
424 } 346 }
425 347
426 // The java_calling_convention describes stack locations as ideal slots on 348 // The java_calling_convention describes stack locations as ideal slots on
427 // a frame with no abi restrictions. Since we must observe abi restrictions 349 // a frame with no abi restrictions. Since we must observe abi restrictions
428 // (like the placement of the register window) the slots must be biased by 350 // (like the placement of the register window) the slots must be biased by
429 // the following value. 351 // the following value.
430 352
431 static int reg2offset_in(VMReg r) { 353 static int reg2offset_in(VMReg r) {
432 // Account for saved ebp and return address 354 // Account for saved ebp and return address
433 // This should really be in_preserve_stack_slots 355 // This should really be in_preserve_stack_slots
434 return (r->reg2stack() + 2 * VMRegImpl::slots_per_word) * VMRegImpl::stack_slot_size; // + 2 * VMRegImpl::stack_slot_size); 356 return (r->reg2stack() + 2 * VMRegImpl::slots_per_word) * VMRegImpl::stack_slot_size; // + 2 * VMRegImpl::stack_slot_size);
435 } 357 }
436 358
437 static int reg2offset_out(VMReg r) { 359 static int reg2offset_out(VMReg r) {
438 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; 360 return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
439 } 361 }
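A worked example of the two helpers, under the usual LP64 assumptions (VMRegImpl::stack_slot_size == 4 and VMRegImpl::slots_per_word == 2; the helper below is illustrative, not from the VM):

    // reg2offset_in biases by two words (the saved FP and return address),
    // so a VMReg at stack index 3 lands at (3 + 2*2) * 4 = 28 bytes.
    // reg2offset_out instead adds SharedRuntime::out_preserve_stack_slots().
    static int demo_reg2offset_in(int reg2stack_index) {
      const int slots_per_word  = 2;  // assumed LP64 value
      const int stack_slot_size = 4;  // assumed VMRegImpl value
      return (reg2stack_index + 2 * slots_per_word) * stack_slot_size;
    }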
440 362
441 // --------------------------------------------------------------------------- 363 // ---------------------------------------------------------------------------
442 // Read the array of BasicTypes from a signature, and compute where the 364 // Read the array of BasicTypes from a signature, and compute where the
443 // arguments should go. Values in the VMRegPair regs array refer to 4-byte 365 // arguments should go. Values in the VMRegPair regs array refer to 4-byte
468 390
469 int SharedRuntime::java_calling_convention(const BasicType *sig_bt, 391 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
470 VMRegPair *regs, 392 VMRegPair *regs,
471 int total_args_passed, 393 int total_args_passed,
472 int is_outgoing) { 394 int is_outgoing) {
473 //#define aoqi_test
474 #ifdef aoqi_test
475 tty->print_cr(" SharedRuntime::%s :%d, total_args_passed: %d", __func__, __LINE__, total_args_passed);
476 #endif
477 395
478 // Create the mapping between argument positions and 396 // Create the mapping between argument positions and
479 // registers. 397 // registers.
480 //static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = { 398 //static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
481 static const Register INT_ArgReg[Argument::n_register_parameters + 1] = { 399 static const Register INT_ArgReg[Argument::n_register_parameters + 1] = {
541 break; 459 break;
542 default: 460 default:
543 ShouldNotReachHere(); 461 ShouldNotReachHere();
544 break; 462 break;
545 } 463 }
546 #ifdef aoqi_test
547 tty->print_cr(" SharedRuntime::%s :%d, sig_bt[%d]: %d, reg[%d]:%d|%d, stk_args:%d", __func__, __LINE__, i, sig_bt[i], i, regs[i].first(), regs[i].second(), stk_args);
548 #endif
549 } 464 }
550 465
551 return round_to(stk_args, 2); 466 return round_to(stk_args, 2);
552 /*
553 // Starting stack position for args on stack
554 uint stack = 0;
555
556 // Pass first five oop/int args in registers T0, A0 - A3.
557 uint reg_arg0 = 9999;
558 uint reg_arg1 = 9999;
559 uint reg_arg2 = 9999;
560 uint reg_arg3 = 9999;
561 uint reg_arg4 = 9999;
562
563
564 // Pass doubles & longs &float ligned on the stack. First count stack slots for doubles
565 int i;
566 for( i = 0; i < total_args_passed; i++) {
567 if( sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG ) {
568 stack += 2;
569 }
570 }
571 int dstack = 0; // Separate counter for placing doubles
572 for( i = 0; i < total_args_passed; i++) {
573 // From the type and the argument number (count) compute the location
574 switch( sig_bt[i] ) {
575 case T_SHORT:
576 case T_CHAR:
577 case T_BYTE:
578 case T_BOOLEAN:
579 case T_INT:
580 case T_ARRAY:
581 case T_OBJECT:
582 case T_ADDRESS:
583 if( reg_arg0 == 9999 ) {
584 reg_arg0 = i;
585 regs[i].set1(T0->as_VMReg());
586 } else if( reg_arg1 == 9999 ) {
587 reg_arg1 = i;
588 regs[i].set1(A0->as_VMReg());
589 } else if( reg_arg2 == 9999 ) {
590 reg_arg2 = i;
591 regs[i].set1(A1->as_VMReg());
592 }else if( reg_arg3 == 9999 ) {
593 reg_arg3 = i;
594 regs[i].set1(A2->as_VMReg());
595 }else if( reg_arg4 == 9999 ) {
596 reg_arg4 = i;
597 regs[i].set1(A3->as_VMReg());
598 } else {
599 regs[i].set1(VMRegImpl::stack2reg(stack++));
600 }
601 break;
602 case T_FLOAT:
603 regs[i].set1(VMRegImpl::stack2reg(stack++));
604 break;
605 case T_LONG:
606 assert(sig_bt[i+1] == T_VOID, "missing Half" );
607 regs[i].set2(VMRegImpl::stack2reg(dstack));
608 dstack += 2;
609 break;
610 case T_DOUBLE:
611 assert(sig_bt[i+1] == T_VOID, "missing Half" );
612 regs[i].set2(VMRegImpl::stack2reg(dstack));
613 dstack += 2;
614 break;
615 case T_VOID: regs[i].set_bad(); break;
616 break;
617 default:
618 ShouldNotReachHere();
619 break;
620 }
621 }
622 // return value can be odd number of VMRegImpl stack slots make multiple of 2
623 return round_to(stack, 2);
624 */
625 } 467 }
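The trailing round_to keeps the outgoing area an even number of 32-bit slots so that T_LONG/T_DOUBLE stack arguments stay 8-byte aligned. A self-contained sketch of that rounding, assuming a power-of-two unit as round_to is used here:

    static int demo_round_to(int x, int unit) {  // unit must be a power of 2
      return (x + unit - 1) & ~(unit - 1);
    }
    // demo_round_to(5, 2) == 6: an odd slot count is padded with one slot.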
626 468
627 // Helper class mostly to avoid passing masm everywhere, and handle store 469 // Helper class mostly to avoid passing masm everywhere, and handle store
628 // displacement overflow logic for LP64 470 // displacement overflow logic for LP64
629 class AdapterGenerator { 471 class AdapterGenerator {
632 Register Rdisp; 474 Register Rdisp;
633 void set_Rdisp(Register r) { Rdisp = r; } 475 void set_Rdisp(Register r) { Rdisp = r; }
634 #endif // _LP64 476 #endif // _LP64
635 477
636 void patch_callers_callsite(); 478 void patch_callers_callsite();
637 // void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
638 479
639 // base+st_off points to top of argument 480 // base+st_off points to top of argument
640 int arg_offset(const int st_off) { return st_off; } 481 int arg_offset(const int st_off) { return st_off; }
641 int next_arg_offset(const int st_off) { 482 int next_arg_offset(const int st_off) {
642 return st_off - Interpreter::stackElementSize; 483 return st_off - Interpreter::stackElementSize;
682 }; 523 };
683 524
684 525
685 // Patch the caller's callsite with entry to compiled code if it exists. 526 // Patch the caller's callsite with entry to compiled code if it exists.
686 void AdapterGenerator::patch_callers_callsite() { 527 void AdapterGenerator::patch_callers_callsite() {
687 Label L; 528 Label L;
688 //FIXME , what is stored in eax? 529 __ verify_oop(Rmethod);
689 //__ verify_oop(ebx); 530 __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset()));
690 __ verify_oop(Rmethod); 531 __ beq(AT,R0,L);
691 // __ cmpl(Address(ebx, in_bytes(Method::code_offset())), NULL_WORD); 532 __ delayed()->nop();
692 __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset())); 533 // Schedule the branch target address early.
693 //__ jcc(Assembler::equal, L); 534 // Call into the VM to patch the caller, then jump to compiled callee
694 __ beq(AT,R0,L); 535 // eax isn't live so capture return address while we easily can
695 __ delayed()->nop(); 536 __ move(V0, RA);
696 // Schedule the branch target address early. 537
697 // Call into the VM to patch the caller, then jump to compiled callee 538 __ pushad();
698 // eax isn't live so capture return address while we easily can
699 // __ movl(eax, Address(esp, 0));
700 // __ lw(T5,SP,0);
701 __ move(V0, RA);
702
703 __ pushad();
704 //jerome_for_debug
705 // __ pushad();
706 // __ pushfd();
707 #ifdef COMPILER2 539 #ifdef COMPILER2
708 // C2 may leave the stack dirty if not in SSE2+ mode 540 // C2 may leave the stack dirty if not in SSE2+ mode
709 __ empty_FPU_stack(); 541 __ empty_FPU_stack();
710 #endif /* COMPILER2 */ 542 #endif /* COMPILER2 */
711 543
712 // VM needs caller's callsite 544 // VM needs caller's callsite
713 // __ pushl(eax); 545 // VM needs target method
714 546
715 // VM needs target method 547 __ move(A0, Rmethod);
716 // __ pushl(ebx); 548 __ move(A1, V0);
717 // __ push(Rmethod);
718 // __ verify_oop(ebx);
719
720 __ move(A0, Rmethod);
721 __ move(A1, V0);
722 // __ addi(SP, SP, -8);
723 //we should preserve the return address 549 //we should preserve the return address
724 __ verify_oop(Rmethod); 550 __ verify_oop(Rmethod);
725 __ move(S0, SP); 551 __ move(S0, SP);
726 __ move(AT, -(StackAlignmentInBytes)); // align the stack 552 __ move(AT, -(StackAlignmentInBytes)); // align the stack
727 __ andr(SP, SP, AT); 553 __ andr(SP, SP, AT);
728 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), 554 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite),
729 relocInfo::runtime_call_type); 555 relocInfo::runtime_call_type);
730 //__ addl(esp, 2*wordSize); 556
731 557 __ delayed()->nop();
732 __ delayed()->nop(); 558 __ move(SP, S0);
733 // __ addi(SP, SP, 8); 559 __ popad();
734 // __ popfd(); 560 __ bind(L);
735 __ move(SP, S0); 561 }
736 __ popad();
737 __ bind(L);
738 }
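The align-then-restore idiom above (move(AT, -(StackAlignmentInBytes)); andr(SP, SP, AT); call; move(SP, S0)) is the usual mask trick; in plain C, assuming a power-of-two alignment:

    // Clearing the low bits rounds SP down to the alignment boundary; the
    // original SP is parked in S0 and restored after the runtime call.
    static unsigned long demo_align_down(unsigned long sp, unsigned long align) {
      return sp & ~(align - 1UL);   // same effect as andr(SP, SP, -align)
    }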
739 /*
740 void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
741 Register scratch) {
742 Unimplemented();
743 }*/
744 562
745 #ifdef _LP64 563 #ifdef _LP64
746 Register AdapterGenerator::arg_slot(const int st_off) { 564 Register AdapterGenerator::arg_slot(const int st_off) {
747 Unimplemented(); 565 Unimplemented();
748 } 566 }
749 567
750 Register AdapterGenerator::next_arg_slot(const int st_off){ 568 Register AdapterGenerator::next_arg_slot(const int st_off){
751 Unimplemented(); 569 Unimplemented();
752 } 570 }
753 #endif // _LP64 571 #endif // _LP64
754 572
755 // Stores long into offset pointed to by base 573 // Stores long into offset pointed to by base
756 void AdapterGenerator::store_c2i_long(Register r, Register base, 574 void AdapterGenerator::store_c2i_long(Register r, Register base,
757 const int st_off, bool is_stack) { 575 const int st_off, bool is_stack) {
758 Unimplemented(); 576 Unimplemented();
759 } 577 }
760 578
761 void AdapterGenerator::store_c2i_object(Register r, Register base, 579 void AdapterGenerator::store_c2i_object(Register r, Register base,
762 const int st_off) { 580 const int st_off) {
763 Unimplemented(); 581 Unimplemented();
764 } 582 }
765 583
766 void AdapterGenerator::store_c2i_int(Register r, Register base, 584 void AdapterGenerator::store_c2i_int(Register r, Register base,
767 const int st_off) { 585 const int st_off) {
768 Unimplemented(); 586 Unimplemented();
769 } 587 }
770 588
771 // Stores into offset pointed to by base 589 // Stores into offset pointed to by base
772 void AdapterGenerator::store_c2i_double(VMReg r_2, 590 void AdapterGenerator::store_c2i_double(VMReg r_2,
773 VMReg r_1, Register base, const int st_off) { 591 VMReg r_1, Register base, const int st_off) {
774 Unimplemented(); 592 Unimplemented();
775 } 593 }
776 594
777 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base, 595 void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
778 const int st_off) { 596 const int st_off) {
779 Unimplemented(); 597 Unimplemented();
780 } 598 }
781 /*
782 void AdapterGenerator::tag_stack(const BasicType sig, int st_off) {
783 if (TaggedStackInterpreter) {
784 int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
785 if (sig == T_OBJECT || sig == T_ARRAY) {
786 // __ movl(Address(esp, tag_offset), frame::TagReference);
787 // __ addi(AT,R0, frame::TagReference);
788
789 __ move(AT, frame::TagReference);
790 __ sw (AT, SP, tag_offset);
791 } else if (sig == T_LONG || sig == T_DOUBLE) {
792 int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
793 // __ movl(Address(esp, next_tag_offset), frame::TagValue);
794 // __ addi(AT,R0, frame::TagValue);
795 __ move(AT, frame::TagValue);
796 __ sw (AT, SP, next_tag_offset);
797 //__ movl(Address(esp, tag_offset), frame::TagValue);
798 // __ addi(AT,R0, frame::TagValue);
799 __ move(AT, frame::TagValue);
800 __ sw (AT, SP, tag_offset);
801
802 } else {
803 // __ movl(Address(esp, tag_offset), frame::TagValue);
804 //__ addi(AT,R0, frame::TagValue);
805 __ move(AT, frame::TagValue);
806 __ sw (AT, SP, tag_offset);
807
808 }
809 }
810 }*/
811 599
812 void AdapterGenerator::gen_c2i_adapter( 600 void AdapterGenerator::gen_c2i_adapter(
813 int total_args_passed, 601 int total_args_passed,
814 // VMReg max_arg, 602 // VMReg max_arg,
815 int comp_args_on_stack, // VMRegStackSlots 603 int comp_args_on_stack, // VMRegStackSlots
832 // Actually if we detected that we had an i2c->c2i transition here we 620 // Actually if we detected that we had an i2c->c2i transition here we
833 // ought to be able to reset the world back to the state of the interpreted 621 // ought to be able to reset the world back to the state of the interpreted
834 // call and not bother building another interpreter arg area. We don't 622 // call and not bother building another interpreter arg area. We don't
835 // do that at this point. 623 // do that at this point.
836 624
837 patch_callers_callsite(); 625 patch_callers_callsite();
838 626
839 __ bind(skip_fixup); 627 __ bind(skip_fixup);
840 628
841 #ifdef COMPILER2 629 #ifdef COMPILER2
842 __ empty_FPU_stack(); 630 __ empty_FPU_stack();
843 #endif /* COMPILER2 */ 631 #endif /* COMPILER2 */
844 // is this for native calls? 632 // is this for native calls?
845 // Since all args are passed on the stack, 633 // Since all args are passed on the stack,
846 // total_args_passed * Interpreter::stackElementSize 634 // total_args_passed * Interpreter::stackElementSize
847 // is the space we need. 635 // is the space we need.
848 int extraspace = total_args_passed * Interpreter::stackElementSize; 636 int extraspace = total_args_passed * Interpreter::stackElementSize;
849 637
850 // stack is aligned, keep it that way 638 // stack is aligned, keep it that way
851 extraspace = round_to(extraspace, 2*wordSize); 639 extraspace = round_to(extraspace, 2*wordSize);
852 640
853 // Get return address 641 // Get return address
854 // __ popl(eax); 642 __ move(V0, RA);
855 //__ pop(T4); 643 // set senderSP value
856 __ move(V0, RA); 644 //refer to interpreter_mips.cpp:generate_asm_entry
857 // set senderSP value 645 __ move(Rsender, SP);
858 // __ movl(esi, esp); 646 __ addi(SP, SP, -extraspace);
859 //refer to interpreter_mips.cpp:generate_asm_entry 647
860 __ move(Rsender, SP); 648 // Now write the args into the outgoing interpreter space
861 //__ subl(esp, extraspace); 649 for (int i = 0; i < total_args_passed; i++) {
862 __ addi(SP, SP, -extraspace); 650 if (sig_bt[i] == T_VOID) {
863 651 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
864 // Now write the args into the outgoing interpreter space 652 continue;
865 for (int i = 0; i < total_args_passed; i++) { 653 }
866 if (sig_bt[i] == T_VOID) { 654
867 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), 655 // st_off points to lowest address on stack.
868 "missing half"); 656 int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
869 continue; 657 // Say 4 args:
870 } 658 // i st_off
871 659 // 0 12 T_LONG
872 // st_off points to lowest address on stack. 660 // 1 8 T_VOID
873 int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize; 661 // 2 4 T_OBJECT
874 #ifdef aoqi_test 662 // 3 0 T_BOOL
875 tty->print_cr(" AdapterGenerator::%s :%d, sig_bt[%d]:%d, total_args_passed:%d, st_off:%d", __func__, __LINE__, i, sig_bt[i], total_args_passed, st_off); 663 VMReg r_1 = regs[i].first();
876 #endif 664 VMReg r_2 = regs[i].second();
877 // Say 4 args: 665 if (!r_1->is_valid()) {
878 // i st_off 666 assert(!r_2->is_valid(), "");
879 // 0 12 T_LONG 667 continue;
880 // 1 8 T_VOID 668 }
881 // 2 4 T_OBJECT 669 if (r_1->is_stack()) {
882 // 3 0 T_BOOL 670 // memory to memory use fpu stack top
883 VMReg r_1 = regs[i].first(); 671 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
884 VMReg r_2 = regs[i].second(); 672 if (!r_2->is_valid()) {
885 if (!r_1->is_valid()) { 673 __ ld_ptr(AT, SP, ld_off);
886 assert(!r_2->is_valid(), ""); 674 __ st_ptr(AT, SP, st_off);
887 continue; 675
888 } 676 } else {
889 677
890 if (r_1->is_stack()) { 678
891 // memory to memory use fpu stack top 679 int next_off = st_off - Interpreter::stackElementSize;
892 int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace; 680 __ ld_ptr(AT, SP, ld_off);
893 #ifdef aoqi_test 681 __ st_ptr(AT, SP, st_off);
894 tty->print_cr(" AdapterGenerator::%s :%d, r_1->is_stack, ld_off:%x", __func__, __LINE__, ld_off); 682
895 #endif 683 /* Ref to is_Register condition */
896 684 if(sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
897 if (!r_2->is_valid()) { 685 __ st_ptr(AT,SP,st_off - 8);
898 #ifdef aoqi_test 686 }
899 tty->print_cr(" AdapterGenerator::%s :%d, !r_2->is_valid, ld_off:%x", __func__, __LINE__, ld_off); 687 } else if (r_1->is_Register()) {
900 #endif 688 Register r = r_1->as_Register();
901 __ ld_ptr(AT, SP, ld_off); 689 if (!r_2->is_valid()) {
902 __ st_ptr(AT, SP, st_off); 690 __ sd(r,SP, st_off); //aoqi_test FIXME
903 //tag_stack(sig_bt[i], st_off); 691 } else {
904 } else { 692 //FIXME, mips will not enter here
905 #ifdef aoqi_test 693 // long/double in gpr
906 tty->print_cr(" AdapterGenerator::%s :%d, r_2->is_valid, ld_off:%x", __func__, __LINE__, ld_off); 694 __ sd(r,SP, st_off); //aoqi_test FIXME
907 #endif 695 /* Jin: In [java/util/zip/ZipFile.java]
908
909 // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
910 // st_off == MSW, st_off-wordSize == LSW
911
912 int next_off = st_off - Interpreter::stackElementSize;
913 /*
914 __ lw(AT, SP, ld_off);
915 __ sw(AT, SP, next_off);
916 __ lw(AT, SP, ld_off + wordSize);
917 __ sw(AT, SP, st_off);
918 */
919 __ ld_ptr(AT, SP, ld_off);
920 __ st_ptr(AT, SP, st_off);
921
922 /* Ref to is_Register condition */
923 if(sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
924 __ st_ptr(AT,SP,st_off - 8);
925 //tag_stack(sig_bt[i], next_off);
926 }
927 } else if (r_1->is_Register()) {
928 Register r = r_1->as_Register();
929 if (!r_2->is_valid()) {
930 #ifdef aoqi_test
931 tty->print_cr(" AdapterGenerator::%s :%d, r_1->is_Register, !r_2->is_valid, st_off: %lx", __func__, __LINE__, st_off);
932 #endif
933 // __ movl(Address(esp, st_off), r);
934 __ sd(r,SP, st_off); //aoqi_test FIXME
935 //tag_stack(sig_bt[i], st_off);
936 } else {
937 #ifdef aoqi_test
938 tty->print_cr(" AdapterGenerator::%s :%d, r_1->is_Register, r_2->is_valid, st_off: %lx", __func__, __LINE__, st_off);
939 #endif
940 //FIXME, mips will not enter here
941 // long/double in gpr
942 __ sd(r,SP, st_off); //aoqi_test FIXME
943 /* Jin: In [java/util/zip/ZipFile.java]
944 696
945 private static native long open(String name, int mode, long lastModified); 697 private static native long open(String name, int mode, long lastModified);
946 private static native int getTotal(long jzfile); 698 private static native int getTotal(long jzfile);
947 * 699 *
948 * We need to transfer T_LONG parameters from a compiled method to a native method. 700 * We need to transfer T_LONG parameters from a compiled method to a native method.
949 * It's a complex process: 701 * It's a complex process:
950 * 702 *
951 * Caller -> lir_static_call -> gen_resolve_stub 703 * Caller -> lir_static_call -> gen_resolve_stub
952 -> -- resolve_static_call_C 704 -> -- resolve_static_call_C
953 `- gen_c2i_adapter() [*] 705 `- gen_c2i_adapter() [*]
954 | 706 |
955 `- AdapterHandlerLibrary::get_create_adapter_index 707 `- AdapterHandlerLibrary::get_create_adapter_index
956 -> generate_native_entry 708 -> generate_native_entry
957 -> InterpreterRuntime::SignatureHandlerGenerator::pass_long [**] 709 -> InterpreterRuntime::SignatureHandlerGenerator::pass_long [**]
958 710
959 * In [**], a T_LONG parameter is stored on the stack as: 711 * In [**], a T_LONG parameter is stored on the stack as:
960 712
968 | (long) | 720 | (long) |
969 ----------- 721 -----------
970 | | 722 | |
971 (low) 723 (low)
972 * 724 *
973 * However, the sequence is reversed here: 725 * However, the sequence is reversed here:
974 * 726 *
975 (high) 727 (high)
976 | | 728 | |
977 ----------- 729 -----------
978 | 8 bytes | 730 | 8 bytes |
984 | | 736 | |
985 (low) 737 (low)
986 * 738 *
987 * So I stored another 8 bytes in the T_VOID slot. It can then be accessed from generate_native_entry(). 739 * So I stored another 8 bytes in the T_VOID slot. It can then be accessed from generate_native_entry().
988 */ 740 */
989 if (sig_bt[i] == T_LONG) 741 if (sig_bt[i] == T_LONG)
990 __ sd(r,SP, st_off - 8); 742 __ sd(r,SP, st_off - 8);
991 // ShouldNotReachHere(); 743 }
992 // int next_off = st_off - Interpreter::stackElementSize; 744 } else if (r_1->is_FloatRegister()) {
993 // __ sw(r_2->as_Register(),SP, st_off); 745 assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register");
994 // __ sw(r,SP, next_off); 746
995 // tag_stack(masm, sig_bt[i], next_off); 747 FloatRegister fr = r_1->as_FloatRegister();
996 } 748 if (sig_bt[i] == T_FLOAT)
997 } else if (r_1->is_FloatRegister()) { 749 __ swc1(fr,SP, st_off);
998 assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register"); 750 else {
999 751 __ sdc1(fr,SP, st_off);
1000 FloatRegister fr = r_1->as_FloatRegister(); 752 __ sdc1(fr,SP, st_off - 8); /* T_DOUBLE needs two slots */
1001 if (sig_bt[i] == T_FLOAT) 753 }
1002 __ swc1(fr,SP, st_off); 754 }
1003 else 755 }
1004 { 756
1005 __ sdc1(fr,SP, st_off); 757 // Schedule the branch target address early.
1006 __ sdc1(fr,SP, st_off - 8); /* T_DOUBLE needs two slots */ 758 __ ld_ptr(AT, Rmethod,in_bytes(Method::interpreter_entry_offset()) );
1007 } 759 // And repush original return address
1008 } 760 __ move(RA, V0);
1009 } 761 __ jr (AT);
1010 762 __ delayed()->nop();
1011 // Schedule the branch target address early.
1012 __ ld_ptr(AT, Rmethod,in_bytes(Method::interpreter_entry_offset()) );
1013 // And repush original return address
1014 __ move(RA, V0);
1015 __ jr (AT);
1016 __ delayed()->nop();
1017 } 763 }
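To restate the T_LONG convention described in the long comment above: each interpreter stack element is 8 bytes, a T_LONG occupies a (value, T_VOID) pair, and the stores at st_off and st_off - 8 duplicate the value into the adjacent T_VOID element so generate_native_entry() can find it. A rough sketch, with invented field names and under the assumption that the T_VOID element sits at the lower address:

    struct demo_long_element_pair {   // lower addresses first
      unsigned long long void_slot;   // at st_off - 8: nominally T_VOID, but
                                      // given a second copy of the value
      unsigned long long long_slot;   // at st_off: the canonical T_LONG copy
    };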
1018 764
1019 void AdapterGenerator::gen_i2c_adapter( 765 void AdapterGenerator::gen_i2c_adapter(
1020 int total_args_passed, 766 int total_args_passed,
1021 // VMReg max_arg, 767 // VMReg max_arg,
1022 int comp_args_on_stack, // VMRegStackSlots 768 int comp_args_on_stack, // VMRegStackSlots
1023 const BasicType *sig_bt, 769 const BasicType *sig_bt,
1024 const VMRegPair *regs) { 770 const VMRegPair *regs) {
1025 771
1026 // Generate an I2C adapter: adjust the I-frame to make space for the C-frame 772 // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
1027 // layout. Lesp was saved by the calling I-frame and will be restored on 773 // layout. Lesp was saved by the calling I-frame and will be restored on
1028 // return. Meanwhile, outgoing arg space is all owned by the callee 774 // return. Meanwhile, outgoing arg space is all owned by the callee
1029 // C-frame, so we can mangle it at will. After adjusting the frame size, 775 // C-frame, so we can mangle it at will. After adjusting the frame size,
1056 __ daddi(SP, SP, -comp_words_on_stack * wordSize); 802 __ daddi(SP, SP, -comp_words_on_stack * wordSize);
1057 } 803 }
1058 804
1059 // Align the outgoing SP 805 // Align the outgoing SP
1060 __ move(AT, -(StackAlignmentInBytes)); 806 __ move(AT, -(StackAlignmentInBytes));
1061 __ andr(SP, SP, AT); 807 __ andr(SP, SP, AT);
1062 // push the return address on the stack (note that pushing, rather 808 // push the return address on the stack (note that pushing, rather
1063 // than storing it, yields the correct frame alignment for the callee) 809 // than storing it, yields the correct frame alignment for the callee)
1064 // Put saved SP in another register 810 // Put saved SP in another register
1065 // const Register saved_sp = eax; 811 // const Register saved_sp = eax;
1066 const Register saved_sp = V0; 812 const Register saved_sp = V0;
1079 // in the 32-bit build. 825 // in the 32-bit build.
1080 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); 826 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1081 continue; 827 continue;
1082 } 828 }
1083 829
1084 // Pick up 0, 1 or 2 words from SP+offset. 830 // Pick up 0, 1 or 2 words from SP+offset.
1085 831
1086 //FIXME. aoqi. just delete the assert 832 //FIXME. aoqi. just delete the assert
1087 //assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?"); 833 //assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(), "scrambled load targets?");
1088 // Load in argument order going down. 834 // Load in argument order going down.
1089 int ld_off = (total_args_passed -1 - i)*Interpreter::stackElementSize; 835 int ld_off = (total_args_passed -1 - i)*Interpreter::stackElementSize;
1090 // Point to interpreter value (vs. tag) 836 // Point to interpreter value (vs. tag)
1091 int next_off = ld_off - Interpreter::stackElementSize; 837 int next_off = ld_off - Interpreter::stackElementSize;
1092 // 838 //
1093 // 839 //
1094 // 840 //
1095 VMReg r_1 = regs[i].first(); 841 VMReg r_1 = regs[i].first();
1096 VMReg r_2 = regs[i].second(); 842 VMReg r_2 = regs[i].second();
1097 if (!r_1->is_valid()) { 843 if (!r_1->is_valid()) {
1098 assert(!r_2->is_valid(), ""); 844 assert(!r_2->is_valid(), "");
1099 continue; 845 continue;
1100 } 846 }
1101 #ifdef aoqi_test 847 if (r_1->is_stack()) {
1102 tty->print_cr(" AdapterGenerator::%s :%d, sig_bt[%d]:%d, total_args_passed:%d, ld_off:%d, next_off: %d", __func__, __LINE__, i, sig_bt[i], total_args_passed, ld_off, next_off); 848 // Convert stack slot to an SP offset (+ wordSize to
1103 #endif
1104 if (r_1->is_stack()) {
1105 // Convert stack slot to an SP offset (+ wordSize to
1106 // account for return address ) 849 // account for return address )
1107 //NOTICE HERE!!!! I sub a wordSize here 850 //NOTICE HERE!!!! I sub a wordSize here
1108 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size; 851 int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size;
1109 //+ wordSize; 852 //+ wordSize;
1110 853
1111 // We can use esi as a temp here because compiled code doesn't 854 // We can use esi as a temp here because compiled code doesn't
1112 // need esi as an input 855 // need esi as an input
1113 // and if we end up going thru a c2i because of a miss a reasonable 856 // and if we end up going thru a c2i because of a miss a reasonable
1114 // value of esi 857 // value of esi
1115 // will be generated. 858 // will be generated.
1116 if (!r_2->is_valid()) { 859 if (!r_2->is_valid()) {
1117 #ifdef aoqi_test 860 __ ld(AT, saved_sp, ld_off);
1118 tty->print_cr(" AdapterGenerator::%s :%d, sig_bt[%d]:%d, total_args_passed:%d r_1->is_stack() !r_2->is_valid(), st_off:%d", __func__, __LINE__, i, sig_bt[i], total_args_passed, st_off); 861 __ sd(AT, SP, st_off);
1119 #endif
1120 __ ld(AT, saved_sp, ld_off);
1121 __ sd(AT, SP, st_off);
1122 } else { 862 } else {
1123 #ifdef aoqi_test 863 // Interpreter local[n] == MSW, local[n+1] == LSW however locals
1124 tty->print_cr(" AdapterGenerator::%s :%d, sig_bt[%d]:%d, total_args_passed:%d r_1->is_stack() r_2->is_valid(), st_off:%d", __func__, __LINE__, i, sig_bt[i], total_args_passed, st_off); 864 // are accessed as negative so LSW is at LOW address
1125 #endif 865
1126 // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals 863 // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
1127 // are accessed as negative offsets, so the LSW is at the LOW address 864 // are accessed as negative offsets, so the LSW is at the LOW address
1128 868 /*
1129 // ld_off is MSW so get LSW 869 __ ld(AT, saved_sp, next_off);
1130 // st_off is LSW (i.e. reg.first()) 870 __ sd(AT, SP, st_off);
1131 /* 871 __ ld(AT, saved_sp, ld_off);
1132 __ ld(AT, saved_sp, next_off); 872 __ sd(AT, SP, st_off + wordSize);
1133 __ sd(AT, SP, st_off); 873 */
1134 __ ld(AT, saved_sp, ld_off); 874
1135 __ sd(AT, SP, st_off + wordSize); 875 /* 2012/4/9 Jin
1136 */ 876 * [./org/eclipse/swt/graphics/GC.java]
1137 877 * void drawImageXRender(Image srcImage, int srcX, int srcY, int srcWidth, int srcHeight,
1138 /* 2012/4/9 Jin 878 int destX, int destY, int destWidth, int destHeight,
1139 * [./org/eclipse/swt/graphics/GC.java] 879 boolean simple,
1140 * void drawImageXRender(Image srcImage, int srcX, int srcY, int srcWidth, int srcHeight, 880 int imgWidth, int imgHeight,
1141 int destX, int destY, int destWidth, int destHeight, 881 long maskPixmap, <-- Pass T_LONG in stack
1142 boolean simple, 882 int maskType);
1143 int imgWidth, int imgHeight, 883 * Before this modification, Eclipse displays icons with solid black background.
1144 long maskPixmap, <-- Pass T_LONG in stack 884 */
1145 int maskType); 885 __ ld(AT, saved_sp, ld_off);
1146 * Before this modification, Eclipse displays icons with solid black background.
1147 */
1148 __ ld(AT, saved_sp, ld_off);
1149 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) 886 if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE)
1150 __ ld(AT, saved_sp, ld_off - 8); 887 __ ld(AT, saved_sp, ld_off - 8);
1151 __ sd(AT, SP, st_off); 888 __ sd(AT, SP, st_off);
1152 //__ ld(AT, saved_sp, next_off);
1153 //__ sd(AT, SP, st_off + wordSize);
1154 } 889 }
1155 } else if (r_1->is_Register()) { // Register argument 890 } else if (r_1->is_Register()) { // Register argument
1156 Register r = r_1->as_Register(); 891 Register r = r_1->as_Register();
1157 // assert(r != eax, "must be different"); 892 // assert(r != eax, "must be different");
1158 if (r_2->is_valid()) { 893 if (r_2->is_valid()) {
1159 #ifdef aoqi_test 894 // assert(r_2->as_Register() != eax, "need another temporary register");
1160 tty->print_cr(" AdapterGenerator::%s :%d, sig_bt[%d]:%d, total_args_passed:%d r_1->is_Register() r_2->is_valid()", __func__, __LINE__, i, sig_bt[i], total_args_passed); 895 // Remember r_1 is low address (and LSB on mips)
1161 #endif 896 // So r_2 gets loaded from high address regardless of the platform
1162 // assert(r_2->as_Register() != eax, "need another temporary register"); 897 //aoqi
1163 // Remember r_1 is low address (and LSB on mips) 898 assert(r_2->as_Register() == r_1->as_Register(), "");
1164 // So r_2 gets loaded from high address regardless of the platform 899 //__ ld(r_2->as_Register(), saved_sp, ld_off);
1165 //aoqi 900 //__ ld(r, saved_sp, next_off);
1166 assert(r_2->as_Register() == r_1->as_Register(), ""); 901 __ ld(r, saved_sp, ld_off);
1167 //__ ld(r_2->as_Register(), saved_sp, ld_off); 902
1168 //__ ld(r, saved_sp, next_off); 903 /* Jin:
1169 __ ld(r, saved_sp, ld_off); 904 *
1170 905 * For T_LONG type, the real layout is as below:
1171 /* Jin: 906
1172 * 907 (high)
1173 * For T_LONG type, the real layout is as below: 908 | |
1174 909 -----------
1175 (high) 910 | 8 bytes |
1176 | | 911 | (void) |
1177 ----------- 912 -----------
1178 | 8 bytes | 913 | 8 bytes |
1179 | (void) | 914 | (long) |
1180 ----------- 915 -----------
1181 | 8 bytes | 916 | |
1182 | (long) | 917 (low)
1183 ----------- 918 *
1184 | | 919 * We should load the low-8 bytes.
1185 (low) 920 */
1186 * 921 if (sig_bt[i] == T_LONG)
1187 * We should load the low-8 bytes. 922 __ ld(r, saved_sp, ld_off - 8);
1188 */
1189 if (sig_bt[i] == T_LONG)
1190 __ ld(r, saved_sp, ld_off - 8);
1191 } else { 923 } else {
1192 #ifdef aoqi_test 924 __ lw(r, saved_sp, ld_off);
1193 tty->print_cr(" AdapterGenerator::%s :%d, sig_bt[%d]:%d, total_args_passed:%d r_1->is_Register() !r_2->is_valid()", __func__, __LINE__, i, sig_bt[i], total_args_passed);
1194 #endif
1195 __ lw(r, saved_sp, ld_off);
1196 } 925 }
1197 } else if (r_1->is_FloatRegister()) { // Float Register 926 } else if (r_1->is_FloatRegister()) { // Float Register
1198 assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register"); 927 assert(sig_bt[i] == T_FLOAT || sig_bt[i] == T_DOUBLE, "Must be a float register");
1199 928
1200 FloatRegister fr = r_1->as_FloatRegister(); 929 FloatRegister fr = r_1->as_FloatRegister();
1201 if (sig_bt[i] == T_FLOAT) 930 if (sig_bt[i] == T_FLOAT)
1202 __ lwc1(fr, saved_sp, ld_off); 931 __ lwc1(fr, saved_sp, ld_off);
1203 else 932 else {
1204 { 933 __ ldc1(fr, saved_sp, ld_off);
1205 __ ldc1(fr, saved_sp, ld_off); 934 __ ldc1(fr, saved_sp, ld_off - 8);
1206 __ ldc1(fr, saved_sp, ld_off - 8); 935 }
1207 } 936 }
1208 }
1209 } 937 }
1210 938
1211 // 6243940 We might end up in handle_wrong_method if 939 // 6243940 We might end up in handle_wrong_method if
1212 // the callee is deoptimized as we race thru here. If that 940 // the callee is deoptimized as we race thru here. If that
1213 // happens we don't want to take a safepoint because the 941 // happens we don't want to take a safepoint because the
1221 __ sd(Rmethod, T8, in_bytes(JavaThread::callee_target_offset())); 949 __ sd(Rmethod, T8, in_bytes(JavaThread::callee_target_offset()));
1222 950
1223 // move methodOop to eax in case we end up in a c2i adapter. 951 // move methodOop to eax in case we end up in a c2i adapter.
1224 // the c2i adapters expect methodOop in eax (c2) because c2's 952 // the c2i adapters expect methodOop in eax (c2) because c2's
1225 // resolve stubs return the result (the method) in eax. 953 // resolve stubs return the result (the method) in eax.
1226 // I'd love to fix this. 954 // I'd love to fix this.
1227 __ move(V0, Rmethod); 955 __ move(V0, Rmethod);
1228 __ jr(T9); 956 __ jr(T9);
1229 __ delayed()->nop(); 957 __ delayed()->nop();
1230 } 958 }
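For orientation, the interpreter-side load offset used in the copy loop above, as a tiny sketch (the helper is invented; Interpreter::stackElementSize is assumed to be 8 on LP64): argument 0 sits farthest from SP, so the offset shrinks by one element per argument index.

    static int demo_interp_ld_off(int total_args_passed, int i) {
      const int stack_element_size = 8;  // assumed LP64 value
      return (total_args_passed - 1 - i) * stack_element_size;
    }
    // With 4 args: i == 0 -> 24, i == 3 -> 0, matching the ld_off formula.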
1231 959
1232 // --------------------------------------------------------------- 960 // ---------------------------------------------------------------
1262 address ic_miss = SharedRuntime::get_ic_miss_stub(); 990 address ic_miss = SharedRuntime::get_ic_miss_stub();
1263 991
1264 Label missed; 992 Label missed;
1265 993
1266 __ verify_oop(holder); 994 __ verify_oop(holder);
1267 // __ movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
1268 //__ ld_ptr(temp, receiver, oopDesc::klass_offset_in_bytes());
1269 //add for compressedoops 995 //add for compressedoops
1270 __ load_klass(temp, receiver); 996 __ load_klass(temp, receiver);
1271 __ verify_oop(temp); 997 __ verify_oop(temp);
1272 998
1273 // __ cmpl(temp, Address(holder, CompiledICHolder::holder_klass_offset())); 999 __ ld_ptr(AT, holder, CompiledICHolder::holder_klass_offset());
1274 __ ld_ptr(AT, holder, CompiledICHolder::holder_klass_offset());
1275 //__ movl(ebx, Address(holder, CompiledICHolder::holder_method_offset()));
1276 __ ld_ptr(Rmethod, holder, CompiledICHolder::holder_method_offset()); 1000 __ ld_ptr(Rmethod, holder, CompiledICHolder::holder_method_offset());
1277 //__ jcc(Assembler::notEqual, missed); 1001 __ bne(AT, temp, missed);
1278 __ bne(AT, temp, missed); 1002 __ delayed()->nop();
1279 __ delayed()->nop();
1280 // Method might have been compiled since the call site was patched to 1003 // Method might have been compiled since the call site was patched to
1281 // interpreted; if that is the case, treat it as a miss so we can get 1004 // interpreted; if that is the case, treat it as a miss so we can get
1282 // the call site corrected. 1005 // the call site corrected.
1283 //__ cmpl(Address(ebx, in_bytes(Method::code_offset())), NULL_WORD);
1284 //__ jcc(Assembler::equal, skip_fixup);
1285 __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset())); 1006 __ ld_ptr(AT, Rmethod, in_bytes(Method::code_offset()));
1286 __ beq(AT, R0, skip_fixup); 1007 __ beq(AT, R0, skip_fixup);
1287 __ delayed()->nop(); 1008 __ delayed()->nop();
1288 __ bind(missed); 1009 __ bind(missed);
1289 // __ move(AT, (int)&jerome7);
1290 // __ sw(RA, AT, 0);
1291 1010
1292 __ jmp(ic_miss, relocInfo::runtime_call_type); 1011 __ jmp(ic_miss, relocInfo::runtime_call_type);
1293 __ delayed()->nop(); 1012 __ delayed()->nop();
1294 } 1013 }
1295 1014
1296 address c2i_entry = __ pc(); 1015 address c2i_entry = __ pc();
1297 1016
1298 agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup); 1017 agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
1299 1018
1300 __ flush(); 1019 __ flush();
1301 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry); 1020 return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
1302 1021 }
1303 }
1304 /*
1305 // Helper function for native calling conventions
1306 static VMReg int_stk_helper( int i ) {
1307 // Bias any stack based VMReg we get by ignoring the window area
1308 // but not the register parameter save area.
1309 //
1310 // This is strange for the following reasons. We'd normally expect
1311 // the calling convention to return an VMReg for a stack slot
1312 // completely ignoring any abi reserved area. C2 thinks of that
1313 // abi area as only out_preserve_stack_slots. This does not include
1314 // the area allocated by the C abi to store down integer arguments
1315 // because the java calling convention does not use it. So
1316 // since c2 assumes that there are only out_preserve_stack_slots
1317 // to bias the optoregs (which impacts VMRegs) when actually referencing any actual stack
1318 // location the c calling convention must add in this bias amount
1319 // to make up for the fact that the out_preserve_stack_slots is
1320 // insufficient for C calls. What a mess. I sure hope those 6
1321 // stack words were worth it on every java call!
1322
1323 // Another way of cleaning this up would be for out_preserve_stack_slots
1324 // to take a parameter to say whether it was C or java calling conventions.
1325 // Then things might look a little better (but not much).
1326
1327 int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
1328 if( mem_parm_offset < 0 ) {
1329 return as_oRegister(i)->as_VMReg();
1330 } else {
1331 int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
1332 // Now return a biased offset that will be correct when out_preserve_slots is added back in
1333 return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
1334 }
1335 }
1336 */
1337
1338 1022
1339 int SharedRuntime::c_calling_convention(const BasicType *sig_bt, 1023 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1340 VMRegPair *regs, 1024 VMRegPair *regs,
1341 VMRegPair *regs2, 1025 VMRegPair *regs2,
1342 int total_args_passed) { 1026 int total_args_passed) {
1343 assert(regs2 == NULL, "not needed on MIPS"); 1027 assert(regs2 == NULL, "not needed on MIPS");
1344 #ifdef aoqi_test 1028 // Return the number of VMReg stack_slots needed for the args.
1345 tty->print_cr(" SharedRuntime::%s :%d total_args_passed:%d", __func__, __LINE__, total_args_passed); 1029 // This value does not include an abi space (like register window
1346 #endif 1030 // save area).
1347 // Return the number of VMReg stack_slots needed for the args. 1031
1348 // This value does not include an abi space (like register window 1032 // The native convention is V8 if !LP64
1349 // save area). 1033 // The LP64 convention is the V9 convention which is slightly more sane.
1350 1034
1351 // The native convention is V8 if !LP64 1035 // We return the amount of VMReg stack slots we need to reserve for all
1352 // The LP64 convention is the V9 convention which is slightly more sane. 1036 // the arguments NOT counting out_preserve_stack_slots. Since we always
1353 1037 // have space for storing at least 6 registers to memory we start with that.
1354 // We return the amount of VMReg stack slots we need to reserve for all 1038 // See int_stk_helper for a further discussion.
1355 // the arguments NOT counting out_preserve_stack_slots. Since we always 1039 // We return the amount of VMRegImpl stack slots we need to reserve for all
1356 // have space for storing at least 6 registers to memory we start with that. 1040 // the arguments NOT counting out_preserve_stack_slots.
1357 // See int_stk_helper for a further discussion.
1358 // We return the amount of VMRegImpl stack slots we need to reserve for all
1359 // the arguments NOT counting out_preserve_stack_slots.
1360 static const Register INT_ArgReg[Argument::n_register_parameters] = { 1041 static const Register INT_ArgReg[Argument::n_register_parameters] = {
1361 A0, A1, A2, A3, A4, A5, A6, A7 1042 A0, A1, A2, A3, A4, A5, A6, A7
1362 }; 1043 };
1363 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters] = { 1044 static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters] = {
1364 F12, F13, F14, F15, F16, F17, F18, F19 1045 F12, F13, F14, F15, F16, F17, F18, F19
1365 }; 1046 };
1366 uint args = 0; 1047 uint args = 0;
1367 uint stk_args = 0; // inc by 2 each time 1048 uint stk_args = 0; // inc by 2 each time
1368 1049
1369 /* Example: 1050 /* Example:
1370 --- n java.lang.UNIXProcess::forkAndExec 1051 --- n java.lang.UNIXProcess::forkAndExec
1371 private native int forkAndExec(byte[] prog, 1052 private native int forkAndExec(byte[] prog,
1372 byte[] argBlock, int argc, 1053 byte[] argBlock, int argc,
1387 jobject stdin_fd, 1068 jobject stdin_fd,
1388 jobject stdout_fd, 1069 jobject stdout_fd,
1389 jobject stderr_fd) 1070 jobject stderr_fd)
1390 1071
1391 ::c_calling_convention 1072 ::c_calling_convention
1392 0: // env <-- a0 1073 0: // env <-- a0
1393 1: L // klass/obj <-- t0 => a1 1074 1: L // klass/obj <-- t0 => a1
1394 2: [ // prog[] <-- a0 => a2 1075 2: [ // prog[] <-- a0 => a2
1395 3: [ // argBlock[] <-- a1 => a3 1076 3: [ // argBlock[] <-- a1 => a3
1396 4: I // argc 1077 4: I // argc
1397 5: [ // envBlock[] <-- a3 => a5 1078 5: [ // envBlock[] <-- a3 => a5
1398 6: I // envc 1079 6: I // envc
1399 7: [ // dir[] <-- a5 => a7 1080 7: [ // dir[] <-- a5 => a7
1400 8: Z // redirectErrorStream a6 => sp[0] 1081 8: Z // redirectErrorStream a6 => sp[0]
1401 9: L // stdin a7 => sp[8] 1082 9: L // stdin a7 => sp[8]
1402 10: L // stdout fp[16] => sp[16] 1083 10: L // stdout fp[16] => sp[16]
1403 11: L // stderr fp[24] => sp[24] 1084 11: L // stderr fp[24] => sp[24]
1404 */ 1085 */
1405 for (int i = 0; i < total_args_passed; i++) { 1086 for (int i = 0; i < total_args_passed; i++) {
1406 switch (sig_bt[i]) { 1087 switch (sig_bt[i]) {
1407 case T_VOID: // Halves of longs and doubles 1088 case T_VOID: // Halves of longs and doubles
1408 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half"); 1089 assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
1409 regs[i].set_bad(); 1090 regs[i].set_bad();
1410 break; 1091 break;
1411 case T_BOOLEAN:
1412 case T_CHAR:
1413 case T_BYTE:
1414 case T_SHORT:
1415 case T_INT:
1416 if (args < Argument::n_register_parameters) {
1417 regs[i].set1(INT_ArgReg[args++]->as_VMReg());
1418 } else {
1419 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1420 stk_args += 2;
1421 }
1422 break;
1423 case T_LONG:
1424 assert(sig_bt[i + 1] == T_VOID, "expecting half");
1425 // fall through
1426 case T_OBJECT:
1427 case T_ARRAY:
1428 case T_ADDRESS:
1429 case T_METADATA:
1430 if (args < Argument::n_register_parameters) {
1431 regs[i].set2(INT_ArgReg[args++]->as_VMReg());
1432 } else {
1433 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1434 stk_args += 2;
1435 }
1436 break;
1437 case T_FLOAT:
1438 if (args < Argument::n_float_register_parameters) {
1439 regs[i].set1(FP_ArgReg[args++]->as_VMReg());
1440 } else {
1441 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1442 stk_args += 2;
1443 }
1444 break;
1445 case T_DOUBLE:
1446 assert(sig_bt[i + 1] == T_VOID, "expecting half");
1447 if (args < Argument::n_float_register_parameters) {
1448 regs[i].set2(FP_ArgReg[args++]->as_VMReg());
1449 } else {
1450 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1451 stk_args += 2;
1452 }
1453 break;
1454 default:
1455 ShouldNotReachHere();
1456 break;
1457 }
1458 }
1459
1460 return round_to(stk_args, 2);
1461 }
1462 /*
1463 int SharedRuntime::c_calling_convention_jni(const BasicType *sig_bt,
1464 VMRegPair *regs,
1465 int total_args_passed) {
1466 // We return the amount of VMRegImpl stack slots we need to reserve for all
1467 // the arguments NOT counting out_preserve_stack_slots.
1468 bool unalign = 0;
1469 uint stack = 0; // All arguments on stack
1470 #ifdef aoqi_test
1471 tty->print_cr(" SharedRuntime::%s :%d total_args_passed:%d", __func__, __LINE__, total_args_passed);
1472 #endif
1473
1474 for( int i = 0; i < total_args_passed; i++) {
1475 // From the type and the argument number (count) compute the location
1476 switch( sig_bt[i] ) {
1477 case T_BOOLEAN: 1092 case T_BOOLEAN:
1478 case T_CHAR: 1093 case T_CHAR:
1479 case T_FLOAT:
1480 case T_BYTE: 1094 case T_BYTE:
1481 case T_SHORT: 1095 case T_SHORT:
1482 case T_INT: 1096 case T_INT:
1097 if (args < Argument::n_register_parameters) {
1098 regs[i].set1(INT_ArgReg[args++]->as_VMReg());
1099 } else {
1100 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1101 stk_args += 2;
1102 }
1103 break;
1104 case T_LONG:
1105 assert(sig_bt[i + 1] == T_VOID, "expecting half");
1106 // fall through
1483 case T_OBJECT: 1107 case T_OBJECT:
1484 case T_ARRAY: 1108 case T_ARRAY:
1485 case T_ADDRESS: 1109 case T_ADDRESS:
1486 regs[i].set1(VMRegImpl::stack2reg(stack++)); 1110 case T_METADATA:
1487 unalign = !unalign; 1111 if (args < Argument::n_register_parameters) {
1112 regs[i].set2(INT_ArgReg[args++]->as_VMReg());
1113 } else {
1114 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1115 stk_args += 2;
1116 }
1488 break; 1117 break;
1489 case T_LONG: 1118 case T_FLOAT:
1490 case T_DOUBLE: // The stack numbering is reversed from Java 1119 if (args < Argument::n_float_register_parameters) {
1491 // Since C arguments do not get reversed, the ordering for 1120 regs[i].set1(FP_ArgReg[args++]->as_VMReg());
1492 // doubles on the stack must be opposite the Java convention 1121 } else {
1493 assert(sig_bt[i+1] == T_VOID, "missing Half" ); 1122 regs[i].set1(VMRegImpl::stack2reg(stk_args));
1494 if(unalign){ 1123 stk_args += 2;
1495 stack += 1; 1124 }
1496 unalign = ! unalign;
1497 }
1498 regs[i].set2(VMRegImpl::stack2reg(stack));
1499 stack += 2;
1500 break; 1125 break;
1501 case T_VOID: regs[i].set_bad(); break; 1126 case T_DOUBLE:
1127 assert(sig_bt[i + 1] == T_VOID, "expecting half");
1128 if (args < Argument::n_float_register_parameters) {
1129 regs[i].set2(FP_ArgReg[args++]->as_VMReg());
1130 } else {
1131 regs[i].set2(VMRegImpl::stack2reg(stk_args));
1132 stk_args += 2;
1133 }
1134 break;
1502 default: 1135 default:
1503 ShouldNotReachHere(); 1136 ShouldNotReachHere();
1504 break; 1137 break;
1505 } 1138 }
1506 } 1139 }
1507 return stack; 1140
1508 } 1141 return round_to(stk_args, 2);
1509 */ 1142 }
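
As an aside for readers tracing the loop above: a stand-alone C++ sketch (not VM code) of the assignment rule it implements. The key detail is the single shared args counter, so an argument's position, not its kind, selects the register slot; the sample signature below is made up.

#include <cstdio>

enum Kind { INT_LIKE, FP_LIKE };

int main() {
  const char* INT_ArgReg[8] = { "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7" };
  const char* FP_ArgReg[8]  = { "F12", "F13", "F14", "F15", "F16", "F17", "F18", "F19" };
  // Hypothetical signature: five int-like args followed by one float.
  Kind sig[6] = { INT_LIKE, INT_LIKE, INT_LIKE, INT_LIKE, INT_LIKE, FP_LIKE };
  unsigned args = 0, stk_args = 0;
  for (unsigned i = 0; i < 6; i++) {
    if (args < 8) {
      // Position selects the slot: arg 5 lands in F17, not F12, because the
      // integer and float register files share the one 'args' counter.
      printf("arg %u -> %s\n", i, sig[i] == FP_LIKE ? FP_ArgReg[args] : INT_ArgReg[args]);
      args++;
    } else {
      printf("arg %u -> stack slot %u\n", i, stk_args);
      stk_args += 2;  // two 32-bit VMReg slots per 64-bit stack word
    }
  }
  return 0;
}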
1510 1143
1511 // --------------------------------------------------------------------------- 1144 // ---------------------------------------------------------------------------
1512 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1145 void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1513 // We always ignore the frame_slots arg and just use the space below the frame pointer, 1146 // We always ignore the frame_slots arg and just use the space below the frame pointer,
1514 // which by this time is free to use 1147 // which by this time is free to use
1515 switch (ret_type) { 1148 switch (ret_type) {
1516 case T_FLOAT: 1149 case T_FLOAT:
1517 __ swc1(FSF, FP, -wordSize); 1150 __ swc1(FSF, FP, -wordSize);
1518 break; 1151 break;
1519 case T_DOUBLE: 1152 case T_DOUBLE:
1520 __ sdc1(FSF, FP, -wordSize); 1153 __ sdc1(FSF, FP, -wordSize);
1521 break; 1154 break;
1522 case T_VOID: break; 1155 case T_VOID: break;
1523 case T_LONG: 1156 case T_LONG:
1524 __ sd(V0, FP, -wordSize); 1157 __ sd(V0, FP, -wordSize);
1525 break; 1158 break;
1526 case T_OBJECT: 1159 case T_OBJECT:
1527 case T_ARRAY: 1160 case T_ARRAY:
1528 __ sd(V0, FP, -wordSize); 1161 __ sd(V0, FP, -wordSize);
1529 break; 1162 break;
1530 default: { 1163 default: {
1531 __ sw(V0, FP, -wordSize); 1164 __ sw(V0, FP, -wordSize);
1532 } 1165 }
1533 } 1166 }
1534 } 1167 }
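
The save/restore pair above parks the native result in the single word just below the frame pointer. A stand-alone sketch of that discipline (not VM code; the buffer and value are stand-ins):

#include <cassert>
#include <cstring>
#include <stdint.h>

int main() {
  uint8_t frame[64];
  uint8_t* fp = frame + sizeof(frame);   // stand-in for the frame pointer
  int64_t v0 = 0x11223344AABBCCDDLL;     // stand-in for the native result

  memcpy(fp - 8, &v0, 8);                // save_native_result, T_LONG case
  int64_t reloaded;
  memcpy(&reloaded, fp - 8, 8);          // restore_native_result, T_LONG case
  assert(reloaded == v0);                // value survives the VM transition
  return 0;
}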
1535 1168
1536 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) { 1169 void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1537 // We always ignore the frame_slots arg and just use the space below the frame pointer, 1170 // We always ignore the frame_slots arg and just use the space below the frame pointer,
1538 // which by this time is free to use 1171 // which by this time is free to use
1539 switch (ret_type) { 1172 switch (ret_type) {
1540 case T_FLOAT: 1173 case T_FLOAT:
1541 __ lwc1(FSF, FP, -wordSize); 1174 __ lwc1(FSF, FP, -wordSize);
1542 break; 1175 break;
1543 case T_DOUBLE: 1176 case T_DOUBLE:
1544 __ ldc1(FSF, FP, -wordSize); 1177 __ ldc1(FSF, FP, -wordSize);
1545 break; 1178 break;
1546 case T_LONG: 1179 case T_LONG:
1547 __ ld(V0, FP, -wordSize); 1180 __ ld(V0, FP, -wordSize);
1548 break; 1181 break;
1549 case T_VOID: break; 1182 case T_VOID: break;
1550 case T_OBJECT: 1183 case T_OBJECT:
1551 case T_ARRAY: 1184 case T_ARRAY:
1552 __ ld(V0, FP, -wordSize); 1185 __ ld(V0, FP, -wordSize);
1553 break; 1186 break;
1554 default: { 1187 default: {
1555 __ lw(V0, FP, -wordSize); 1188 __ lw(V0, FP, -wordSize);
1556 } 1189 }
1557 } 1190 }
1558 } 1191 }
1559 1192
1560 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { 1193 static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1561 for ( int i = first_arg ; i < arg_count ; i++ ) { 1194 for ( int i = first_arg ; i < arg_count ; i++ ) {
1562 if (args[i].first()->is_Register()) { 1195 if (args[i].first()->is_Register()) {
1563 __ push(args[i].first()->as_Register()); 1196 __ push(args[i].first()->as_Register());
1564 } else if (args[i].first()->is_FloatRegister()) { 1197 } else if (args[i].first()->is_FloatRegister()) {
1565 __ push(args[i].first()->as_FloatRegister()); 1198 __ push(args[i].first()->as_FloatRegister());
1566 } 1199 }
1567 } 1200 }
1568 } 1201 }
1569 1202
1570 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) { 1203 static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
1571 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) { 1204 for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
1572 if (args[i].first()->is_Register()) { 1205 if (args[i].first()->is_Register()) {
1573 __ pop(args[i].first()->as_Register()); 1206 __ pop(args[i].first()->as_Register());
1574 } else if (args[i].first()->is_FloatRegister()) { 1207 } else if (args[i].first()->is_FloatRegister()) {
1575 __ pop(args[i].first()->as_FloatRegister()); 1208 __ pop(args[i].first()->as_FloatRegister());
1576 } 1209 }
1577 } 1210 }
1578 } 1211 }
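
save_args pushes registers in ascending index order, so restore_args must walk backwards: push/pop is LIFO and the last register saved has to be the first one restored. A minimal stand-alone check of that invariant (not VM code):

#include <cassert>
#include <vector>

int main() {
  int regs[4] = { 10, 11, 12, 13 };      // stand-ins for live argument registers
  std::vector<int> stack;
  for (int i = 0; i < 4; i++) stack.push_back(regs[i]);   // save_args order
  for (int i = 3; i >= 0; i--) {                          // restore_args order
    assert(stack.back() == regs[i]);     // the last value pushed pops first
    stack.pop_back();
  }
  return 0;
}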
1579 1212
1580 // A simple move of integer-like type 1213 // A simple move of integer-like type
1581 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1214 static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1582 if (src.first()->is_stack()) { 1215 if (src.first()->is_stack()) {
1583 if (dst.first()->is_stack()) { 1216 if (dst.first()->is_stack()) {
1584 // stack to stack 1217 // stack to stack
1585 __ lw(AT, FP, reg2offset_in(src.first())); 1218 __ lw(AT, FP, reg2offset_in(src.first()));
1586 __ sd(AT, SP, reg2offset_out(dst.first())); 1219 __ sd(AT, SP, reg2offset_out(dst.first()));
1587 } else { 1220 } else {
1588 // stack to reg 1221 // stack to reg
1589 //__ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1222 __ lw(dst.first()->as_Register(), FP, reg2offset_in(src.first()));
1590 __ lw(dst.first()->as_Register(), FP, reg2offset_in(src.first()));
1591 } 1223 }
1592 } else if (dst.first()->is_stack()) { 1224 } else if (dst.first()->is_stack()) {
1593 // reg to stack 1225 // reg to stack
1594 __ sd(src.first()->as_Register(), SP, reg2offset_out(dst.first())); 1226 __ sd(src.first()->as_Register(), SP, reg2offset_out(dst.first()));
1595 } else { 1227 } else {
1596 //__ mov(src.first()->as_Register(), dst.first()->as_Register()); 1228 if (dst.first() != src.first()) {
1597 if (dst.first() != src.first()) { 1229 __ move(dst.first()->as_Register(), src.first()->as_Register()); // fujie error:dst.first()
1598 __ move(dst.first()->as_Register(), src.first()->as_Register()); // fujie error:dst.first() 1230 }
1599 } 1231 }
1600 } 1232 }
1601 }
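
The lw/sd pairing above leans on MIPS64 behavior: lw sign-extends a 32-bit value into the full 64-bit register. A stand-alone C++ analogue of that widening (not VM code):

#include <cassert>
#include <stdint.h>

int main() {
  int32_t narrow = -2;                   // 0xFFFFFFFE in memory
  int64_t widened = narrow;              // sign-extends, as lw does
  uint64_t zero_ext = (uint32_t)narrow;  // what a zero-extending load would give
  assert(widened == -2);
  assert(zero_ext == 0xFFFFFFFEull);
  return 0;
}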
1602 /*
1603 // On 64 bit we will store integer like items to the stack as
1604 // 64 bits items (sparc abi) even though java would only store
1605 // 32bits for a parameter. On 32bit it will simply be 32 bits
1606 // So this routine will do 32->32 on 32bit and 32->64 on 64bit
1607 static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1608 if (src.first()->is_stack()) {
1609 if (dst.first()->is_stack()) {
1610 // stack to stack
1611 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1612 __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1613 } else {
1614 // stack to reg
1615 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1616 }
1617 } else if (dst.first()->is_stack()) {
1618 // reg to stack
1619 __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1620 } else {
1621 __ mov(src.first()->as_Register(), dst.first()->as_Register());
1622 }
1623 }
1624 */
1625 1233
1626 // An oop arg. Must pass a handle, not the oop itself 1234 // An oop arg. Must pass a handle, not the oop itself
1627 static void object_move(MacroAssembler* masm, 1235 static void object_move(MacroAssembler* masm,
1628 OopMap* map, 1236 OopMap* map,
1629 int oop_handle_offset, 1237 int oop_handle_offset,
1633 bool is_receiver, 1241 bool is_receiver,
1634 int* receiver_offset) { 1242 int* receiver_offset) {
1635 1243
1636 // must pass a handle. First figure out the location we use as a handle 1244 // must pass a handle. First figure out the location we use as a handle
1637 1245
1638 //FIXME, for mips, dst can be register 1246 //FIXME, for mips, dst can be register
1639 if (src.first()->is_stack()) { 1247 if (src.first()->is_stack()) {
1640 // Oop is already on the stack as an argument 1248 // Oop is already on the stack as an argument
1641 Register rHandle = V0; 1249 Register rHandle = V0;
1642 Label nil; 1250 Label nil;
1643 //__ xorl(rHandle, rHandle); 1251 __ xorr(rHandle, rHandle, rHandle);
1644 __ xorr(rHandle, rHandle, rHandle); 1252 __ ld(AT, FP, reg2offset_in(src.first()));
1645 //__ cmpl(Address(ebp, reg2offset_in(src.first())), NULL_WORD); 1253 __ beq(AT, R0, nil);
1646 __ ld(AT, FP, reg2offset_in(src.first())); 1254 __ delayed()->nop();
1647 //__ jcc(Assembler::equal, nil); 1255 __ lea(rHandle, Address(FP, reg2offset_in(src.first())));
1648 __ beq(AT, R0, nil); 1256 __ bind(nil);
1649 __ delayed()->nop(); 1257 if (dst.first()->is_stack()) __ sd(rHandle, SP, reg2offset_out(dst.first()));
1650 // __ leal(rHandle, Address(ebp, reg2offset_in(src.first()))); 1258 else __ move(dst.first()->as_Register(), rHandle);
1651 __ lea(rHandle, Address(FP, reg2offset_in(src.first()))); 1259 //if dst is register
1652 __ bind(nil); 1260 //FIXME, do mips need out preserve stack slots?
1653 //__ movl(Address(esp, reg2offset_out(dst.first())), rHandle); 1261 int offset_in_older_frame = src.first()->reg2stack()
1654 if (dst.first()->is_stack()) __ sd(rHandle, SP, reg2offset_out(dst.first())); 1262 + SharedRuntime::out_preserve_stack_slots();
1655 else __ move(dst.first()->as_Register(), rHandle); 1263 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1656 //if dst is register 1264 if (is_receiver) {
1657 //FIXME, do mips need out preserve stack slots? 1265 *receiver_offset = (offset_in_older_frame
1658 int offset_in_older_frame = src.first()->reg2stack() 1266 + framesize_in_slots) * VMRegImpl::stack_slot_size;
1659 + SharedRuntime::out_preserve_stack_slots(); 1267 }
1660 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 1268 } else {
1661 if (is_receiver) { 1269 // Oop is in a register; we must store it to the space we reserve
1662 *receiver_offset = (offset_in_older_frame 1270 // on the stack for oop_handles
1663 + framesize_in_slots) * VMRegImpl::stack_slot_size; 1271 const Register rOop = src.first()->as_Register();
1664 } 1272 assert((rOop->encoding() >= A0->encoding()) && (rOop->encoding() <= T0->encoding()), "wrong register");
1665 } else { 1273 const Register rHandle = V0;
1666 // Oop is in a register; we must store it to the space we reserve 1274 // Important: refer to java_calling_convention
1667 // on the stack for oop_handles 1275 int oop_slot = (rOop->encoding() - A0->encoding()) * VMRegImpl::slots_per_word + oop_handle_offset;
1668 const Register rOop = src.first()->as_Register(); 1276 int offset = oop_slot * VMRegImpl::stack_slot_size;
1669 assert((rOop->encoding() >= A0->encoding()) && (rOop->encoding() <= T0->encoding()), "wrong register"); 1277 Label skip;
1670 // const Register rHandle = eax; 1278 __ sd(rOop, SP, offset);
1671 const Register rHandle = V0; 1279 map->set_oop(VMRegImpl::stack2reg(oop_slot));
1672 // Important: refer to java_calling_convention 1280 __ xorr(rHandle, rHandle, rHandle);
1673 int oop_slot = (rOop->encoding() - A0->encoding()) * VMRegImpl::slots_per_word + oop_handle_offset; 1281 __ beq(rOop, R0, skip);
1674 int offset = oop_slot * VMRegImpl::stack_slot_size; 1282 __ delayed()->nop();
1675 Label skip; 1283 __ lea(rHandle, Address(SP, offset));
1676 // __ movl(Address(esp, offset), rOop); 1284 __ bind(skip);
1677 __ sd(rOop, SP, offset); 1285 // Store the handle parameter
1678 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1286 if (dst.first()->is_stack()) __ sd(rHandle, SP, reg2offset_out(dst.first()));
1679 // __ xorl(rHandle, rHandle); 1287 else __ move(dst.first()->as_Register(), rHandle);
1680 __ xorr(rHandle, rHandle, rHandle); 1288 //if dst is register
1681 //__ cmpl(rOop, NULL_WORD); 1289
1682 // __ jcc(Assembler::equal, skip); 1290 if (is_receiver) {
1683 __ beq(rOop, R0, skip); 1291 *receiver_offset = offset;
1684 __ delayed()->nop(); 1292 }
1685 // __ leal(rHandle, Address(esp, offset)); 1293 }
1686 __ lea(rHandle, Address(SP, offset));
1687 __ bind(skip);
1688 // Store the handle parameter
1689 //__ movl(Address(esp, reg2offset_out(dst.first())), rHandle);
1690 if (dst.first()->is_stack()) __ sd(rHandle, SP, reg2offset_out(dst.first()));
1691 else __ move(dst.first()->as_Register(), rHandle);
1692 //if dst is register
1693
1694 if (is_receiver) {
1695 *receiver_offset = offset;
1696 }
1697 }
1698 } 1294 }
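
Both branches above implement one handlizing rule: a null oop becomes a null handle, while a non-null oop is spilled to a known stack slot and the handle is the address of that slot. A stand-alone sketch (not VM code; the type names are stand-ins):

#include <cstdio>

typedef void* oop;        // stand-in for the VM's oop type
typedef oop*  jhandle;    // a handle here is a pointer to an oop slot

static jhandle handlize(oop value, oop* slot) {
  if (value == NULL) return NULL;   // the beq(..., R0, ...) path: null stays null
  *slot = value;                    // matches sd(rOop, SP, offset)
  return slot;                      // matches lea(rHandle, Address(SP, offset))
}

int main() {
  oop slot = NULL;
  int dummy;
  printf("%p\n", (void*)handlize(NULL, &slot));    // prints a null pointer
  printf("%p\n", (void*)handlize(&dummy, &slot));  // prints the address of slot
  return 0;
}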
1699 1295
1700 // A float arg may have to do float reg int reg conversion 1296 // A float arg may have to do float reg int reg conversion
1701 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1297 static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1702 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move"); 1298 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1703 1299
1704 if (src.first()->is_stack()) { 1300 if (src.first()->is_stack()) {
1705 if(dst.first()->is_stack()){ 1301 if (dst.first()->is_stack()) {
1706 // __ movl(eax, Address(ebp, reg2offset_in(src.first()))); 1302 __ lwc1(F12, FP, reg2offset_in(src.first()));
1707 __ lwc1(F12, FP, reg2offset_in(src.first())); 1303 __ swc1(F12, SP, reg2offset_out(dst.first()));
1708 // __ movl(Address(esp, reg2offset_out(dst.first())), eax); 1304 }
1709 __ swc1(F12, SP, reg2offset_out(dst.first())); 1305 else
1710 } 1306 __ lwc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first()));
1711 else 1307 } else {
1712 __ lwc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first())); 1308 // reg to stack
1713 } else { 1309 if (dst.first()->is_stack())
1714 // reg to stack 1310 __ swc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
1715 // __ movss(Address(esp, reg2offset_out(dst.first())), 1311 else
1716 // src.first()->as_XMMRegister()); 1312 __ mov_s(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1717 // __ movl(Address(esp, reg2offset_out(dst.first())), eax); 1313 }
1718 if (dst.first()->is_stack()) 1314 }
1719 __ swc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first())); 1315
1720 else 1316
1721 __ mov_s(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1722 }
1723 }
1724 /*
1725 static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1726 VMRegPair src_lo(src.first());
1727 VMRegPair src_hi(src.second());
1728 VMRegPair dst_lo(dst.first());
1729 VMRegPair dst_hi(dst.second());
1730 simple_move32(masm, src_lo, dst_lo);
1731 simple_move32(masm, src_hi, dst_hi);
1732 }
1733 */
1734 // A long move 1316 // A long move
1735 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1317 static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1736 1318
1737 // The only legal possibility for a long_move VMRegPair is: 1319 // The only legal possibility for a long_move VMRegPair is:
1738 // 1: two stack slots (possibly unaligned) 1320 // 1: two stack slots (possibly unaligned)
1739 // as neither the Java nor the C calling convention will use registers 1321 // as neither the Java nor the C calling convention will use registers
1740 // for longs. 1322 // for longs.
1741 1323
1742 if (src.first()->is_stack()) { 1324 if (src.first()->is_stack()) {
1743 assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack"); 1325 assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
1744 // __ movl(eax, Address(ebp, reg2offset_in(src.first()))); 1326 if (dst.first()->is_stack()) {
1745 if (dst.first()->is_stack()) { 1327 __ ld(AT, FP, reg2offset_in(src.first()));
1746 __ ld(AT, FP, reg2offset_in(src.first())); 1328 __ sd(AT, SP, reg2offset_out(dst.first()));
1747 // __ movl(ebx, address(ebp, reg2offset_in(src.second()))); 1329 } else {
1748 //__ lw(V0, FP, reg2offset_in(src.second())); 1330 __ ld(dst.first()->as_Register(), FP, reg2offset_in(src.first()));
1749 // __ movl(address(esp, reg2offset_out(dst.first())), eax); 1331 }
1750 __ sd(AT, SP, reg2offset_out(dst.first())); 1332 } else {
1751 // __ movl(address(esp, reg2offset_out(dst.second())), ebx); 1333 if (dst.first()->is_stack()) {
1752 //__ sw(V0, SP, reg2offset_out(dst.second())); 1334 __ sd(src.first()->as_Register(), SP, reg2offset_out(dst.first()));
1753 } else { 1335 } else {
1754 __ ld(dst.first()->as_Register(), FP, reg2offset_in(src.first())); 1336 __ move(dst.first()->as_Register(), src.first()->as_Register());
1755 //__ lw( (dst.second())->as_Register(), FP, reg2offset_in(src.second())); 1337 }
1756 } 1338 }
1757 } else {
1758 if (dst.first()->is_stack()) {
1759 __ sd(src.first()->as_Register(), SP, reg2offset_out(dst.first()));
1760 //__ sw( (src.second())->as_Register(), SP, reg2offset_out(dst.second()));
1761 } else {
1762 __ move(dst.first()->as_Register(), src.first()->as_Register());
1763 //__ move( (dst.second())->as_Register(), (src.second())->as_Register());
1764 }
1765 }
1766 } 1339 }
1767 1340
1768 // A double move 1341 // A double move
1769 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1342 static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1770 1343
1771 // The only legal possibilities for a double_move VMRegPair are: 1344 // The only legal possibilities for a double_move VMRegPair are:
1772 // The painful thing here is that like long_move a VMRegPair might be 1345 // The painful thing here is that like long_move a VMRegPair might be
1773 1346
1774 // Because of the calling convention we know that src is either 1347 // Because of the calling convention we know that src is either
1775 // 1: a single physical register (xmm registers only) 1348 // 1: a single physical register (xmm registers only)
1776 // 2: two stack slots (possibly unaligned) 1349 // 2: two stack slots (possibly unaligned)
1777 // dst can only be a pair of stack slots. 1350 // dst can only be a pair of stack slots.
1778 1351
1779 // assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || 1352
1780 // src.first()->is_stack()), "bad args"); 1353 if (src.first()->is_stack()) {
1781 // assert(dst.first()->is_stack() || src.first()->is_stack()), "bad args"); 1354 // source is all stack
1782 1355 if (dst.first()->is_stack()) {
1783 if (src.first()->is_stack()) { 1356 __ ldc1(F12, FP, reg2offset_in(src.first()));
1784 // source is all stack 1357
1785 // __ movl(eax, Address(ebp, reg2offset_in(src.first()))); 1358 __ sdc1(F12, SP, reg2offset_out(dst.first()));
1786 if (dst.first()->is_stack()) { 1359 } else {
1787 __ ldc1(F12, FP, reg2offset_in(src.first())); 1360 __ ldc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first()));
1788 //__ movl(ebx, Address(ebp, reg2offset_in(src.second()))); 1361 }
1789 //__ lwc1(F14, FP, reg2offset_in(src.second())); 1362
1790 1363 } else {
1791 // __ movl(Address(esp, reg2offset_out(dst.first())), eax); 1364 // reg to stack
1792 __ sdc1(F12, SP, reg2offset_out(dst.first())); 1365 // No worries about stack alignment
1793 // __ movl(Address(esp, reg2offset_out(dst.second())), ebx); 1366 if (dst.first()->is_stack()) {
1794 //__ swc1(F14, SP, reg2offset_out(dst.second())); 1367 __ sdc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
1795 } else { 1368 }
1796 __ ldc1(dst.first()->as_FloatRegister(), FP, reg2offset_in(src.first())); 1369 else
1797 //__ lwc1( (dst.second())->as_FloatRegister(), FP, reg2offset_in(src.second())); 1370 __ mov_d(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1798 } 1371
1799 1372 }
1800 } else {
1801 // reg to stack
1802 // No worries about stack alignment
1803 // __ movsd(Address(esp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
1804 if (dst.first()->is_stack()) {
1805 __ sdc1(src.first()->as_FloatRegister(), SP, reg2offset_out(dst.first()));
1806 //__ swc1( src.second()->as_FloatRegister(),SP, reg2offset_out(dst.second()));
1807 }
1808 else
1809 __ mov_d(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
1810 //__ mov_s( dst.second()->as_FloatRegister(), src.second()->as_FloatRegister());
1811
1812 }
1813 } 1373 }
1814 1374
1815 static void verify_oop_args(MacroAssembler* masm, 1375 static void verify_oop_args(MacroAssembler* masm,
1816 methodHandle method, 1376 methodHandle method,
1817 const BasicType* sig_bt, 1377 const BasicType* sig_bt,
1822 if (sig_bt[i] == T_OBJECT || 1382 if (sig_bt[i] == T_OBJECT ||
1823 sig_bt[i] == T_ARRAY) { 1383 sig_bt[i] == T_ARRAY) {
1824 VMReg r = regs[i].first(); 1384 VMReg r = regs[i].first();
1825 assert(r->is_valid(), "bad oop arg"); 1385 assert(r->is_valid(), "bad oop arg");
1826 if (r->is_stack()) { 1386 if (r->is_stack()) {
1827 // __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1828 __ ld(temp_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize)); 1387 __ ld(temp_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1829 __ verify_oop(temp_reg); 1388 __ verify_oop(temp_reg);
1830 } else { 1389 } else {
1831 __ verify_oop(r->as_Register()); 1390 __ verify_oop(r->as_Register());
1832 } 1391 }
1848 int member_arg_pos = -1; 1407 int member_arg_pos = -1;
1849 Register member_reg = noreg; 1408 Register member_reg = noreg;
1850 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid); 1409 int ref_kind = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1851 if (ref_kind != 0) { 1410 if (ref_kind != 0) {
1852 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument 1411 member_arg_pos = method->size_of_parameters() - 1; // trailing MemberName argument
1853 // member_reg = rbx; // known to be free at this point
1854 member_reg = S3; // known to be free at this point 1412 member_reg = S3; // known to be free at this point
1855 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind); 1413 has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1856 } else if (iid == vmIntrinsics::_invokeBasic) { 1414 } else if (iid == vmIntrinsics::_invokeBasic) {
1857 has_receiver = true; 1415 has_receiver = true;
1858 } else { 1416 } else {
1880 if (r->is_stack()) { 1438 if (r->is_stack()) {
1881 // Porting note: This assumes that compiled calling conventions always 1439 // Porting note: This assumes that compiled calling conventions always
1882 // pass the receiver oop in a register. If this is not true on some 1440 // pass the receiver oop in a register. If this is not true on some
1883 // platform, pick a temp and load the receiver from stack. 1441 // platform, pick a temp and load the receiver from stack.
1884 fatal("receiver always in a register"); 1442 fatal("receiver always in a register");
1885 // receiver_reg = j_rarg0; // known to be free at this point
1886 receiver_reg = SSR; // known to be free at this point 1443 receiver_reg = SSR; // known to be free at this point
1887 __ ld(receiver_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size)); 1444 __ ld(receiver_reg, Address(SP, r->reg2stack() * VMRegImpl::stack_slot_size));
1888 } else { 1445 } else {
1889 // no data motion is needed 1446 // no data motion is needed
1890 receiver_reg = r->as_Register(); 1447 receiver_reg = r->as_Register();
1903 // returns to java state (possibly blocking), unhandlizes any result and 1460 // returns to java state (possibly blocking), unhandlizes any result and
1904 // returns. 1461 // returns.
1905 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, 1462 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1906 methodHandle method, 1463 methodHandle method,
1907 int compile_id, 1464 int compile_id,
1908 BasicType *in_sig_bt, 1465 BasicType* in_sig_bt,
1909 VMRegPair *in_regs, 1466 VMRegPair* in_regs,
1910 BasicType ret_type) { 1467 BasicType ret_type) {
1911
1912 if (method->is_method_handle_intrinsic()) { 1468 if (method->is_method_handle_intrinsic()) {
1913 vmIntrinsics::ID iid = method->intrinsic_id(); 1469 vmIntrinsics::ID iid = method->intrinsic_id();
1914 intptr_t start = (intptr_t)__ pc(); 1470 intptr_t start = (intptr_t)__ pc();
1915 int vep_offset = ((intptr_t)__ pc()) - start; 1471 int vep_offset = ((intptr_t)__ pc()) - start;
1916
1917 gen_special_dispatch(masm, 1472 gen_special_dispatch(masm,
1918 method, 1473 method,
1919 in_sig_bt, 1474 in_sig_bt,
1920 in_regs); 1475 in_regs);
1921
1922 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period 1476 int frame_complete = ((intptr_t)__ pc()) - start; // not complete, period
1923 __ flush(); 1477 __ flush();
1924 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually 1478 int stack_slots = SharedRuntime::out_preserve_stack_slots(); // no out slots at all, actually
1925 return nmethod::new_native_nmethod(method, 1479 return nmethod::new_native_nmethod(method,
1926 compile_id, 1480 compile_id,
1945 // oopMap for is if the call is static 1499 // oopMap for is if the call is static
1946 // 1500 //
1947 // An OopMap for lock (and class if static), and one for the VM call itself 1501 // An OopMap for lock (and class if static), and one for the VM call itself
1948 OopMapSet *oop_maps = new OopMapSet(); 1502 OopMapSet *oop_maps = new OopMapSet();
1949 1503
1950 // We have received a description of where all the java args are located 1504 // We have received a description of where all the java args are located
1951 // on entry to the wrapper. We need to convert these args to where 1505 // on entry to the wrapper. We need to convert these args to where
1952 // the jni function will expect them. To figure out where they go 1506 // the jni function will expect them. To figure out where they go
1953 // we convert the java signature to a C signature by inserting 1507 // we convert the java signature to a C signature by inserting
1954 // the hidden arguments as arg[0] and possibly arg[1] (static method) 1508 // the hidden arguments as arg[0] and possibly arg[1] (static method)
1955 1509
1956 const int total_in_args = method->size_of_parameters(); 1510 const int total_in_args = method->size_of_parameters();
1957 int total_c_args = total_in_args; 1511 int total_c_args = total_in_args;
1958 if (!is_critical_native) { 1512 if (!is_critical_native) {
1959 total_c_args += 1; 1513 total_c_args += 1;
1966 total_c_args++; 1520 total_c_args++;
1967 } 1521 }
1968 } 1522 }
1969 } 1523 }
1970 1524
1971 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1525 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1972 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1526 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1973 BasicType* in_elem_bt = NULL; 1527 BasicType* in_elem_bt = NULL;
1974 1528
1975 int argc = 0; 1529 int argc = 0;
1976 if (!is_critical_native) { 1530 if (!is_critical_native) {
1977 out_sig_bt[argc++] = T_ADDRESS; 1531 out_sig_bt[argc++] = T_ADDRESS;
2022 // they require (neglecting out_preserve_stack_slots but space for storing 1576 // they require (neglecting out_preserve_stack_slots but space for storing
2023 // the 1st six register arguments). It's weird; see int_stk_helper. 1577 // the 1st six register arguments). It's weird; see int_stk_helper.
2024 // 1578 //
2025 int out_arg_slots; 1579 int out_arg_slots;
2026 //out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); 1580 //out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2027 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); 1581 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2028 1582
2029 // Compute framesize for the wrapper. We need to handlize all oops in 1583 // Compute framesize for the wrapper. We need to handlize all oops in
2030 // registers. We must create space for them here that is disjoint from 1584 // registers. We must create space for them here that is disjoint from
2031 // the windowed save area because we have no control over when we might 1585 // the windowed save area because we have no control over when we might
2032 // flush the window again and overwrite values that gc has since modified. 1586 // flush the window again and overwrite values that gc has since modified.
2077 stack_slots = round_to(stack_slots, 2); 1631 stack_slots = round_to(stack_slots, 2);
2078 } 1632 }
2079 } 1633 }
2080 1634
2081 int oop_handle_offset = stack_slots; 1635 int oop_handle_offset = stack_slots;
2082 // stack_slots += 9*VMRegImpl::slots_per_word; // T0, A0 ~ A7
2083 stack_slots += total_save_slots; 1636 stack_slots += total_save_slots;
2084 1637
2085 // Now any space we need for handlizing a klass if static method 1638 // Now any space we need for handlizing a klass if static method
2086 1639
2087 int klass_slot_offset = 0; 1640 int klass_slot_offset = 0;
2088 int klass_offset = -1; 1641 int klass_offset = -1;
2089 int lock_slot_offset = 0; 1642 int lock_slot_offset = 0;
2090 bool is_static = false; 1643 bool is_static = false;
2091 //int oop_temp_slot_offset = 0;
2092 1644
2093 if (method->is_static()) { 1645 if (method->is_static()) {
2094 klass_slot_offset = stack_slots; 1646 klass_slot_offset = stack_slots;
2095 stack_slots += VMRegImpl::slots_per_word; 1647 stack_slots += VMRegImpl::slots_per_word;
2096 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1648 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2103 lock_slot_offset = stack_slots; 1655 lock_slot_offset = stack_slots;
2104 stack_slots += VMRegImpl::slots_per_word; 1656 stack_slots += VMRegImpl::slots_per_word;
2105 } 1657 }
2106 1658
2107 // Now a place to save return value or as a temporary for any gpr -> fpr moves 1659 // Now a place to save return value or as a temporary for any gpr -> fpr moves
2108 // + 2 for return address (which we own) and saved ebp 1660 // + 2 for return address (which we own) and saved ebp
2109 //stack_slots += 2; 1661 stack_slots += 2 + 9 * VMRegImpl::slots_per_word; // (T0, A0, A1, A2, A3, A4, A5, A6, A7)
2110 stack_slots += 2 + 9 * VMRegImpl::slots_per_word; // (T0, A0, A1, A2, A3, A4, A5, A6, A7)
2111 1662
2112 // Ok The space we have allocated will look like: 1663 // Ok The space we have allocated will look like:
2113 // 1664 //
2114 // 1665 //
2115 // FP-> | | 1666 // FP-> | |
2138 // stack properly aligned. 1689 // stack properly aligned.
2139 stack_slots = round_to(stack_slots, StackAlignmentInSlots); 1690 stack_slots = round_to(stack_slots, StackAlignmentInSlots);
2140 1691
2141 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 1692 int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2142 1693
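
A quick arithmetic check of the rounding above (not VM code), assuming 4-byte VMReg stack slots and 16-byte stack alignment, i.e. StackAlignmentInSlots == 4; the starting count of 37 is made up:

#include <cassert>

static int round_to(int x, int modulus) { return (x + modulus - 1) / modulus * modulus; }

int main() {
  const int stack_slot_size = 4;           // assumed VMReg slot size
  const int StackAlignmentInSlots = 16 / stack_slot_size;
  int stack_slots = 37;                    // hypothetical pre-alignment count
  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
  assert(stack_slots == 40);               // rounded up to the next multiple of 4
  assert(stack_slots * stack_slot_size == 160);  // the frame size in bytes
  return 0;
}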
2143 intptr_t start = (intptr_t)__ pc(); 1694 intptr_t start = (intptr_t)__ pc();
2144 1695
2145 1696
2146 1697
2147 // First thing, make an ic check to see if we should even be here 1698 // First thing, make an ic check to see if we should even be here
2148 address ic_miss = SharedRuntime::get_ic_miss_stub(); 1699 address ic_miss = SharedRuntime::get_ic_miss_stub();
2149 1700
2150 // We are free to use all registers as temps without saving them and 1701 // We are free to use all registers as temps without saving them and
2151 // restoring them except ebp. ebp is the only callee save register 1702 // restoring them except ebp. ebp is the only callee save register
2152 // as far as the interpreter and the compiler(s) are concerned. 1703 // as far as the interpreter and the compiler(s) are concerned.
2153 1704
2154 //refer to register_mips.hpp:IC_Klass 1705 //refer to register_mips.hpp:IC_Klass
2155 const Register ic_reg = T1; 1706 const Register ic_reg = T1;
2156 const Register receiver = T0; 1707 const Register receiver = T0;
2157 Label hit; 1708
2158 Label exception_pending; 1709 Label hit;
2159 1710 Label exception_pending;
2160 __ verify_oop(receiver); 1711
2161 //__ lw(AT, receiver, oopDesc::klass_offset_in_bytes()); 1712 __ verify_oop(receiver);
2162 // add for compressed oops 1713 // add for compressed oops
2163 __ load_klass(T9, receiver); 1714 __ load_klass(T9, receiver);
2164 __ beq(T9, ic_reg, hit); 1715 __ beq(T9, ic_reg, hit);
2165 __ delayed()->nop(); 1716 __ delayed()->nop();
2166 __ jmp(ic_miss, relocInfo::runtime_call_type); 1717 __ jmp(ic_miss, relocInfo::runtime_call_type);
2167 __ delayed()->nop(); 1718 __ delayed()->nop();
2168 // verified entry must be aligned for code patching. 1719 // verified entry must be aligned for code patching.
2169 // and the first 5 bytes must be in the same cache line 1720 // and the first 5 bytes must be in the same cache line
2170 // if we align at 8 then we will be sure 5 bytes are in the same line 1721 // if we align at 8 then we will be sure 5 bytes are in the same line
2171 __ align(8); 1722 __ align(8);
2172 1723
2173 __ bind(hit); 1724 __ bind(hit);
2174 1725
2175 1726
2176 int vep_offset = ((intptr_t)__ pc()) - start; 1727 int vep_offset = ((intptr_t)__ pc()) - start;
2177 #ifdef COMPILER1 1728 #ifdef COMPILER1
2178 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) { 1729 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
2179 // Object.hashCode can pull the hashCode from the header word 1730 // Object.hashCode can pull the hashCode from the header word
2180 // instead of doing a full VM transition once it's been computed. 1731 // instead of doing a full VM transition once it's been computed.
2181 // Since hashCode is usually polymorphic at call sites we can't do 1732 // Since hashCode is usually polymorphic at call sites we can't do
2182 // this optimization at the call site without a lot of work. 1733 // this optimization at the call site without a lot of work.
2183 Label slowCase; 1734 Label slowCase;
2184 Register receiver = T0; 1735 Register receiver = T0;
2185 Register result = V0; 1736 Register result = V0;
2186 __ ld(result, receiver, oopDesc::mark_offset_in_bytes()); 1737 __ ld(result, receiver, oopDesc::mark_offset_in_bytes());
2187 // check if locked 1738 // check if locked
2188 __ andi(AT, result, markOopDesc::unlocked_value); 1739 __ andi(AT, result, markOopDesc::unlocked_value);
2189 __ beq(AT, R0, slowCase); 1740 __ beq(AT, R0, slowCase);
2190 __ delayed()->nop(); 1741 __ delayed()->nop();
2191 if (UseBiasedLocking) { 1742 if (UseBiasedLocking) {
2192 // Check if biased and fall through to runtime if so 1743 // Check if biased and fall through to runtime if so
2193 __ andi(AT, result, markOopDesc::biased_lock_bit_in_place); 1744 __ andi(AT, result, markOopDesc::biased_lock_bit_in_place);
2194 __ bne(AT, R0, slowCase); 1745 __ bne(AT, R0, slowCase);
2195 __ delayed()->nop(); 1746 __ delayed()->nop();
2196 } 1747 }
2197 // get hash 1748 // get hash
2198 __ li(AT, markOopDesc::hash_mask_in_place); 1749 __ li(AT, markOopDesc::hash_mask_in_place);
2199 __ andr(AT, result, AT); 1750 __ andr(AT, result, AT);
2200 // test if hashCode exists 1751 // test if hashCode exists
2201 __ beq(AT, R0, slowCase); 1752 __ beq(AT, R0, slowCase);
2202 __ delayed()->nop(); 1753 __ delayed()->nop();
2203 __ shr(result, markOopDesc::hash_shift); 1754 __ shr(result, markOopDesc::hash_shift);
2204 __ jr(RA); 1755 __ jr(RA);
2205 __ delayed()->nop(); 1756 __ delayed()->nop();
2206 __ bind(slowCase); 1757 __ bind(slowCase);
2207 } 1758 }
2208 #endif // COMPILER1 1759 #endif // COMPILER1
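
A stand-alone sketch (not VM code) of the test-and-extract sequence this fast path performs on the mark word; the bit positions are illustrative rather than the real markOop layout, and the UseBiasedLocking check is omitted:

#include <cstdio>
#include <stdint.h>

int main() {
  const uintptr_t unlocked_value     = 0x1;  // illustrative lock bit
  const unsigned  hash_shift         = 8;    // illustrative hash position
  const uintptr_t hash_mask_in_place = (uintptr_t)0x7FFFFFFF << hash_shift;

  uintptr_t mark = ((uintptr_t)0x1234 << hash_shift) | unlocked_value;
  if ((mark & unlocked_value) == 0) {
    printf("locked: take the slow case\n");          // andi/beq path
  } else if ((mark & hash_mask_in_place) == 0) {
    printf("no hash yet: take the slow case\n");     // hash not computed
  } else {
    printf("hash = 0x%llx\n",                        // li/andr + shr path
           (unsigned long long)((mark & hash_mask_in_place) >> hash_shift));
  }
  return 0;
}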
2209 1760
2210 // The instruction at the verified entry point must be 5 bytes or longer 1761 // The instruction at the verified entry point must be 5 bytes or longer
2211 // because it can be patched on the fly by make_non_entrant. The stack bang 1762 // because it can be patched on the fly by make_non_entrant. The stack bang
2212 // instruction fits that requirement. 1763 // instruction fits that requirement.
2213 1764
2214 // Generate stack overflow check 1765 // Generate stack overflow check
2215 1766
2216 if (UseStackBanging) { 1767 if (UseStackBanging) {
2217 //this function will modify the value in A0 1768 //this function will modify the value in A0
2218 __ push(A0); 1769 __ push(A0);
2219 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size()); 1770 __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2220 __ pop(A0); 1771 __ pop(A0);
2221 } else { 1772 } else {
2222 // need a 5 byte instruction to allow MT safe patching to non-entrant 1773 // need a 5 byte instruction to allow MT safe patching to non-entrant
2223 __ nop(); 1774 __ nop();
2224 __ nop(); 1775 __ nop();
2225 __ nop(); 1776 __ nop();
2226 __ nop(); 1777 __ nop();
2227 __ nop(); 1778 __ nop();
2228 } 1779 }
2229 // Generate a new frame for the wrapper. 1780 // Generate a new frame for the wrapper.
2230 // do mips need this ? 1781 // do mips need this ?
2231 #ifndef OPT_THREAD 1782 #ifndef OPT_THREAD
2232 __ get_thread(TREG); 1783 __ get_thread(TREG);
2233 #endif 1784 #endif
2234 //FIXME here 1785 //FIXME here
2235 __ st_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset())); 1786 __ st_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
2236 // -2 because return address is already present and so is saved ebp 1787 // -2 because return address is already present and so is saved ebp
2237 __ move(AT, -(StackAlignmentInBytes)); 1788 __ move(AT, -(StackAlignmentInBytes));
2238 __ andr(SP, SP, AT); 1789 __ andr(SP, SP, AT);
2239 1790
2240 __ enter(); 1791 __ enter();
2241 __ addiu(SP, SP, -1 * (stack_size - 2*wordSize)); 1792 __ addiu(SP, SP, -1 * (stack_size - 2*wordSize));
2242 1793
2243 // Frame is now completed as far as size and linkage. 1794 // Frame is now completed as far as size and linkage.
2244 1795
2245 int frame_complete = ((intptr_t)__ pc()) - start; 1796 int frame_complete = ((intptr_t)__ pc()) - start;
2246 1797
2247 // Calculate the difference between esp and ebp. We need to know it 1798 // Calculate the difference between esp and ebp. We need to know it
2248 // after the native call because on windows Java Natives will pop 1799 // after the native call because on windows Java Natives will pop
2249 // the arguments and it is painful to do esp relative addressing 1800 // the arguments and it is painful to do esp relative addressing
2250 // in a platform independent way. So after the call we switch to 1801 // in a platform independent way. So after the call we switch to
2251 // ebp relative addressing. 1802 // ebp relative addressing.
2252 // FIXME: actually, the fp_adjustment may not be right, because andr(sp, sp, at) may change 1803 // FIXME: actually, the fp_adjustment may not be right, because andr(sp, sp, at) may change
2253 // the SP 1804 // the SP
2254 int fp_adjustment = stack_size - 2*wordSize; 1805 int fp_adjustment = stack_size - 2*wordSize;
2255 1806
2256 #ifdef COMPILER2 1807 #ifdef COMPILER2
2257 // C2 may leave the stack dirty if not in SSE2+ mode 1808 // C2 may leave the stack dirty if not in SSE2+ mode
2258 // if (UseSSE >= 2) { 1809 __ empty_FPU_stack();
2259 // __ verify_FPU(0, "c2i transition should have clean FPU stack");
2260 //} else {
2261 __ empty_FPU_stack();
2262 //}
2263 #endif /* COMPILER2 */ 1810 #endif /* COMPILER2 */
2264 1811
2265 // Compute the ebp offset for any slots used after the jni call 1812 // Compute the ebp offset for any slots used after the jni call
2266 1813
2267 int lock_slot_ebp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment; 1814 int lock_slot_ebp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
2268 // We use edi as a thread pointer because it is callee save and 1815 // We use edi as a thread pointer because it is callee save and
2269 // if we load it once it is usable thru the entire wrapper 1816 // if we load it once it is usable thru the entire wrapper
2270 // const Register thread = edi; 1817 // const Register thread = edi;
2271 const Register thread = TREG; 1818 const Register thread = TREG;
2272 1819
2273 // We use esi as the oop handle for the receiver/klass 1820 // We use esi as the oop handle for the receiver/klass
2274 // It is callee save so it survives the call to native 1821 // It is callee save so it survives the call to native
2275 1822
2276 // const Register oop_handle_reg = esi; 1823 // const Register oop_handle_reg = esi;
2277 const Register oop_handle_reg = S4; 1824 const Register oop_handle_reg = S4;
2278 if (is_critical_native) { 1825 if (is_critical_native) {
2279 __ stop("generate_native_wrapper in sharedRuntime <2>"); 1826 __ stop("generate_native_wrapper in sharedRuntime <2>");
2280 //TODO:Fu 1827 //TODO:Fu
2281 /* 1828 /*
2282 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args, 1829 check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2283 oop_handle_offset, oop_maps, in_regs, in_sig_bt); 1830 oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2284 */ 1831 */
2285 } 1832 }
2286 1833
2287 #ifndef OPT_THREAD 1834 #ifndef OPT_THREAD
2288 __ get_thread(thread); 1835 __ get_thread(thread);
2289 #endif 1836 #endif
2290 1837
2291 // 1838 //
2292 // We immediately shuffle the arguments so that any vm call we have to 1839 // We immediately shuffle the arguments so that any vm call we have to
2293 // make from here on out (sync slow path, jvmpi, etc.) we will have 1840 // make from here on out (sync slow path, jvmpi, etc.) we will have
2294 // captured the oops from our caller and have a valid oopMap for 1841 // captured the oops from our caller and have a valid oopMap for
2295 // them. 1842 // them.
2296 1843
2297 // ----------------- 1844 // -----------------
2298 // The Grand Shuffle 1845 // The Grand Shuffle
2299 // 1846 //
2300 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 1847 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2301 // and, if static, the class mirror instead of a receiver. This pretty much 1848 // and, if static, the class mirror instead of a receiver. This pretty much
2302 // guarantees that register layout will not match (and mips doesn't use reg 1849 // guarantees that register layout will not match (and mips doesn't use reg
2303 // parms though amd does). Since the native abi doesn't use register args 1850 // parms though amd does). Since the native abi doesn't use register args
2306 // We ignore the extra arguments during the shuffle and handle them at the 1853 // We ignore the extra arguments during the shuffle and handle them at the
2307 // last moment. The shuffle is described by the two calling convention 1854 // last moment. The shuffle is described by the two calling convention
2308 // vectors we have in our possession. We simply walk the java vector to 1855 // vectors we have in our possession. We simply walk the java vector to
2309 // get the source locations and the c vector to get the destinations. 1856 // get the source locations and the c vector to get the destinations.
2310 1857
2311 int c_arg = method->is_static() ? 2 : 1; 1858 int c_arg = method->is_static() ? 2 : 1;
2312 1859
2313 // Record esp-based slot for receiver on stack for non-static methods 1860 // Record esp-based slot for receiver on stack for non-static methods
2314 int receiver_offset = -1; 1861 int receiver_offset = -1;
2315 1862
2316 // This is a trick. We double the stack slots so we can claim 1863 // This is a trick. We double the stack slots so we can claim
2317 // the oops in the caller's frame. Since we are sure to have 1864 // the oops in the caller's frame. Since we are sure to have
2318 // more args than the caller doubling is enough to make 1865 // more args than the caller doubling is enough to make
2319 // sure we can capture all the incoming oop args from the 1866 // sure we can capture all the incoming oop args from the
2320 // caller. 1867 // caller.
2321 // 1868 //
2322 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 1869 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2323 1870
2324 // Mark location of rbp (someday) 1871 // Mark location of rbp (someday)
2325 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp)); 1872 // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2326 1873
2327 // Use eax, ebx as temporaries during any memory-memory moves we have to do 1874 // Use eax, ebx as temporaries during any memory-memory moves we have to do
2339 freg_destroyed[f] = false; 1886 freg_destroyed[f] = false;
2340 } 1887 }
2341 1888
2342 #endif /* ASSERT */ 1889 #endif /* ASSERT */
2343 1890
2344 // We know that we only have args in at most two integer registers (ecx, edx). So eax, ebx 1891 // We know that we only have args in at most two integer registers (ecx, edx). So eax, ebx
2345 // are free to use as temporaries if we have to do stack to stack moves. 1892 // are free to use as temporaries if we have to do stack to stack moves.
2346 // All inbound args are referenced based on ebp and all outbound args via esp. 1893 // All inbound args are referenced based on ebp and all outbound args via esp.
2347 1894
2348 // This may iterate in two different directions depending on the 1895 // This may iterate in two different directions depending on the
2349 // kind of native it is. The reason is that for regular JNI natives 1896 // kind of native it is. The reason is that for regular JNI natives
2350 // the incoming and outgoing registers are offset upwards and for 1897 // the incoming and outgoing registers are offset upwards and for
2351 // critical natives they are offset down. 1898 // critical natives they are offset down.
2352 GrowableArray<int> arg_order(2 * total_in_args); 1899 GrowableArray<int> arg_order(2 * total_in_args);
2353 VMRegPair tmp_vmreg; 1900 VMRegPair tmp_vmreg;
2354 // tmp_vmreg.set1(rbx->as_VMReg());
2355 tmp_vmreg.set1(T8->as_VMReg()); 1901 tmp_vmreg.set1(T8->as_VMReg());
2356 1902
2357 if (!is_critical_native) { 1903 if (!is_critical_native) {
2358 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) { 1904 for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2359 arg_order.push(i); 1905 arg_order.push(i);
2398 } 1944 }
2399 #endif /* ASSERT */ 1945 #endif /* ASSERT */
2400 switch (in_sig_bt[i]) { 1946 switch (in_sig_bt[i]) {
2401 case T_ARRAY: 1947 case T_ARRAY:
2402 if (is_critical_native) { 1948 if (is_critical_native) {
2403 __ stop("generate_native_wrapper in sharedRuntime <2>"); 1949 __ stop("generate_native_wrapper in sharedRuntime <2>");
2404 //TODO:Fu 1950 //TODO:Fu
2405 // unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]); 1951 // unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2406 c_arg++; 1952 c_arg++;
2407 #ifdef ASSERT 1953 #ifdef ASSERT
2408 if (out_regs[c_arg].first()->is_Register()) { 1954 if (out_regs[c_arg].first()->is_Register()) {
2438 break; 1984 break;
2439 1985
2440 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 1986 case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2441 1987
2442 default: 1988 default:
2443 // move32_64(masm, in_regs[i], out_regs[c_arg]);
2444 simple_move32(masm, in_regs[i], out_regs[c_arg]); 1989 simple_move32(masm, in_regs[i], out_regs[c_arg]);
2445 } 1990 }
2446 } 1991 }
2447 1992
2448 // point c_arg at the first arg that is already loaded in case we 1993 // point c_arg at the first arg that is already loaded in case we
2449 // need to spill before we call out 1994 // need to spill before we call out
2450 c_arg = total_c_args - total_in_args; 1995 c_arg = total_c_args - total_in_args;
2451 // Pre-load a static method's oop into oop_handle_reg. Used both by locking code and 1996 // Pre-load a static method's oop into oop_handle_reg. Used both by locking code and
2452 // the normal JNI call code. 1997 // the normal JNI call code.
2453 1998
2454 __ move(oop_handle_reg, A1); 1999 __ move(oop_handle_reg, A1);
2455 2000
2456 if (method->is_static() && !is_critical_native) { 2001 if (method->is_static() && !is_critical_native) {
2457 2002
2458 // load oop into a register 2003 // load oop into a register
2459 int oop_index = __ oop_recorder()->find_index(JNIHandles::make_local( 2004 int oop_index = __ oop_recorder()->find_index(JNIHandles::make_local(
2460 (method->method_holder())->java_mirror())); 2005 (method->method_holder())->java_mirror()));
2461 2006
2462 2007
2463 RelocationHolder rspec = oop_Relocation::spec(oop_index); 2008 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2464 __ relocate(rspec); 2009 __ relocate(rspec);
2465 //__ lui(oop_handle_reg, Assembler::split_high((int)JNIHandles::make_local( 2010 __ patchable_set48(oop_handle_reg, (long)JNIHandles::make_local((method->method_holder())->java_mirror()));
2466 // Klass::cast(method->method_holder())->java_mirror()))); 2011 // Now handlize the static class mirror; it's known not-null.
2467 //__ addiu(oop_handle_reg, oop_handle_reg, Assembler::split_low((int) 2012 __ sd( oop_handle_reg, SP, klass_offset);
2468 // JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()))); 2013 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2469 __ patchable_set48(oop_handle_reg, (long)JNIHandles::make_local((method->method_holder())->java_mirror())); 2014
2470 // __ verify_oop(oop_handle_reg); 2015 // Now get the handle
2471 // Now handlize the static class mirror; it's known not-null. 2016 __ lea(oop_handle_reg, Address(SP, klass_offset));
2472 __ sd( oop_handle_reg, SP, klass_offset); 2017 // store the klass handle as second argument
2473 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2018 __ move(A1, oop_handle_reg);
2474 2019 // and protect the arg if we must spill
2475 // Now get the handle 2020 c_arg--;
2476 __ lea(oop_handle_reg, Address(SP, klass_offset)); 2021 }
2477 // store the klass handle as second argument 2022
2478 __ move(A1, oop_handle_reg);
2479 // and protect the arg if we must spill
2480 c_arg--;
2481 }
2482 // Change state to native (we save the return address in the thread, since it might not 2023 // Change state to native (we save the return address in the thread, since it might not
2483 // be pushed on the stack when we do a stack traversal). It is enough that the pc() 2024 // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2484 // points into the right code segment. It does not have to be the correct return pc. 2025 // points into the right code segment. It does not have to be the correct return pc.
2485 // We use the same pc/oopMap repeatedly when we call out 2026 // We use the same pc/oopMap repeatedly when we call out
2486 2027
2487 intptr_t the_pc = (intptr_t) __ pc(); 2028 intptr_t the_pc = (intptr_t) __ pc();
2488 2029 oop_maps->add_gc_map(the_pc - start, map);
2489 oop_maps->add_gc_map(the_pc - start, map); 2030
2490 2031 __ set_last_Java_frame(SP, noreg, NULL);
2491 //__ set_last_Java_frame(thread, esp, noreg, (address)the_pc); 2032 __ relocate(relocInfo::internal_pc_type);
2492 __ set_last_Java_frame(SP, noreg, NULL); 2033 {
2493 __ relocate(relocInfo::internal_pc_type); 2034 intptr_t save_pc = (intptr_t)the_pc ;
2494 { 2035 __ patchable_set48(AT, save_pc);
2495 intptr_t save_pc = (intptr_t)the_pc ; 2036 }
2496 __ patchable_set48(AT, save_pc); 2037 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
2497 } 2038
2498 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); 2039
2499 2040 // We have all of the arguments set up at this point. We must not touch any register
2500 2041 // argument registers at this point (what if we save/restore them when there are no oops?)
2501 // We have all of the arguments set up at this point. We must not touch any register 2042 {
2502 // argument registers at this point (what if we save/restore them when there are no oops?) 2043 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2503 { 2044 int metadata_index = __ oop_recorder()->find_index(method());
2504 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); 2045 RelocationHolder rspec = metadata_Relocation::spec(metadata_index);
2505 int metadata_index = __ oop_recorder()->find_index(method()); 2046 __ relocate(rspec);
2506 RelocationHolder rspec = metadata_Relocation::spec(metadata_index); 2047 __ patchable_set48(AT, (long)(method()));
2507 __ relocate(rspec); 2048
2508 //__ lui(T6, Assembler::split_high((int)JNIHandles::make_local(method()))); 2049 __ call_VM_leaf(
2509 //__ addiu(T6, T6, Assembler::split_low((int)JNIHandles::make_local(method()))); 2050 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2510 __ patchable_set48(AT, (long)(method())); 2051 thread, AT);
2511 2052
2512 __ call_VM_leaf( 2053 }
2513 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2054
2514 thread, AT); 2055 // These are register definitions we need for locking/unlocking
2515 2056 const Register swap_reg = T8; // Value for cmpxchg (eax on x86)
2516 } 2057 const Register obj_reg = T9; // Will contain the oop
2517 2058 //const Register lock_reg = T6; // Address of compiler lock object (BasicLock)
2518 // These are register definitions we need for locking/unlocking 2059 const Register lock_reg = c_rarg0; // Address of compiler lock object (BasicLock)
2519 // const Register swap_reg = eax; // Must use eax for cmpxchg instruction 2060
2520 // const Register obj_reg = ecx; // Will contain the oop 2061
2521 // const Register lock_reg = edx; // Address of compiler lock object (BasicLock) 2062
2522 //FIXME, I have no idea which register to use 2063 Label slow_path_lock;
2523 const Register swap_reg = T8; // Value for cmpxchg (eax on x86) 2064 Label lock_done;
2524 const Register obj_reg = T9; // Will contain the oop 2065
2525 //const Register lock_reg = T6; // Address of compiler lock object (BasicLock) 2066 // Lock a synchronized method
2526 const Register lock_reg = c_rarg0; // Address of compiler lock object (BasicLock) 2067 if (method->is_synchronized()) {
2527 2068 assert(!is_critical_native, "unhandled");
2528 2069
2529 2070 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2530 Label slow_path_lock; 2071
2531 Label lock_done; 2072 // Get the handle (the 2nd argument)
2532 2073 __ move(oop_handle_reg, A1);
2533 // Lock a synchronized method 2074
2534 if (method->is_synchronized()) { 2075 // Get address of the box
2535 assert(!is_critical_native, "unhandled"); 2076 __ lea(lock_reg, Address(FP, lock_slot_ebp_offset));
2536 2077
2537 const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes(); 2078 // Load the oop from the handle
2538 2079 __ ld(obj_reg, oop_handle_reg, 0);
2539 // Get the handle (the 2nd argument) 2080
2540 __ move(oop_handle_reg, A1); 2081 if (UseBiasedLocking) {
2541 2082 // Note that oop_handle_reg is trashed during this call
2542 // Get address of the box 2083 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, A1, false, lock_done, &slow_path_lock);
2543 __ lea(lock_reg, Address(FP, lock_slot_ebp_offset)); 2084 }
2544 2085
2545 // Load the oop from the handle 2086 // Load immediate 1 into swap_reg %eax
2546 __ ld(obj_reg, oop_handle_reg, 0); 2087 __ move(swap_reg, 1);
2547 2088
2548 if (UseBiasedLocking) { 2089 __ ld(AT, obj_reg, 0);
2549 // Note that oop_handle_reg is trashed during this call 2090 __ orr(swap_reg, swap_reg, AT);
2550 __ biased_locking_enter(lock_reg, obj_reg, swap_reg, A1, 2091
2551 false, lock_done, &slow_path_lock); 2092 __ sd( swap_reg, lock_reg, mark_word_offset);
2552 } 2093 __ cmpxchg(lock_reg, Address(obj_reg, 0), swap_reg);
2553 2094 __ bne(AT, R0, lock_done);
2554 // Load immediate 1 into swap_reg %eax 2095 __ delayed()->nop();
2555 __ move(swap_reg, 1); 2096 // Test if the oopMark is an obvious stack pointer, i.e.,
2556 2097 // 1) (mark & 3) == 0, and
2557 __ ld(AT, obj_reg, 0); 2098 // 2) esp <= mark < esp + os::pagesize()
2558 __ orr(swap_reg, swap_reg, AT); 2099 // These 3 tests can be done by evaluating the following
2559 2100 // expression: ((mark - esp) & (3 - os::vm_page_size())),
2560 __ sd( swap_reg, lock_reg, mark_word_offset); 2101 // assuming both stack pointer and pagesize have their
2561 __ cmpxchg(lock_reg, Address(obj_reg, 0), swap_reg); 2102 // least significant 2 bits clear.
2562 __ bne(AT, R0, lock_done); 2103 // NOTE: the oopMark is in swap_reg %eax as the result of cmpxchg
2563 __ delayed()->nop(); 2104
2564 // Test if the oopMark is an obvious stack pointer, i.e., 2105 __ dsub(swap_reg, swap_reg,SP);
2565 // 1) (mark & 3) == 0, and 2106 __ move(AT, 3 - os::vm_page_size());
2566 // 2) esp <= mark < esp + os::pagesize() 2107 __ andr(swap_reg , swap_reg, AT);
2567 // These 3 tests can be done by evaluating the following 2108 // Save the test result, for recursive case, the result is zero
2568 // expression: ((mark - esp) & (3 - os::vm_page_size())), 2109 __ sd(swap_reg, lock_reg, mark_word_offset);
2569 // assuming both stack pointer and pagesize have their 2110 //FIXME here, Why notEqual?
2570 // least significant 2 bits clear. 2111 __ bne(swap_reg,R0, slow_path_lock);
2571 // NOTE: the oopMark is in swap_reg %eax as the result of cmpxchg 2112 __ delayed()->nop();
2572 2113 // Slow path will re-enter here
2573 __ dsub(swap_reg, swap_reg,SP); 2114 __ bind(lock_done);
2574 __ move(AT, 3 - os::vm_page_size()); 2115
2575 __ andr(swap_reg , swap_reg, AT); 2116 if (UseBiasedLocking) {
2576 // Save the test result, for recursive case, the result is zero 2117 // Re-fetch oop_handle_reg as we trashed it above
2577 __ sd(swap_reg, lock_reg, mark_word_offset); 2118 __ move(A1, oop_handle_reg);
2578 //FIXME here, Why notEqual? 2119 }
2579 __ bne(swap_reg,R0, slow_path_lock); 2120 }
2580 __ delayed()->nop(); 2121
2581 // Slow path will re-enter here 2122
2582 __ bind(lock_done); 2123 // Finally just about ready to make the JNI call
2583 2124
2584 if (UseBiasedLocking) { 2125
2585 // Re-fetch oop_handle_reg as we trashed it above 2126 // get JNIEnv* which is first argument to native
2586 __ move(A1, oop_handle_reg);
2587 }
2588 }
2589
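// The fast-path test above, restated as standalone C++ with made-up numbers
// (4K pages); the function name and parameters are illustrative only:

#include <cstdint>

static bool is_recursive_stack_lock(intptr_t mark, intptr_t sp, intptr_t page_size) {
  // For page_size = 4096, 3 - page_size is ...F003: the mask keeps the two
  // alignment bits plus everything at or above the page-size bit, so the
  // result is 0 exactly when (mark & 3) == 0 and sp <= mark < sp + page_size.
  return ((mark - sp) & (3 - page_size)) == 0;
}

// e.g. mark = sp + 0x40 (a BasicLock slot in this frame) -> true (recursive)
//      mark = a heap address far from sp                 -> false (slow path)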
2590
2591 // Finally just about ready to make the JNI call
2592
2593
2594 // get JNIEnv* which is first argument to native
2595 if (!is_critical_native) { 2127 if (!is_critical_native) {
2596 __ addi(A0, thread, in_bytes(JavaThread::jni_environment_offset())); 2128 __ addi(A0, thread, in_bytes(JavaThread::jni_environment_offset()));
2597 } 2129 }
2598 2130
2599 // Example: Java_java_lang_ref_Finalizer_invokeFinalizeMethod(JNIEnv *env, jclass clazz, jobject ob) 2131 // Example: Java_java_lang_ref_Finalizer_invokeFinalizeMethod(JNIEnv *env, jclass clazz, jobject ob)
2600 /* Load the second argument into A1 */ 2132 /* Load the second argument into A1 */
2601 //__ ld(A1, SP , wordSize ); // klass 2133 //__ ld(A1, SP , wordSize ); // klass
2602 2134
2603 // Now set thread in native 2135 // Now set thread in native
2604 __ addi(AT, R0, _thread_in_native); 2136 __ addi(AT, R0, _thread_in_native);
2605 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset())); 2137 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));
2606 /* Jin: do the call */ 2138 /* Jin: do the call */
2607 __ call(method->native_function(), relocInfo::runtime_call_type); 2139 __ call(method->native_function(), relocInfo::runtime_call_type);
2608 __ delayed()->nop(); 2140 __ delayed()->nop();
2609 // WARNING - on Windows Java Natives use pascal calling convention and pop the 2141 // WARNING - on Windows Java Natives use pascal calling convention and pop the
2610 // arguments off of the stack. We could just re-adjust the stack pointer here 2142 // arguments off of the stack. We could just re-adjust the stack pointer here
2611 // and continue to do SP relative addressing but we instead switch to FP 2143 // and continue to do SP relative addressing but we instead switch to FP
2612 // relative addressing. 2144 // relative addressing.
2613 2145
2614 // Unpack native results. 2146 // Unpack native results.
2615 switch (ret_type) { 2147 switch (ret_type) {
2616 case T_BOOLEAN: __ c2bool(V0); break; 2148 case T_BOOLEAN: __ c2bool(V0); break;
2617 case T_CHAR : __ andi(V0,V0, 0xFFFF); break; 2149 case T_CHAR : __ andi(V0,V0, 0xFFFF); break;
2618 case T_BYTE : __ sign_extend_byte (V0); break; 2150 case T_BYTE : __ sign_extend_byte (V0); break;
2619 case T_SHORT : __ sign_extend_short(V0); break; 2151 case T_SHORT : __ sign_extend_short(V0); break;
2620 case T_INT : break; // nothing to do 2152 case T_INT : break; // nothing to do
2621 case T_DOUBLE : 2153 case T_DOUBLE :
2622 case T_FLOAT : 2154 case T_FLOAT :
2623 // Result is in st0; we'll save as needed 2155 // Result is in st0; we'll save as needed
2624 break; 2156 break;
2625 case T_ARRAY: // Really a handle 2157 case T_ARRAY: // Really a handle
2626 case T_OBJECT: // Really a handle 2158 case T_OBJECT: // Really a handle
2627 break; // can't de-handlize until after safepoint check 2159 break; // can't de-handlize until after safepoint check
2628 case T_VOID: break; 2160 case T_VOID: break;
2629 case T_LONG: break; 2161 case T_LONG: break;
2630 default : ShouldNotReachHere(); 2162 default : ShouldNotReachHere();
2631 } 2163 }
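// The same narrowing in plain, standalone C++ (illustrative: the 'kind' tags
// below are ad-hoc stand-ins for the BasicType cases above):

#include <cstdint>

static int64_t unpack_native_int_result(int64_t v0, char kind) {
  switch (kind) {
    case 'Z': return v0 != 0;       // T_BOOLEAN: c2bool, any nonzero -> 1
    case 'C': return v0 & 0xFFFF;   // T_CHAR: zero-extend the low 16 bits
    case 'B': return (int8_t)v0;    // T_BYTE: sign-extend the low 8 bits
    case 'S': return (int16_t)v0;   // T_SHORT: sign-extend the low 16 bits
    default : return v0;            // T_INT/T_LONG: value is already in place
  }
}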
2632 // Switch thread to "native transition" state before reading the synchronization state. 2164 // Switch thread to "native transition" state before reading the synchronization state.
2633 // This additional state is necessary because reading and testing the synchronization 2165 // This additional state is necessary because reading and testing the synchronization
2634 // state is not atomic w.r.t. GC, as this scenario demonstrates: 2166 // state is not atomic w.r.t. GC, as this scenario demonstrates:
2635 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 2167 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2636 // VM thread changes sync state to synchronizing and suspends threads for GC. 2168 // VM thread changes sync state to synchronizing and suspends threads for GC.
2637 // Thread A is resumed to finish this native method, but doesn't block here since it 2169 // Thread A is resumed to finish this native method, but doesn't block here since it
2638 // didn't see any synchronization in progress, and escapes. 2170 // didn't see any synchronization in progress, and escapes.
2639 // __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans); 2171 // __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2640 //__ sw(_thread_in_native_trans, thread, JavaThread::thread_state_offset()); 2172 //__ sw(_thread_in_native_trans, thread, JavaThread::thread_state_offset());
2641 // __ move(AT, (int)_thread_in_native_trans); 2173 // __ move(AT, (int)_thread_in_native_trans);
2642 __ addi(AT, R0, _thread_in_native_trans); 2174 __ addi(AT, R0, _thread_in_native_trans);
2643 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset())); 2175 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));
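// Shape of the transition being emitted here, as a C-level outline (not real
// HotSpot code; names follow the accessors used in this file):
//
//   thread->thread_state = _thread_in_native_trans;    // the sw above
//   // this store must become visible before the checks below on MP systems
//   if (SafepointSynchronize::state != _not_synchronized ||
//       thread->suspend_flags != 0) {
//     JavaThread::check_special_condition_for_native_trans(thread); // may block
//   }
//   thread->thread_state = _thread_in_Java;             // after the check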
2644 2176
2645 Label after_transition; 2177 Label after_transition;
2646 2178
2647 // check for safepoint operation in progress and/or pending suspend requests 2179 // check for safepoint operation in progress and/or pending suspend requests
2648 { Label Continue; 2180 { Label Continue;
2649 //FIXME here, which register should we use? 2181 //FIXME here, which register should we use?
2650 // SafepointSynchronize::_not_synchronized); 2182 // SafepointSynchronize::_not_synchronized);
2651 __ li(AT, SafepointSynchronize::address_of_state()); 2183 __ li(AT, SafepointSynchronize::address_of_state());
2652 __ lw(A0, AT, 0); 2184 __ lw(A0, AT, 0);
2653 __ addi(AT, A0, -SafepointSynchronize::_not_synchronized); 2185 __ addi(AT, A0, -SafepointSynchronize::_not_synchronized);
2654 Label L; 2186 Label L;
2655 __ bne(AT,R0, L); 2187 __ bne(AT,R0, L);
2656 __ delayed()->nop(); 2188 __ delayed()->nop();
2657 __ lw(AT, thread, in_bytes(JavaThread::suspend_flags_offset())); 2189 __ lw(AT, thread, in_bytes(JavaThread::suspend_flags_offset()));
2658 __ beq(AT, R0, Continue); 2190 __ beq(AT, R0, Continue);
2659 __ delayed()->nop(); 2191 __ delayed()->nop();
2660 __ bind(L); 2192 __ bind(L);
2661 2193
2662 // Don't use call_VM as it will see a possible pending exception and forward it 2194 // Don't use call_VM as it will see a possible pending exception and forward it
2663 // and never return here, preventing us from clearing _last_native_pc down below. 2195 // and never return here, preventing us from clearing _last_native_pc down below.
2664 // Also can't use call_VM_leaf either as it will check to see if esi & edi are 2196 // Also can't use call_VM_leaf either as it will check to see if esi & edi are
2665 // preserved and correspond to the bcp/locals pointers. So we do a runtime call 2197 // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2666 // by hand. 2198 // by hand.
2667 // 2199 //
2668 save_native_result(masm, ret_type, stack_slots); 2200 save_native_result(masm, ret_type, stack_slots);
2669 __ move (A0, thread); 2201 __ move (A0, thread);
2670 __ addi(SP,SP, -wordSize); 2202 __ addi(SP,SP, -wordSize);
2671 __ push(S2); 2203 __ push(S2);
2672 __ move(AT, -(StackAlignmentInBytes)); 2204 __ move(AT, -(StackAlignmentInBytes));
2673 __ move(S2, SP); // use S2 as a sender SP holder 2205 __ move(S2, SP); // use S2 as a sender SP holder
2674 __ andr(SP, SP, AT); // align stack as required by ABI 2206 __ andr(SP, SP, AT); // align stack as required by ABI
2675 if (!is_critical_native) { 2207 if (!is_critical_native) {
2676 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::runtime_call_type); 2208 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::runtime_call_type);
2677 __ delayed()->nop(); 2209 __ delayed()->nop();
2678 } else { 2210 } else {
2679 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition), relocInfo::runtime_call_type); 2211 __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition), relocInfo::runtime_call_type);
2680 __ delayed()->nop(); 2212 __ delayed()->nop();
2681 } 2213 }
2682 __ move(SP, S2); // use S2 as a sender SP holder 2214 __ move(SP, S2); // use S2 as a sender SP holder
2683 __ pop(S2); 2215 __ pop(S2);
2684 __ addi(SP,SP, wordSize); 2216 __ addi(SP,SP, wordSize);
2685 // added for compressed oops 2217 // added for compressed oops
2686 __ reinit_heapbase(); 2218 __ reinit_heapbase();
2687 // Restore any method result value 2219 // Restore any method result value
2688 restore_native_result(masm, ret_type, stack_slots); 2220 restore_native_result(masm, ret_type, stack_slots);
2689 2221
2690 if (is_critical_native) { 2222 if (is_critical_native) {
2691 // The call above performed the transition to thread_in_Java so 2223 // The call above performed the transition to thread_in_Java so
2692 // skip the transition logic below. 2224 // skip the transition logic below.
2693 __ beq(R0, R0, after_transition); 2225 __ beq(R0, R0, after_transition);
2694 __ delayed()->nop(); 2226 __ delayed()->nop();
2695 } 2227 }
2696 2228
2697 __ bind(Continue); 2229 __ bind(Continue);
2698 } 2230 }
2699 2231
2700 // change thread state 2232 // change thread state
2701 __ addi(AT, R0, _thread_in_Java); 2233 __ addi(AT, R0, _thread_in_Java);
2702 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset())); 2234 __ sw(AT, thread, in_bytes(JavaThread::thread_state_offset()));
2703 __ bind(after_transition); 2235 __ bind(after_transition);
2704 Label reguard; 2236 Label reguard;
2705 Label reguard_done; 2237 Label reguard_done;
2706 __ lw(AT, thread, in_bytes(JavaThread::stack_guard_state_offset())); 2238 __ lw(AT, thread, in_bytes(JavaThread::stack_guard_state_offset()));
2707 __ addi(AT, AT, -JavaThread::stack_guard_yellow_disabled); 2239 __ addi(AT, AT, -JavaThread::stack_guard_yellow_disabled);
2708 __ beq(AT, R0, reguard); 2240 __ beq(AT, R0, reguard);
2709 __ delayed()->nop(); 2241 __ delayed()->nop();
2710 // slow path reguard re-enters here 2242 // slow path reguard re-enters here
2711 __ bind(reguard_done); 2243 __ bind(reguard_done);
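// C-level view of the check just emitted (outline only):
//
//   if (thread->stack_guard_state == JavaThread::stack_guard_yellow_disabled) {
//     // a stack overflow in native code disabled the yellow zone; re-arm it
//     SharedRuntime::reguard_yellow_pages();   // the slow path at `reguard` below
//   }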
2712 2244
2713 // Handle possible exception (will unlock if necessary) 2245 // Handle possible exception (will unlock if necessary)
2714 2246
2715 // native result if any is live 2247 // native result if any is live
2716 2248
2717 // Unlock 2249 // Unlock
2718 Label slow_path_unlock; 2250 Label slow_path_unlock;
2719 Label unlock_done; 2251 Label unlock_done;
2720 if (method->is_synchronized()) { 2252 if (method->is_synchronized()) {
2721 2253
2722 Label done; 2254 Label done;
2723 2255
2724 // Get locked oop from the handle we passed to jni 2256 // Get locked oop from the handle we passed to jni
2725 __ ld( obj_reg, oop_handle_reg, 0); 2257 __ ld( obj_reg, oop_handle_reg, 0);
2726 //FIXME 2258 //FIXME
2727 if (UseBiasedLocking) { 2259 if (UseBiasedLocking) {
2728 __ biased_locking_exit(obj_reg, T8, done); 2260 __ biased_locking_exit(obj_reg, T8, done);
2729 2261
2730 } 2262 }
2731 2263
2732 // Simple recursive lock? 2264 // Simple recursive lock?
2733 2265
2734 __ ld(AT, FP, lock_slot_ebp_offset); 2266 __ ld(AT, FP, lock_slot_ebp_offset);
2735 __ beq(AT, R0, done); 2267 __ beq(AT, R0, done);
2736 __ delayed()->nop(); 2268 __ delayed()->nop();
2737 // Must save eax if it is live now because cmpxchg must use it 2269 // Must save eax if it is live now because cmpxchg must use it
2738 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { 2270 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2739 save_native_result(masm, ret_type, stack_slots); 2271 save_native_result(masm, ret_type, stack_slots);
2740 } 2272 }
2741 2273
2742 // get old displaced header 2274 // get old displaced header
2743 __ ld (T8, FP, lock_slot_ebp_offset); 2275 __ ld (T8, FP, lock_slot_ebp_offset);
2744 // get address of the stack lock 2276 // get address of the stack lock
2745 //FIXME aoqi 2277 __ addi (c_rarg0, FP, lock_slot_ebp_offset);
2746 //__ addi (T6, FP, lock_slot_ebp_offset); 2278 // Atomic swap old header if oop still contains the stack lock
2747 __ addi (c_rarg0, FP, lock_slot_ebp_offset); 2279 __ cmpxchg(T8, Address(obj_reg, 0), c_rarg0);
2748 // Atomic swap old header if oop still contains the stack lock 2280
2749 //FIXME aoqi 2281 __ beq(AT, R0, slow_path_unlock);
2750 //__ cmpxchg(T8, Address(obj_reg, 0),T6 ); 2282 __ delayed()->nop();
2751 __ cmpxchg(T8, Address(obj_reg, 0), c_rarg0); 2283 // slow path re-enters here
2752 2284 __ bind(unlock_done);
2753 __ beq(AT, R0, slow_path_unlock); 2285 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2754 __ delayed()->nop(); 2286 restore_native_result(masm, ret_type, stack_slots);
2755 // slow path re-enters here 2287 }
2756 __ bind(unlock_done); 2288
2757 if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) { 2289 __ bind(done);
2758 restore_native_result(masm, ret_type, stack_slots); 2290
2759 } 2291 }
2760 2292 {
2761 __ bind(done); 2293 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
2762 2294 // Tell dtrace about this method exit
2763 } 2295 save_native_result(masm, ret_type, stack_slots);
2764 { 2296 int metadata_index = __ oop_recorder()->find_index( (method()));
2765 SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0); 2297 RelocationHolder rspec = metadata_Relocation::spec(metadata_index);
2766 // Tell dtrace about this method exit 2298 __ relocate(rspec);
2767 save_native_result(masm, ret_type, stack_slots); 2299 __ patchable_set48(AT, (long)(method()));
2768 int metadata_index = __ oop_recorder()->find_index( (method())); 2300
2769 RelocationHolder rspec = metadata_Relocation::spec(metadata_index); 2301 __ call_VM_leaf(
2770 __ relocate(rspec); 2302 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2771 //__ lui(T6, Assembler::split_high((int)JNIHandles::make_local(method()))); 2303 thread, AT);
2772 //__ addiu(T6, T6, Assembler::split_low((int)JNIHandles::make_local(method()))); 2304 restore_native_result(masm, ret_type, stack_slots);
2773 __ patchable_set48(AT, (long)(method())); 2305 }
2774 2306
2775 __ call_VM_leaf( 2307 // We can finally stop using that last_Java_frame we set up ages ago
2776 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2308
2777 thread, AT); 2309 __ reset_last_Java_frame(false, true);
2778 restore_native_result(masm, ret_type, stack_slots); 2310
2779 } 2311 // Unpack oop result
2780 2312 if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2781 // We can finally stop using that last_Java_frame we set up ages ago 2313 Label L;
2782 2314 __ beq(V0, R0,L );
2783 __ reset_last_Java_frame(false, true); 2315 __ delayed()->nop();
2784 2316 __ ld(V0, V0, 0);
2785 // Unpack oop result 2317 __ bind(L);
2786 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2318 __ verify_oop(V0);
2787 Label L; 2319 }
2788 // __ cmpl(eax, NULL_WORD);
2789 // __ jcc(Assembler::equal, L);
2790 __ beq(V0, R0,L );
2791 __ delayed()->nop();
2792 // __ movl(eax, Address(eax));
2793 __ ld(V0, V0, 0);
2794 __ bind(L);
2795 // __ verify_oop(eax);
2796 __ verify_oop(V0);
2797 }
2798 2320
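// What the branch + load above do, stated in C terms: the native call returned
// a JNI handle (an oop*), and a NULL handle must map to NULL rather than being
// dereferenced (V0 holds the handle on entry and the oop on exit):
//
//   oop result = (handle == NULL) ? NULL : *(oop*)handle;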
2799 if (!is_critical_native) { 2321 if (!is_critical_native) {
2800 // reset handle block 2322 // reset handle block
2801 __ ld(AT, thread, in_bytes(JavaThread::active_handles_offset())); 2323 __ ld(AT, thread, in_bytes(JavaThread::active_handles_offset()));
2802 __ sw(R0, AT, JNIHandleBlock::top_offset_in_bytes()); 2324 __ sw(R0, AT, JNIHandleBlock::top_offset_in_bytes());
2803 } 2325 }
2804 2326
2805 if (!is_critical_native) { 2327 if (!is_critical_native) {
2806 // Any exception pending? 2328 // Any exception pending?
2807 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset())); 2329 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2808 2330
2809 __ bne(AT, R0, exception_pending); 2331 __ bne(AT, R0, exception_pending);
2810 __ delayed()->nop(); 2332 __ delayed()->nop();
2811 } 2333 }
2812 // no exception, we're almost done 2334 // no exception, we're almost done
2813 2335
2814 // check that only the result value is on the FPU stack 2336 // check that only the result value is on the FPU stack
2815 __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit"); 2337 __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
2816 2338
2817 // Fixup floating pointer results so that result looks like a return from a compiled method
2818 /* if (ret_type == T_FLOAT) {
2819 if (UseSSE >= 1) {
2820 // Pop st0 and store as float and reload into xmm register
2821 __ fstp_s(Address(ebp, -4));
2822 __ movss(xmm0, Address(ebp, -4));
2823 }
2824 } else if (ret_type == T_DOUBLE) {
2825 if (UseSSE >= 2) {
2826 // Pop st0 and store as double and reload into xmm register
2827 __ fstp_d(Address(ebp, -8));
2828 __ movsd(xmm0, Address(ebp, -8));
2829 }
2830 }
2831 */
2832 // Return 2339 // Return
2833 #ifndef OPT_THREAD 2340 #ifndef OPT_THREAD
2834 __ get_thread(TREG); 2341 __ get_thread(TREG);
2835 #endif 2342 #endif
2836 __ ld_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset())); 2343 __ ld_ptr(SP, TREG, in_bytes(JavaThread::last_Java_sp_offset()));
2837 __ leave(); 2344 __ leave();
2838 2345
2839 __ jr(RA); 2346 __ jr(RA);
2840 __ delayed()->nop(); 2347 __ delayed()->nop();
2841 // Unexpected paths are out of line and go here 2348 // Unexpected paths are out of line and go here
2842 /* 2349 /*
2843 if (!is_critical_native) { 2350 if (!is_critical_native) {
2844 // forward the exception 2351 // forward the exception
2845 __ bind(exception_pending); 2352 __ bind(exception_pending);
2846 2353
2847 // and forward the exception 2354 // and forward the exception
2848 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry())); 2355 __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2849 } 2356 }
2850 */ 2357 */
2851 // Slow path locking & unlocking 2358 // Slow path locking & unlocking
2852 if (method->is_synchronized()) { 2359 if (method->is_synchronized()) {
2853 2360
2854 // BEGIN Slow path lock 2361 // BEGIN Slow path lock
2855 2362 __ bind(slow_path_lock);
2856 __ bind(slow_path_lock); 2363
2857 2364 // protect the args we've loaded
2858 // protect the args we've loaded 2365 save_args(masm, total_c_args, c_arg, out_regs);
2859 save_args(masm, total_c_args, c_arg, out_regs); 2366
2860 2367 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM
2861 // has last_Java_frame setup. No exceptions so do vanilla call not call_VM 2368 // args are (oop obj, BasicLock* lock, JavaThread* thread)
2862 // args are (oop obj, BasicLock* lock, JavaThread* thread) 2369
2863 2370 __ move(A0, obj_reg);
2864 __ move(A0, obj_reg); 2371 __ move(A1, lock_reg);
2865 __ move(A1, lock_reg); 2372 __ move(A2, thread);
2866 __ move(A2, thread); 2373 __ addi(SP, SP, - 3*wordSize);
2867 __ addi(SP, SP, - 3*wordSize); 2374
2375 __ move(AT, -(StackAlignmentInBytes));
2376 __ move(S2, SP); // use S2 as a sender SP holder
2377 __ andr(SP, SP, AT); // align stack as required by ABI
2378
2379 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2380 __ delayed()->nop();
2381 __ move(SP, S2);
2382 __ addi(SP, SP, 3*wordSize);
2383
2384 restore_args(masm, total_c_args, c_arg, out_regs);
2385
2386 #ifdef ASSERT
2387 { Label L;
2388 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2389 __ beq(AT, R0, L);
2390 __ delayed()->nop();
2391 __ stop("no pending exception allowed on exit from monitorenter");
2392 __ bind(L);
2393 }
2394 #endif
2395 __ b(lock_done);
2396 __ delayed()->nop();
2397 // END Slow path lock
2398
2399 // BEGIN Slow path unlock
2400 __ bind(slow_path_unlock);
2401
2402 // Slow path unlock
2403
2404 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2405 save_native_result(masm, ret_type, stack_slots);
2406 }
2407 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2408
2409 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2410 __ push(AT);
2411 __ sd(R0, thread, in_bytes(Thread::pending_exception_offset()));
2868 2412
2869 __ move(AT, -(StackAlignmentInBytes)); 2413 __ move(AT, -(StackAlignmentInBytes));
2870 __ move(S2, SP); // use S2 as a sender SP holder 2414 __ move(S2, SP); // use S2 as a sender SP holder
2871 __ andr(SP, SP, AT); // align stack as required by ABI 2415 __ andr(SP, SP, AT); // align stack as required by ABI
2872 2416
2873 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type); 2417 // should be a peal
2874 __ delayed()->nop(); 2418 // +wordSize because of the push above
2419 __ addi(A1, FP, lock_slot_ebp_offset);
2420
2421 __ move(A0, obj_reg);
2422 __ addi(SP,SP, -2*wordSize);
2423 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2424 relocInfo::runtime_call_type);
2425 __ delayed()->nop();
2426 __ addi(SP,SP, 2*wordSize);
2875 __ move(SP, S2); 2427 __ move(SP, S2);
2876 __ addi(SP, SP, 3*wordSize); 2428 // added for compressed oops
2877 2429 __ reinit_heapbase();
2878 restore_args(masm, total_c_args, c_arg, out_regs);
2879
2880 #ifdef ASSERT 2430 #ifdef ASSERT
2881 { Label L; 2431 {
2882 // __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD); 2432 Label L;
2883 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset())); 2433 __ lw( AT, thread, in_bytes(Thread::pending_exception_offset()));
2884 //__ jcc(Assembler::equal, L); 2434 __ beq(AT, R0, L);
2885 __ beq(AT, R0, L); 2435 __ delayed()->nop();
2886 __ delayed()->nop(); 2436 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2887 __ stop("no pending exception allowed on exit from monitorenter"); 2437 __ bind(L);
2888 __ bind(L); 2438 }
2889 }
2890 #endif
2891 __ b(lock_done);
2892 __ delayed()->nop();
2893 // END Slow path lock
2894
2895 // BEGIN Slow path unlock
2896 __ bind(slow_path_unlock);
2897
2898 // Slow path unlock
2899
2900 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2901 save_native_result(masm, ret_type, stack_slots);
2902 }
2903 // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2904
2905 __ ld(AT, thread, in_bytes(Thread::pending_exception_offset()));
2906 __ push(AT);
2907 __ sd(R0, thread, in_bytes(Thread::pending_exception_offset()));
2908
2909 __ move(AT, -(StackAlignmentInBytes));
2910 __ move(S2, SP); // use S2 as a sender SP holder
2911 __ andr(SP, SP, AT); // align stack as required by ABI
2912
2913 // should be a peal
2914 // +wordSize because of the push above
2915 __ addi(A1, FP, lock_slot_ebp_offset);
2916
2917 __ move(A0, obj_reg);
2918 __ addi(SP,SP, -2*wordSize);
2919 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C),
2920 relocInfo::runtime_call_type);
2921 __ delayed()->nop();
2922 __ addi(SP,SP, 2*wordSize);
2923 __ move(SP, S2);
2924 // added for compressed oops
2925 __ reinit_heapbase();
2926 #ifdef ASSERT
2927 {
2928 Label L;
2929 // __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
2930 __ lw( AT, thread, in_bytes(Thread::pending_exception_offset()));
2931 //__ jcc(Assembler::equal, L);
2932 __ beq(AT, R0, L);
2933 __ delayed()->nop();
2934 __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2935 __ bind(L);
2936 }
2937 #endif /* ASSERT */ 2439 #endif /* ASSERT */
2938 2440
2939 __ pop(AT); 2441 __ pop(AT);
2940 __ sd(AT, thread, in_bytes(Thread::pending_exception_offset())); 2442 __ sd(AT, thread, in_bytes(Thread::pending_exception_offset()));
2941 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) { 2443 if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2942 restore_native_result(masm, ret_type, stack_slots); 2444 restore_native_result(masm, ret_type, stack_slots);
2943 } 2445 }
2944 __ b(unlock_done); 2446 __ b(unlock_done);
2945 __ delayed()->nop(); 2447 __ delayed()->nop();
2946 // END Slow path unlock 2448 // END Slow path unlock
2947 2449
2948 } 2450 }
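// Outline of the pending-exception discipline used by the unlock slow path
// above (pseudocode; complete_monitor_unlocking_C contains an EXCEPTION_MARK,
// so anything already pending must be parked around the call):
//
//   saved = thread->pending_exception;     // the ld + push before the call
//   thread->pending_exception = NULL;
//   SharedRuntime::complete_monitor_unlocking_C(obj, lock);
//   // assert: nothing may be pending on exit from complete_monitor_unlocking_C
//   thread->pending_exception = saved;     // the pop + sd after the call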
2949 2451
2950 // SLOW PATH Reguard the stack if needed 2452 // SLOW PATH Reguard the stack if needed
2951 2453
2952 __ bind(reguard); 2454 __ bind(reguard);
2953 save_native_result(masm, ret_type, stack_slots); 2455 save_native_result(masm, ret_type, stack_slots);
2954 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), 2456 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages),
2955 relocInfo::runtime_call_type); 2457 relocInfo::runtime_call_type);
2956 __ delayed()->nop(); 2458 __ delayed()->nop();
2957 // added for compressed oops 2459 // added for compressed oops
2958 __ reinit_heapbase(); 2460 __ reinit_heapbase();
2959 restore_native_result(masm, ret_type, stack_slots); 2461 restore_native_result(masm, ret_type, stack_slots);
2960 __ b(reguard_done); 2462 __ b(reguard_done);
2961 __ delayed()->nop(); 2463 __ delayed()->nop();
2962 2464
2963 // BEGIN EXCEPTION PROCESSING 2465 // BEGIN EXCEPTION PROCESSING
2964 if (!is_critical_native) { 2466 if (!is_critical_native) {
2965 // Forward the exception 2467 // Forward the exception
2966 __ bind(exception_pending); 2468 __ bind(exception_pending);
2967 2469
2968 // remove possible return value from FPU register stack 2470 // remove possible return value from FPU register stack
2969 __ empty_FPU_stack(); 2471 __ empty_FPU_stack();
2970 2472
2971 // pop our frame 2473 // pop our frame
2972 // forward_exception_entry needs the return address on the stack 2474 // forward_exception_entry needs the return address on the stack
2973 __ addiu(SP, FP, wordSize); 2475 __ addiu(SP, FP, wordSize);
2974 __ ld(FP, SP, (-1) * wordSize); 2476 __ ld(FP, SP, (-1) * wordSize);
2975 2477
2976 // and forward the exception 2478 // and forward the exception
2977 __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); 2479 __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
2978 __ delayed()->nop(); 2480 __ delayed()->nop();
2979 } 2481 }
2980 __ flush(); 2482 __ flush();
2981 2483
2982 nmethod *nm = nmethod::new_native_nmethod(method, 2484 nmethod *nm = nmethod::new_native_nmethod(method,
2983 compile_id, 2485 compile_id,
2984 masm->code(), 2486 masm->code(),
2985 vep_offset, 2487 vep_offset,
2986 frame_complete, 2488 frame_complete,
2987 stack_slots / VMRegImpl::slots_per_word, 2489 stack_slots / VMRegImpl::slots_per_word,
2988 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2490 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2989 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size), 2491 in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2990 oop_maps); 2492 oop_maps);
2991 2493
2992 if (is_critical_native) { 2494 if (is_critical_native) {
2993 nm->set_lazy_critical_native(true); 2495 nm->set_lazy_critical_native(true);
2994 } 2496 }
2995 return nm; 2497
2996 2498 return nm;
2997 2499
2998 } 2500 }
2999 2501
3000 #ifdef HAVE_DTRACE_H 2502 #ifdef HAVE_DTRACE_H
3001 // --------------------------------------------------------------------------- 2503 // ---------------------------------------------------------------------------
3025 } 2527 }
3026 return ret; 2528 return ret;
3027 } 2529 }
3028 2530
3029 2531
3030 nmethod *SharedRuntime::generate_dtrace_nmethod( 2532 nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
3031 MacroAssembler *masm, methodHandle method) { 2533 methodHandle method) {
3032 2534
3033 2535
3034 // generate_dtrace_nmethod is guarded by a mutex so we are sure to 2536 // generate_dtrace_nmethod is guarded by a mutex so we are sure to
3035 // be single threaded in this method. 2537 // be single threaded in this method.
3036 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); 2538 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
3109 2611
3110 2612
3111 // Now figure out where the args must be stored and how much stack space 2613 // Now figure out where the args must be stored and how much stack space
3112 // they require (neglecting out_preserve_stack_slots but including space for storing 2614 // they require (neglecting out_preserve_stack_slots but including space for storing
3113 // the 1st six register arguments). It's weird; see int_stk_helper. 2615 // the 1st six register arguments). It's weird; see int_stk_helper.
3114 // 2616
3115 int out_arg_slots; 2617 int out_arg_slots;
3116 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args); 2618 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
3117 2619
3118 // Calculate the total number of stack slots we will need. 2620 // Calculate the total number of stack slots we will need.
3119 2621
3545 #endif // HAVE_DTRACE_H 3047 #endif // HAVE_DTRACE_H
3546 3048
3547 // this function returns the adjust size (in number of words) to a c2i adapter 3049 // this function returns the adjust size (in number of words) to a c2i adapter
3548 // activation for use during deoptimization 3050 // activation for use during deoptimization
3549 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 3051 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
3550 return (callee_locals - callee_parameters) * Interpreter::stackElementWords; 3052 return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
3551 } 3053 }
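// A worked example with made-up numbers, assuming Interpreter::stackElementWords
// is 1 on this 64-bit port: a callee with 3 parameters and 7 locals needs room
// for 4 extra stack elements when its interpreter frame is rebuilt during deopt:
//
//   last_frame_adjust(3, 7) == (7 - 3) * 1 == 4   // words of adjustment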
3552 3054
3553 // "Top of Stack" slots that may be unused by the calling convention but must 3055 // "Top of Stack" slots that may be unused by the calling convention but must
3554 // otherwise be preserved. 3056 // otherwise be preserved.
3555 // On Intel these are not necessary and the value can be zero. 3057 // On Intel these are not necessary and the value can be zero.
3556 // On Sparc this describes the words reserved for storing a register window 3058 // On Sparc this describes the words reserved for storing a register window
3557 // when an interrupt occurs. 3059 // when an interrupt occurs.
3558 uint SharedRuntime::out_preserve_stack_slots() { 3060 uint SharedRuntime::out_preserve_stack_slots() {
3559 //return frame::register_save_words * VMRegImpl::slots_per_word; 3061 //return frame::register_save_words * VMRegImpl::slots_per_word;
3560 return 0; 3062 return 0;
3561 } 3063 }
3562 /*
3563 static void gen_new_frame(MacroAssembler* masm, bool deopt) {
3564 //
3565 // Common out the new frame generation for deopt and uncommon trap
3566 //
3567 Register G3pcs = G3_scratch; // Array of new pcs (input)
3568 Register Oreturn0 = O0;
3569 Register Oreturn1 = O1;
3570 Register O2UnrollBlock = O2;
3571 Register O3array = O3; // Array of frame sizes (input)
3572 Register O4array_size = O4; // number of frames (input)
3573 Register O7frame_size = O7; // number of frames (input)
3574
3575 __ ld_ptr(O3array, 0, O7frame_size);
3576 __ sub(G0, O7frame_size, O7frame_size);
3577 __ save(SP, O7frame_size, SP);
3578 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc
3579
3580 #ifdef ASSERT
3581 // make sure that the frames are aligned properly
3582 #ifndef _LP64
3583 __ btst(wordSize*2-1, SP);
3584 __ breakpoint_trap(Assembler::notZero);
3585 #endif
3586 #endif
3587
3588 // Deopt needs to pass some extra live values from frame to frame
3589
3590 if (deopt) {
3591 __ mov(Oreturn0->after_save(), Oreturn0);
3592 __ mov(Oreturn1->after_save(), Oreturn1);
3593 }
3594
3595 __ mov(O4array_size->after_save(), O4array_size);
3596 __ sub(O4array_size, 1, O4array_size);
3597 __ mov(O3array->after_save(), O3array);
3598 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
3599 __ add(G3pcs, wordSize, G3pcs); // point to next pc value
3600
3601 #ifdef ASSERT
3602 // trash registers to show a clear pattern in backtraces
3603 __ set(0xDEAD0000, I0);
3604 __ add(I0, 2, I1);
3605 __ add(I0, 4, I2);
3606 __ add(I0, 6, I3);
3607 __ add(I0, 8, I4);
3608 // Don't touch I5 could have valuable savedSP
3609 __ set(0xDEADBEEF, L0);
3610 __ mov(L0, L1);
3611 __ mov(L0, L2);
3612 __ mov(L0, L3);
3613 __ mov(L0, L4);
3614 __ mov(L0, L5);
3615
3616 // trash the return value as there is nothing to return yet
3617 __ set(0xDEAD0001, O7);
3618 #endif
3619
3620 __ mov(SP, O5_savedSP);
3621 }
3622
3623
3624 static void make_new_frames(MacroAssembler* masm, bool deopt) {
3625 //
3626 // loop through the UnrollBlock info and create new frames
3627 //
3628 Register G3pcs = G3_scratch;
3629 Register Oreturn0 = O0;
3630 Register Oreturn1 = O1;
3631 Register O2UnrollBlock = O2;
3632 Register O3array = O3;
3633 Register O4array_size = O4;
3634 Label loop;
3635
3636 // Before we make new frames, check to see if stack is available.
3637 // Do this after the caller's return address is on top of stack
3638 if (UseStackBanging) {
3639 // Get total frame size for interpreted frames
3640 __ ld(Address(O2UnrollBlock, 0,
3641 Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4);
3642 __ bang_stack_size(O4, O3, G3_scratch);
3643 }
3644
3645 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size);
3646 __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs);
3647
3648 __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array);
3649
3650 // Adjust old interpreter frame to make space for new frame's extra java locals
3651 //
3652 // We capture the original sp for the transition frame only because it is needed in
3653 // order to properly calculate interpreter_sp_adjustment. Even though in real life
3654 // every interpreter frame captures a savedSP, it is only needed at the transition
3655 // (fortunately). If we had to have it correct everywhere then we would need to
3656 // be told the sp_adjustment for each frame we create. If the frame size array
3657 // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
3658 // for each frame we create and keep up the illusion everywhere.
3659 //
3660
3661 __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7);
3662 __ mov(SP, O5_savedSP); // remember initial sender's original sp before adjustment
3663 __ sub(SP, O7, SP);
3664
3665 #ifdef ASSERT
3666 // make sure that there is at least one entry in the array
3667 __ tst(O4array_size);
3668 __ breakpoint_trap(Assembler::zero);
3669 #endif
3670
3671 // Now push the new interpreter frames
3672 __ bind(loop);
3673
3674 // allocate a new frame, filling the registers
3675
3676 gen_new_frame(masm, deopt); // allocate an interpreter frame
3677
3678 __ tst(O4array_size);
3679 __ br(Assembler::notZero, false, Assembler::pn, loop);
3680 __ delayed()->add(O3array, wordSize, O3array);
3681 __ ld_ptr(G3pcs, 0, O7); // load final frame new pc
3682
3683 }
3684 */
3685 3064
3686 //------------------------------generate_deopt_blob---------------------------- 3065 //------------------------------generate_deopt_blob----------------------------
3687 // Ought to generate an ideal graph & compile, but here's some SPARC ASM 3066 // Ought to generate an ideal graph & compile, but here's some SPARC ASM
3688 // instead. 3067 // instead.
3689 void SharedRuntime::generate_deopt_blob() { 3068 void SharedRuntime::generate_deopt_blob() {
3709 Register thread = TREG; 3088 Register thread = TREG;
3710 // use S7 for fetch_unroll_info returned UnrollBlock 3089 // use S7 for fetch_unroll_info returned UnrollBlock
3711 Register unroll = S7; 3090 Register unroll = S7;
3712 // Prolog for non exception case! 3091 // Prolog for non exception case!
3713 // Correct the return address we were given. 3092 // Correct the return address we were given.
3714 //FIXME: is the return address on the TOS or in RA? 3093 //FIXME: is the return address on the TOS or in RA?
3715 __ addi(RA, RA, - (NativeCall::return_address_offset_long)); 3094 __ addi(RA, RA, - (NativeCall::return_address_offset_long));
3716 // Save everything in sight. 3095 // Save everything in sight.
3717 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); 3096 map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
3718 // Normal deoptimization 3097 // Normal deoptimization
3719 __ move(reason, Deoptimization::Unpack_deopt); 3098 __ move(reason, Deoptimization::Unpack_deopt);
3720 __ b(cont); 3099 __ b(cont);
3721 __ delayed()->nop(); 3100 __ delayed()->nop();
3722 3101
3723 int reexecute_offset = __ pc() - start; 3102 int reexecute_offset = __ pc() - start;
3724 3103
3725 // Reexecute case 3104 // Reexecute case
3726 // the return address is the pc that describes what bci to re-execute at 3105 // the return address is the pc that describes what bci to re-execute at
3727 3106
3728 // No need to update map as each call to save_live_registers will produce identical oopmap 3107 // No need to update map as each call to save_live_registers will produce identical oopmap
3729 //__ addi(RA, RA, - (NativeCall::return_address_offset));
3730 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); 3108 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
3731 __ move(reason, Deoptimization::Unpack_reexecute); 3109 __ move(reason, Deoptimization::Unpack_reexecute);
3732 __ b(cont); 3110 __ b(cont);
3733 __ delayed()->nop(); 3111 __ delayed()->nop();
3734 3112
3735 int exception_offset = __ pc() - start; 3113 int exception_offset = __ pc() - start;
3736 // Prolog for exception case 3114 // Prolog for exception case
3737 3115
3738 // all registers are dead at this entry point, except for V0 and 3116 // all registers are dead at this entry point, except for V0 and
3739 // V1 which contain the exception oop and exception pc 3117 // V1 which contain the exception oop and exception pc
3740 // respectively. Set them in TLS and fall thru to the 3118 // respectively. Set them in TLS and fall thru to the
3741 // unpack_with_exception_in_tls entry point. 3119 // unpack_with_exception_in_tls entry point.
3742 3120
3743 __ get_thread(thread); 3121 __ get_thread(thread);
3744 __ st_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset())); 3122 __ st_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset()));
3745 __ st_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset())); 3123 __ st_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset()));
3746 int exception_in_tls_offset = __ pc() - start; 3124 int exception_in_tls_offset = __ pc() - start;
3747 // new implementation because exception oop is now passed in JavaThread 3125 // new implementation because exception oop is now passed in JavaThread
3748 3126
3749 // Prolog for exception case 3127 // Prolog for exception case
3750 // All registers must be preserved because they might be used by LinearScan 3128 // All registers must be preserved because they might be used by LinearScan
3751 // Exception oop and throwing PC are passed in JavaThread 3129 // Exception oop and throwing PC are passed in JavaThread
3752 // tos: stack at point of call to method that threw the exception (i.e. only 3130 // tos: stack at point of call to method that threw the exception (i.e. only
3753 // args are on the stack, no return address) 3131 // args are on the stack, no return address)
3754 3132
3755 // Return address will be patched later with the throwing pc. The correct value is not 3133 // Return address will be patched later with the throwing pc. The correct value is not
3756 // available now because loading it from memory would destroy registers. 3134 // available now because loading it from memory would destroy registers.
3757 // Save everything in sight. 3135 // Save everything in sight.
3758 // No need to update map as each call to save_live_registers will produce identical oopmap 3136 // No need to update map as each call to save_live_registers will produce identical oopmap
3759 __ addi(RA, RA, - (NativeCall::return_address_offset_long)); 3137 __ addi(RA, RA, - (NativeCall::return_address_offset_long));
3760 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); 3138 (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
3761 3139
3762 // Now it is safe to overwrite any register 3140 // Now it is safe to overwrite any register
3763 // store the correct deoptimization type 3141 // store the correct deoptimization type
3764 __ move(reason, Deoptimization::Unpack_exception); 3142 __ move(reason, Deoptimization::Unpack_exception);
3765 // load throwing pc from JavaThread and patch it as the return address 3143 // load throwing pc from JavaThread and patch it as the return address
3766 // of the current frame. Then clear the field in JavaThread 3144 // of the current frame. Then clear the field in JavaThread
3767 __ get_thread(thread); 3145 __ get_thread(thread);
3768 __ ld_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset())); 3146 __ ld_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset()));
3769 __ st_ptr(V1, SP, RegisterSaver::raOffset() * wordSize); //save ra 3147 __ st_ptr(V1, SP, RegisterSaver::raOffset() * wordSize); //save ra
3770 __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset())); 3148 __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset()));
3775 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset())); 3153 __ ld_ptr(AT, thread, in_bytes(JavaThread::exception_oop_offset()));
3776 __ verify_oop(AT); 3154 __ verify_oop(AT);
3777 // verify that there is no pending exception 3155 // verify that there is no pending exception
3778 Label no_pending_exception; 3156 Label no_pending_exception;
3779 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset())); 3157 __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
3780 __ beq(AT, R0, no_pending_exception); 3158 __ beq(AT, R0, no_pending_exception);
3781 __ delayed()->nop(); 3159 __ delayed()->nop();
3782 __ stop("must not have pending exception here"); 3160 __ stop("must not have pending exception here");
3783 __ bind(no_pending_exception); 3161 __ bind(no_pending_exception);
3784 #endif 3162 #endif
3785 __ bind(cont); 3163 __ bind(cont);
3786 // Compiled code leaves the floating point stack dirty; empty it. 3164 // Compiled code leaves the floating point stack dirty; empty it.
3787 __ empty_FPU_stack(); 3165 __ empty_FPU_stack();
3788 3166
3789 3167
3790 // Call C code. Need thread and this frame, but NOT official VM entry 3168 // Call C code. Need thread and this frame, but NOT official VM entry
3791 // crud. We cannot block on this call, no GC can happen. 3169 // crud. We cannot block on this call, no GC can happen.
3792 #ifndef OPT_THREAD 3170 #ifndef OPT_THREAD
3793 __ get_thread(thread); 3171 __ get_thread(thread);
3794 #endif 3172 #endif
3795 3173
3796 __ move(A0, thread); 3174 __ move(A0, thread);
3799 __ set_last_Java_frame(NOREG, NOREG, NULL); 3177 __ set_last_Java_frame(NOREG, NOREG, NULL);
3800 3178
3801 // Call fetch_unroll_info(). Need thread and this frame, but NOT official VM entry - cannot block on 3179 // Call fetch_unroll_info(). Need thread and this frame, but NOT official VM entry - cannot block on
3802 // this call, no GC can happen. Call should capture return values. 3180 // this call, no GC can happen. Call should capture return values.
3803 3181
3804 __ relocate(relocInfo::internal_pc_type); 3182 __ relocate(relocInfo::internal_pc_type);
3805 { 3183 {
3806 intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 28; 3184 intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 28;
3807 __ patchable_set48(AT, save_pc); 3185 __ patchable_set48(AT, save_pc);
3808 } 3186 }
3809 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset())); 3187 __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));


  // Move the unpack kind to a safe place in the UnrollBlock because
  // we are very short of registers

  Address unpack_kind(unroll, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
  __ sw(reason, unpack_kind);
  // save the unpack_kind value
  // Retrieve the possible live values (return values)
  // All callee save registers representing jvm state
  // are now in the vframeArray.
  __ delayed()->nop();
  __ ld_ptr(V0, thread, in_bytes(JavaThread::exception_oop_offset()));
  __ ld_ptr(V1, thread, in_bytes(JavaThread::exception_pc_offset()));
  __ st_ptr(R0, thread, in_bytes(JavaThread::exception_pc_offset()));
  __ st_ptr(R0, thread, in_bytes(JavaThread::exception_oop_offset()));

  __ verify_oop(V0);

  // Overwrite the result registers with the exception results.
  __ st_ptr(V0, SP, RegisterSaver::v0Offset() * wordSize);
  __ st_ptr(V1, SP, RegisterSaver::v1Offset() * wordSize);

  __ bind(noException);


  // Stack is back to only having register save data on the stack.
  // Now restore the result registers. Everything else is either dead or captured
  // in the vframeArray.

  RegisterSaver::restore_result_registers(masm);
  // All of the register save area has been popped off the stack. Only the
  // return address remains.
  // Pop all the frames we must move/replace.
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done, the return to frame 3 will still be on the stack.
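  //
  // A worked example (made-up size, for illustration only): if the deopting
  // frame occupied 0x40 bytes, adding 0x40 to SP below removes frame 2 while
  // frame 3's return address stays put, so the interpreter frames pushed
  // later will eventually return into frame 3 unchanged.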

  Register pcs = T0;
  // register for frame sizes
  Register sizes = T1;
  // register for frame count
  Register count = T3;

  // Pop deoptimized frame
  __ lw(AT, unroll, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes());
  __ add(SP, SP, AT);
  // sp should be pointing at the return address to the caller (3)

  // Load array of frame pcs into pcs
  __ ld_ptr(pcs, unroll, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes());
  __ addi(SP, SP, wordSize); // trash the old pc
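  // The old return address can be discarded here because the pcs[] array
  // loaded above supplies a return address for every frame about to be
  // rebuilt.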
  // Load array of frame sizes into sizes (T1)
  __ ld_ptr(sizes, unroll, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes());



  // Load count of frames into T3
  __ lw(count, unroll, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes());
  // Pick up the initial fp we should save
  __ ld(FP, unroll, Deoptimization::UnrollBlock::initial_info_offset_in_bytes());

  // Push interpreter frames in a loop
  /*
   *
  Loop:
    0x000000555bd82d18: lw t2, 0x0(t1)           ; lw sizes[i]    <--- error lw->ld
    0x000000555bd82d1c: ld at, 0x0(t0)           ; ld pcs[i]
    0x000000555bd82d20: daddi t2, t2, 0xfffffff0 ; t2 -= 16
    0x000000555bd82d24: daddi sp, sp, 0xfffffff0
    0x000000555bd82d28: sd fp, 0x0(sp)           ; push fp
    0x000000555bd82d2c: sd at, 0x8(sp)           ; push at
    0x000000555bd82d30: dadd fp, sp, zero        ; fp <- sp
    0x000000555bd82d34: dsub sp, sp, t2          ; sp -= t2
    0x000000555bd82d38: sd zero, 0xfffffff0(fp)  ; __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
    0x000000555bd82d3c: sd s4, 0xfffffff8(fp)    ; __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);
    0x000000555bd82d40: dadd s4, sp, zero        ; move(sender_sp, SP);
    0x000000555bd82d44: daddi t3, t3, 0xffffffff ; count --
    0x000000555bd82d48: daddi t1, t1, 0x4        ; sizes += 4
  */
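  // Note: the loop below already incorporates the lw->ld fix called out in
  // the listing above -- frame sizes are 64-bit, so sizes[i] is loaded with
  // ld and both array pointers advance by wordSize (8), not 4.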

  // pcs[0] = frame_pcs[0] = deopt_sender.raw_pc();
  Label loop;
  __ bind(loop);
  __ ld(T2, sizes, 0);             // Load frame size
  __ ld_ptr(AT, pcs, 0);           // save return address
  __ addi(T2, T2, -2 * wordSize);  // we'll push pc and fp by hand
  __ push2(AT, FP);
  __ move(FP, SP);
  __ sub(SP, SP, T2);              // Prolog!
  // This value is corrected by layout_activation_impl
  __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
  __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize); // Make it walkable
  __ move(sender_sp, SP);          // pass to next frame
  __ addi(count, count, -1);       // decrement counter
  __ addi(sizes, sizes, wordSize); // Bump array pointer (sizes)
  __ bne(count, R0, loop);
  __ delayed()->addi(pcs, pcs, wordSize); // Bump array pointer (pcs)
  __ ld(AT, pcs, 0);               // frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
  // Re-push self-frame
  __ push2(AT, FP);
  __ move(FP, SP);
  __ sd(R0, FP, frame::interpreter_frame_last_sp_offset * wordSize);
  __ sd(sender_sp, FP, frame::interpreter_frame_sender_sp_offset * wordSize);
  __ addi(SP, SP, -(frame_size_in_words - 2 - additional_words) * wordSize);

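  // A sketch of the SP adjustment above (assuming the usual layout): the
  // self-frame needs frame_size_in_words words in all; push2() already
  // supplied 2 of them (pc and fp), and additional_words more are claimed
  // separately just before the unpack_frames() call below, so only the
  // remainder is allocated here.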
  // Restore frame locals after moving the frame
  __ sd(V0, SP, RegisterSaver::v0Offset() * wordSize);
  __ sd(V1, SP, RegisterSaver::v1Offset() * wordSize);
  __ sdc1(F0, SP, RegisterSaver::fpResultOffset() * wordSize); // Pop float stack and store in local
  __ sdc1(F1, SP, (RegisterSaver::fpResultOffset() + 1) * wordSize);


  // Call unpack_frames(). Need thread and this frame, but NOT official VM entry - cannot block on
  // this call, no GC can happen.
  __ move(A1, reason);   // exec_mode
  __ get_thread(thread);
  __ move(A0, thread);   // thread
  __ addi(SP, SP, (-additional_words) * wordSize);

  // set last_Java_sp, last_Java_fp
  __ set_last_Java_frame(NOREG, FP, NULL);

  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);   // Fix stack alignment as required by ABI

  __ relocate(relocInfo::internal_pc_type);
  {
    intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 28;
    __ patchable_set48(AT, save_pc);
  }
  __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));

  __ call(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), relocInfo::runtime_call_type);
  __ delayed()->nop();
  // Revert SP alignment after call since we're going to do some SP relative addressing below
  __ ld(SP, thread, in_bytes(JavaThread::last_Java_sp_offset()));
  // Set an oopmap for the call site
  oop_maps->add_gc_map(__ offset(), new OopMap(frame_size_in_words, 0));

  __ push(V0);

  __ get_thread(thread);
  __ reset_last_Java_frame(true, true);

  // Collect return values
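  // The "+ additional_words + 1" in the offsets below is presumably the
  // push(V0) above (one word) plus the extra argument space claimed before
  // the call, so these loads read back the slots stored before
  // unpack_frames() ran.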
  __ ld(V0, SP, (RegisterSaver::v0Offset() + additional_words + 1) * wordSize);
  __ ld(V1, SP, (RegisterSaver::v1Offset() + additional_words + 1) * wordSize);
  __ ldc1(F0, SP, RegisterSaver::fpResultOffset() * wordSize); // Reload the float results
  __ ldc1(F1, SP, (RegisterSaver::fpResultOffset() + 1) * wordSize);
  // FIXME:
  // Clear floating point stack before returning to interpreter
  __ empty_FPU_stack();
  // FIXME: we should consider float and double
  // Push a float or double return value if necessary.
  __ leave();
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 512*80, 512*40);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  enum frame_layout {
    s0_off, s0_off2,
    s1_off, s1_off2,
    s2_off, s2_off2,
    s3_off, s3_off2,
    s4_off, s4_off2,
    s5_off, s5_off2,
    s6_off, s6_off2,
    s7_off, s7_off2,
    fp_off, fp_off2,
    return_off, return_off2, // slot for return address sp + 9
    framesize
  };
  assert(framesize % 4 == 0, "sp not 16-byte aligned");
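  // framesize counts 4-byte slots (each saved register takes a pair of
  // *_off/*_off2 slots), so framesize % 4 == 0 makes the SP adjustment below
  // a multiple of 16 bytes -- consistent with the StackAlignmentInBytes
  // masking used elsewhere in this file.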

  address start = __ pc();
  __ daddiu(SP, SP, -framesize * BytesPerInt);

  __ sd(RA, SP, return_off * BytesPerInt);
  __ sd(FP, SP, fp_off * BytesPerInt);

  // Save the callee-saved registers (S0..S7)
  __ sd(S0, SP, s0_off * BytesPerInt);
  __ sd(S1, SP, s1_off * BytesPerInt);
  __ sd(S2, SP, s2_off * BytesPerInt);
  __ sd(S3, SP, s3_off * BytesPerInt);
#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  // set last_Java_sp
  __ set_last_Java_frame(NOREG, FP, NULL);
  __ relocate(relocInfo::internal_pc_type);
  {
    long save_pc = (long)__ pc() + 52;
    __ patchable_set48(AT, (long)save_pc);
    __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));
  }
  // Call C code. Need thread but NOT official VM entry

  // Set an oopmap for the call site
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(framesize, 0);

  map->set_callee_saved(VMRegImpl::stack2reg(s0_off), S0->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s1_off), S1->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s2_off), S2->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s3_off), S3->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s4_off), S4->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s5_off), S5->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s6_off), S6->as_VMReg());
  map->set_callee_saved(VMRegImpl::stack2reg(s7_off), S7->as_VMReg());

  //oop_maps->add_gc_map( __ offset(), true, map);
  oop_maps->add_gc_map(__ offset(), map);

#ifndef OPT_THREAD
  __ get_thread(thread);
#endif
  __ reset_last_Java_frame(false, false);

  // Load UnrollBlock into S7
  Register unroll = S7;
  __ move(unroll, V0);

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame (no frame link)
  // 3: possible-i2c-adapter-frame
  // 4: caller of deopting frame (could be compiled/interpreted; if
  //    interpreted we will create a c2i here)
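  //
  // A sketch of the intended flow: the UnrollBlock in S7 records how big the
  // frames to discard are; they are all popped, then one interpreter frame
  // is pushed per unrolled Java frame, mirroring the loop in the deopt blob
  // above.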

  // Pop self-frame. We have no frame, and must rely only on V0 and SP.
  __ daddiu(SP, SP, framesize * BytesPerInt);
  Register sender_sp = T1;

  // sp should be pointing at the return address to the caller (4)
  // Load array of frame pcs into pcs
  __ ld(pcs, unroll, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes());

  // Load array of frame sizes into sizes
  __ ld(sizes, unroll, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes());
  __ lwu(count, unroll, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes());

  // Re-push self-frame
  __ daddi(SP, SP, -2 * wordSize);  // save old & set new FP
  __ sd(FP, SP, 0 * wordSize);      // save old FP
  __ sd(RA, SP, 1 * wordSize);      // save final return address
  __ move(FP, SP);
  __ daddi(SP, SP, -(framesize / 2 - 2) * wordSize);
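  // framesize is in 4-byte slots, so framesize / 2 converts it to 8-byte
  // words; the "- 2" accounts for the FP/RA pair stored just above. The
  // result is a self-frame of the same overall size as the one popped
  // earlier.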

  // set last_Java_sp, last_Java_fp
  __ set_last_Java_frame(NOREG, FP, NULL);

  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);   // Fix stack alignment as required by ABI

  __ relocate(relocInfo::internal_pc_type);
  {
    long save_pc = (long)__ pc() + 52;
    __ patchable_set48(AT, (long)save_pc);
  }
  __ sd(AT, thread, in_bytes(JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()));

  // restore return values to their stack-slots with the new SP.
  __ move(A0, thread);
  __ move(A1, Deoptimization::Unpack_uncommon_trap);
  __ patchable_call((address)Deoptimization::unpack_frames);
  // Set an oopmap for the call site
  //oop_maps->add_gc_map( __ offset(), true, new OopMap( framesize, 0 ) );
  oop_maps->add_gc_map(__ offset(), new OopMap(framesize, 0)); //Fu

  __ reset_last_Java_frame(true, true);

  // Pop self-frame.
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap and calls safepoint code to stop the compiled code for
// a safepoint.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code.
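//
// Roughly: the VM arms the polling page, the nmethod's poll instruction
// faults, the signal handler redirects execution here, and this blob saves
// the register state, calls call_ptr to process the safepoint, then either
// forwards a pending exception or returns to the interrupted code.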

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int pool_type) {

  // Account for thread arg in our frame
  const int additional_words = 0;
  int frame_size_in_words;

  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // allocate space for the code
  // setup code generation tools
  CodeBuffer buffer("handler_blob", 2048, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  const Register thread = TREG;
  address start = __ pc();
  address call_pc = NULL;
  bool cause_return = (pool_type == POLL_AT_RETURN);
  bool save_vectors = (pool_type == POLL_AT_VECTOR_LOOP);

  // If cause_return is true we are at a poll_return, and the return
  // address to the caller of the nmethod is in RA
#endif

  if (!cause_return) {
    __ ld_ptr(RA, Address(thread, JavaThread::saved_exception_pc_offset()));
  }
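  // At a non-return poll the signal handler stashed the interrupted pc in
  // saved_exception_pc; loading it into RA above presumably makes the frame
  // built below look like an ordinary call from the poll site, so stack
  // walking attributes it to the right code.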

  __ pop(thread);
  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, save_vectors);

#ifndef OPT_THREAD
  __ get_thread(thread);
  // work ourselves.

  __ move(A0, thread);
  __ set_last_Java_frame(NOREG, NOREG, NULL);
4291 3653
4292 // do the call 3654 // do the call
4293 //__ lui(T9, Assembler::split_high((int)call_ptr)); 3655 //__ lui(T9, Assembler::split_high((int)call_ptr));
4294 //__ addiu(T9, T9, Assembler::split_low((int)call_ptr)); 3656 //__ addiu(T9, T9, Assembler::split_low((int)call_ptr));
4295 __ call(call_ptr); 3657 __ call(call_ptr);
4313 // Exception pending 3675 // Exception pending
4314 3676
4315 RegisterSaver::restore_live_registers(masm, save_vectors); 3677 RegisterSaver::restore_live_registers(masm, save_vectors);
4316 //forward_exception_entry need return address on the stack 3678 //forward_exception_entry need return address on the stack
4317 __ push(RA); 3679 __ push(RA);
  __ patchable_jump((address)StubRoutines::forward_exception_entry());

  // No exception case
  __ bind(noException);
  // Normal exit: restore registers and return
  RegisterSaver::restore_live_registers(masm, save_vectors);
  __ jr(RA);
  __ delayed()->nop();

  masm->flush();

  // Fill-out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
  // FIXME (aoqi): code_size
  CodeBuffer buffer(name, 20000, 2048);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_words;
  // we put the thread in A0

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();
  int frame_complete = __ offset();

  const Register thread = T8;
  __ get_thread(thread);

  __ move(A0, thread);
  __ set_last_Java_frame(noreg, FP, NULL);
  // align the stack before invoking native code
  __ move(AT, -(StackAlignmentInBytes));
  __ andr(SP, SP, AT);
  __ relocate(relocInfo::internal_pc_type);
  {
    intptr_t save_pc = (intptr_t)__ pc() + NativeMovConstReg::instruction_size + 24 + 1 * BytesPerInstWord;
    __ patchable_set48(AT, save_pc);
  }
  __ sd(AT, thread, in_bytes(JavaThread::last_Java_pc_offset()));

  __ call(destination);
  __ reset_last_Java_frame(true, true);
  // check for pending exceptions
  Label pending;
  __ ld_ptr(AT, thread, in_bytes(Thread::pending_exception_offset()));
  __ bne(AT, R0, pending);
  __ delayed()->nop();
  // get the returned Method*
  // FIXME: does MIPS need this?
  __ get_vm_result_2(Rmethod, thread); // Refer to OpenJDK8
  __ st_ptr(Rmethod, SP, RegisterSaver::methodOffset() * wordSize);
  __ st_ptr(V0, SP, RegisterSaver::v0Offset() * wordSize);
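  // Patch the resolved Method* and the target entry point (returned in V0)
  // into the register save area, so that restore_live_registers() below
  // hands them back to the caller in Rmethod and V0 -- presumably the
  // contract the call-resolution stubs rely on.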
  RegisterSaver::restore_live_registers(masm);

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler
  // forward_exception_entry needs the return address on the stack
  __ push(RA);
  __ get_thread(thread);
  __ st_ptr(R0, thread, in_bytes(JavaThread::vm_result_offset()));
  __ ld_ptr(V0, thread, in_bytes(Thread::pending_exception_offset()));
  __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  __ delayed()->nop();
  // -------------
  // make sure all code is generated
  masm->flush();

  RuntimeStub* tmp = RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
  return tmp;
}

extern "C" int SpinPause() { return 0; }
// extern "C" int SafeFetch32 (int * adr, int errValue) {return 0;} ;
// extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) {return *adr; } ;
