Thu, 10 Nov 2016 10:48:04 +0800
[C2] Remove redundant sync operations.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // Godson3 (Loongson 3) Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
// Each <reg>_H entry names the upper half of the same physical register,
// so a 64-bit value occupies a (reg, reg_H) pair in the allocator's view.
// reg_def format: name (call convention, C-call convention, ideal type,
// encoding number, VMReg); see the format comment above this block.
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
// S0..S7 are callee-saved in the C calling convention, hence SOE
// (save-on-entry) rather than SOC.
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
// Defined NS (no-save) and excluded from the allocatable reg_classes below:
// K0/K1, GP, SP, FP and RA never take part in normal register allocation.
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
// All FP registers are caller-saved (SOC) on this port.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for heapbase of compressed oop
// Allocation class for the general-purpose registers. Order expresses
// allocation preference: callee-saved S-registers first, then caller-saved
// temporaries, then argument/return registers; GP/RA/SP/FP come last and
// are never allocatable anyway (reserved by their NS call convention).
// FIX: a comma was missing between "GP, GP_H" and "RA, RA_H", which is a
// syntax error in the ADLC register list.
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H,
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
// Allocation class for the floating-point registers. The order is an
// allocation preference list: F12..F19 appear late, presumably because they
// double as FP argument registers (see flt_arg0/dbl_arg0/dbl_arg1 below) —
// TODO confirm against the calling convention.
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
// Allocation class for the condition-flag pseudo register.
249 alloc_class chunk2(MIPS_FLAG);
// Single-register and small helper classes used by operand definitions.
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
// 64-bit (reg, reg_H) pair classes for long values.
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
// General allocatable int class. Note it deliberately omits S5 and S6
// (reserved: S6 for get_thread, S5 for the compressed-oop heap base; see
// the comments above chunk0), plus AT and T9 which serve as scratch.
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
// Same as int_reg but with the A-argument registers excluded.
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
// Pointer class: 64-bit pairs of the allocatable general registers.
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
// Pointer class excluding T8 (for instructions that clobber T8).
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
// Long class: same membership as p_reg.
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
// FIX: a comma was missing between F17 and F18 in flt_reg, which is a
// syntax error in the ADLC register list. F30/F31 are deliberately excluded
// from both flt_reg and dbl_reg (scratch, per the comment above).
384 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29);
385 reg_class dbl_reg( F0, F0_H,
386 F1, F1_H,
387 F2, F2_H,
388 F3, F3_H,
389 F4, F4_H,
390 F5, F5_H,
391 F6, F6_H,
392 F7, F7_H,
393 F8, F8_H,
394 F9, F9_H,
395 F10, F10_H,
396 F11, F11_H,
397 F12, F12_H,
398 F13, F13_H,
399 F14, F14_H,
400 F15, F15_H,
401 F16, F16_H,
402 F17, F17_H,
403 F18, F18_H,
404 F19, F19_H,
405 F20, F20_H,
406 F21, F21_H,
407 F22, F22_H,
408 F23, F23_H,
409 F24, F24_H,
410 F25, F25_H,
411 F26, F26_H,
412 F27, F27_H,
413 F28, F28_H,
414 F29, F29_H);
// FP argument registers.
416 reg_class flt_arg0( F12 );
417 reg_class dbl_arg0( F12, F12_H );
418 reg_class dbl_arg1( F14, F14_H );
420 %}
422 //----------DEFINITION BLOCK---------------------------------------------------
423 // Define name --> value mappings to inform the ADLC of an integer valued name
424 // Current support includes integer values in the range [0, 0x7FFFFFFF]
425 // Format:
426 // int_def <name> ( <int_value>, <expression>);
427 // Generated Code in ad_<arch>.hpp
428 // #define <name> (<expression>)
429 // // value == <int_value>
430 // Generated code in ad_<arch>.cpp adlc_verification()
431 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
432 //
// Integer cost constants used by instruction definitions below; the second
// value is the expression ADLC emits (verified against the first).
433 definitions %{
434 int_def DEFAULT_COST ( 100, 100);
435 int_def HUGE_COST (1000000, 1000000);
437 // Memory refs are twice as expensive as run-of-the-mill.
438 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
440 // Branches are even more expensive.
441 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
442 // Calls are built from a jr instruction sequence, so they cost more.
443 // by yjl 2/28/2006
444 int_def CALL_COST ( 500, DEFAULT_COST * 5);
// Unused condition-code name definitions, kept for reference.
445 /*
446 int_def EQUAL ( 1, 1 );
447 int_def NOT_EQUAL ( 2, 2 );
448 int_def GREATER ( 3, 3 );
449 int_def GREATER_EQUAL ( 4, 4 );
450 int_def LESS ( 5, 5 );
451 int_def LESS_EQUAL ( 6, 6 );
452 */
453 %}
457 //----------SOURCE BLOCK-------------------------------------------------------
458 // This is a block of C++ code which provides values, functions, and
459 // definitions necessary in the rest of the architecture description
461 source_hpp %{
462 // Header information of the source block.
463 // Method declarations/definitions which are used outside
464 // the ad-scope can conveniently be defined here.
465 //
466 // To keep related declarations/definitions/uses close together,
467 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
class CallStubImpl {

  //--------------------------------------------------------------
  //---< Used for optimization in Compile::shorten_branches >---
  //--------------------------------------------------------------

 public:

  // This platform emits no call trampoline stubs, so both the stub
  // size and the number of relocations it needs are zero.
  static uint size_call_trampoline()  { return 0; }
  static uint reloc_call_trampoline() { return 0; }
};
// Emits and sizes the exception and deoptimization handler stubs
// (implementations of the emit_* functions live in the source %{ %} block).
487 class HandlerImpl {
489 public:
491 static int emit_exception_handler(CodeBuffer &cbuf);
492 static int emit_deopt_handler(CodeBuffer& cbuf);
494 static uint size_exception_handler() {
495 // NativeCall instruction size is the same as NativeJump.
496 // exception handler starts out as jump and can be patched to
497 // a call by deoptimization. (4932387)
498 // Note that this value is also credited (in output.cpp) to
499 // the size of the code section.
500 // return NativeJump::instruction_size;
501 int size = NativeCall::instruction_size;
502 return round_to(size, 16);
503 }
505 #ifdef _LP64
506 static uint size_deopt_handler() {
507 int size = NativeCall::instruction_size;
508 return round_to(size, 16);
509 }
510 #else
511 static uint size_deopt_handler() {
512 // NativeCall instruction size is the same as NativeJump.
513 // exception handler starts out as jump and can be patched to
514 // a call by deoptimization. (4932387)
515 // Note that this value is also credited (in output.cpp) to
516 // the size of the code section.
517 return 5 + NativeJump::instruction_size; // pushl(); jmp;
518 }
519 #endif
520 };
522 %} // end source_hpp
524 source %{
526 #define NO_INDEX 0
527 #define RELOC_IMM64 Assembler::imm_operand
528 #define RELOC_DISP32 Assembler::disp32_operand
531 #define __ _masm.
534 // Emit exception handler code.
535 // Stuff framesize into a register and call a VM stub routine.
// Emits the exception-handler stub: an indirect jump through T9 to the
// OptoRuntime exception blob. Returns the stub's offset in the code buffer,
// or 0 if the stub could not be allocated.
// (A commented-out x86-style direct-jump variant was removed here; the
// rationale for keeping jr over a direct jump is preserved below.)
536 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
549 // Note that the code buffer's insts_mark is always relative to insts.
550 // That's why we must use the macroassembler to generate a handler.
551 MacroAssembler _masm(&cbuf);
552 address base =
553 __ start_a_stub(size_exception_handler());
554 if (base == NULL) return 0; // CodeBuffer::expand failed
555 int offset = __ offset();
557 __ block_comment("; emit_exception_handler");
559 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
560 * * However, this will trigger an assert after the 40th method:
561 * *
562 * * 39 b java.lang.Throwable::<init> (25 bytes)
563 * * --- ns java.lang.Throwable::fillInStackTrace
564 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
565 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
566 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
567 * *
568 * * If we change from JR to JALR, the assert will disappear, but WebClient will
569 * * fail after the 403th method with unknown reason.
570 * */
571 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
572 __ jr(T9);
573 __ delayed()->nop();
574 __ align(16);
575 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
576 __ end_a_stub();
577 return offset;
578 }
580 // Emit deopt handler code.
// Emits the deoptimization-handler stub: a jalr through T9 to the
// SharedRuntime deopt blob's unpack entry (jalr so the return address is
// recorded). Returns the stub's offset in the code buffer, or 0 on failure.
// (A commented-out x86 push-pc/jump variant was removed here as dead code.)
581 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
611 // Note that the code buffer's insts_mark is always relative to insts.
612 // That's why we must use the macroassembler to generate a handler.
613 MacroAssembler _masm(&cbuf);
614 address base =
615 __ start_a_stub(size_deopt_handler());
617 // FIXME
618 if (base == NULL) return 0; // CodeBuffer::expand failed
619 int offset = __ offset();
621 __ block_comment("; emit_deopt_handler");
623 cbuf.set_insts_mark();
624 __ relocate(relocInfo::runtime_call_type);
626 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
627 __ jalr(T9);
628 __ delayed()->nop();
629 __ align(16);
630 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
631 __ end_a_stub();
632 return offset;
633 }
// Returns whether the given ideal opcode is supported on this platform.
// An opcode is supported iff this AD file provides a match rule for it;
// no opcode currently needs an additional CPU-feature check here.
// (A commented-out x86 feature-check switch was removed as dead code.)
636 const bool Matcher::match_rule_supported(int opcode) {
  return has_match_rule(opcode);
}
662 //FIXME
663 // emit call stub, compiled java to interpreter
// Emits the static call stub: li48 of a placeholder method pointer into S3
// followed by an indirect jump through AT to a placeholder target. Both are
// patched later when the call is resolved (recognized as unresolved by the
// relocs/nativeInst/ic machinery). Returns silently if stub space runs out.
664 void emit_java_to_interp(CodeBuffer &cbuf ) {
665 // Stub is fixed up when the corresponding call is converted from calling
666 // compiled code to calling interpreted code.
667 // mov rbx,0
668 // jmp -1
670 address mark = cbuf.insts_mark(); // get mark within main instrs section
672 // Note that the code buffer's insts_mark is always relative to insts.
673 // That's why we must use the macroassembler to generate a stub.
674 MacroAssembler _masm(&cbuf);
676 address base =
677 __ start_a_stub(Compile::MAX_stubs_size);
678 if (base == NULL) return; // CodeBuffer::expand failed
679 // static stub relocation stores the instruction address of the call
681 __ relocate(static_stub_Relocation::spec(mark), 0);
683 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
684 /*
685 int oop_index = __ oop_recorder()->allocate_index(NULL);
686 RelocationHolder rspec = oop_Relocation::spec(oop_index);
687 __ relocate(rspec);
688 */
690 // static stub relocation also tags the methodOop in the code-stream.
691 __ li48(S3, (long)0);
692 // This is recognized as unresolved by relocs/nativeInst/ic code
694 __ relocate(relocInfo::runtime_call_type);
696 cbuf.set_insts_mark();
697 address call_pc = (address)-1;
698 __ li48(AT, (long)call_pc);
699 __ jr(AT);
700 __ nop();
701 __ align(16);
702 __ end_a_stub();
703 // Update current stubs pointer and restore code_end.
704 }
706 // size of call stub, compiled java to interpretor
707 uint size_java_to_interp() {
708 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
709 return round_to(size, 16);
710 }
// Number of relocation entries needed by the java-to-interpreter call stub:
// those emitted in emit_java_to_interp plus those from Java_Static_Call.
uint reloc_java_to_interp() {
  return 16;
}
// Returns whether a branch at the given offset can use the short encoding.
// A MIPS conditional branch carries a signed 16-bit displacement, so any
// offset that fits simm16 qualifies; longer branches are not implemented
// on this port yet.
// FIX: the original fell off the end of this non-void function on the
// out-of-range path (undefined behavior); an explicit return is added.
717 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  if (Assembler::is_simm16(offset)) return true;
  assert(false, "Not implemented yet !");
  Unimplemented();
  return false; // not reached; keeps the function well-formed
}
727 // No additional cost for CMOVL.
728 const int Matcher::long_cmove_cost() { return 0; }
// Float/double conditional moves go through the generic conditional-move
// limit. (Comment below references SSE2; inherited from the x86 port.)
730 // No CMOVF/CMOVD with SSE2
731 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
733 // Does the CPU require late expand (see block.cpp for description of late expand)?
734 const bool Matcher::require_postalloc_expand = false;
736 // Do we need to mask the count passed to shift instructions or does
737 // the cpu only look at the lower 5/6 bits anyway?
738 const bool Matcher::need_masked_shift_count = false;
// Narrow-oop addressing can fold the decode into the address expression
// only while the oop shift is at most 3.
740 bool Matcher::narrow_oop_use_complex_address() {
741 assert(UseCompressedOops, "only for compressed oops code");
742 return (LogMinObjAlignmentInBytes <= 3);
743 }
745 bool Matcher::narrow_klass_use_complex_address() {
746 assert(UseCompressedClassPointers, "only for compressed klass code");
747 return (LogKlassAlignmentInBytes <= 3);
748 }
750 // This is UltraSparc specific, true just means we have fast l2f conversion
751 const bool Matcher::convL2FSupported(void) {
752 return true;
753 }
757 // Max vector size in bytes. 0 if not supported.
// Vectorization is disabled on this port.
758 const int Matcher::vector_width_in_bytes(BasicType bt) {
759 // return UseSSE >= 2 ? 8 : 0;
760 return 0;
761 }
763 // Register for MODI projection of divmodI
764 RegMask Matcher::modI_proj_mask() {
765 return P_REG_mask();
766 }
768 // Register for DIVL projection of divmodL
// divmodL is not matched on this port, so this must never be queried.
769 RegMask Matcher::divL_proj_mask() {
770 ShouldNotReachHere();
771 return RegMask();
772 }
774 int Matcher::regnum_to_fpu_offset(int regnum) {
775 return regnum - 32; // The FP registers are in the second chunk
776 }
779 const bool Matcher::isSimpleConstant64(jlong value) {
780 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
781 return false;
782 }
784 // Register for DIVI projection of divmodI
785 RegMask Matcher::divI_proj_mask() {
786 return P_REG_mask();
787 }
789 // Limits on vector size (number of elements) loaded into vector.
// With vector_width_in_bytes() == 0 this is always 0 here.
790 const int Matcher::max_vector_size(const BasicType bt) {
791 return vector_width_in_bytes(bt)/type2aelembytes(bt);
792 }
793 const int Matcher::min_vector_size(const BasicType bt) {
794 int max_size = max_vector_size(bt);
795 // Min size which can be loaded into vector is 4 bytes.
796 int size = (type2aelembytes(bt) == 1) ? 4 : 2;
797 return MIN2(size,max_size);
798 }
800 // Vector ideal reg
801 const int Matcher::vector_ideal_reg(int size) {
802 return 0;
803 }
805 // Only lowest bits of xmm reg are used for vector shift count.
806 const int Matcher::vector_shift_count_ideal_reg(int size) {
807 return Op_VecS;
808 }
810 // x86 supports misaligned vectors store/load.
811 const bool Matcher::misaligned_vectors_ok() {
812 return !AlignVector; // can be changed by flag
813 }
815 // Return whether or not this register is ever used as an argument. This
816 // function is used on startup to build the trampoline stubs in generateOptoStub.
817 // Registers not mentioned will be killed by the VM call in the trampoline, and
818 // arguments in those registers not be available to the callee.
819 bool Matcher::can_be_java_arg( int reg ) {
820 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
821 if ( reg == T0_num || reg == T0_H_num
822 || reg == A0_num || reg == A0_H_num
823 || reg == A1_num || reg == A1_H_num
824 || reg == A2_num || reg == A2_H_num
825 || reg == A3_num || reg == A3_H_num
826 || reg == A4_num || reg == A4_H_num
827 || reg == A5_num || reg == A5_H_num
828 || reg == A6_num || reg == A6_H_num
829 || reg == A7_num || reg == A7_H_num )
830 return true;
832 if ( reg == F12_num || reg == F12_H_num
833 || reg == F13_num || reg == F13_H_num
834 || reg == F14_num || reg == F14_H_num
835 || reg == F15_num || reg == F15_H_num
836 || reg == F16_num || reg == F16_H_num
837 || reg == F17_num || reg == F17_H_num
838 || reg == F18_num || reg == F18_H_num
839 || reg == F19_num || reg == F19_H_num )
840 return true;
842 return false;
843 }
// An argument register may also be used as a spill location.
845 bool Matcher::is_spillable_arg( int reg ) {
846 return can_be_java_arg(reg);
847 }
849 // TODO(MIPS): revisit whether a multiply-based sequence beats DIV here.
850 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
851 // In 64 bit mode a code which use multiply when
852 // devisor is constant is faster than hardware
853 // DIV instruction (it uses MulHiL).
854 return false;
855 }
857 // Register for MODL projection of divmodL
// divmodL is not matched on this port, so this must never be queried.
858 RegMask Matcher::modL_proj_mask() {
859 ShouldNotReachHere();
860 return RegMask();
861 }
// SP is saved in FP around method-handle invokes.
863 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
864 return FP_REG_mask();
865 }
867 // x86 AES instructions are compatible with SunJCE expanded
868 // keys, hence we do not need to pass the original key to stubs
869 const bool Matcher::pass_original_key_for_aes() {
870 return false;
871 }
873 // The address of the call instruction needs to be 16-byte aligned to
874 // ensure that it does not span a cache line so that it can be patched.
// The instruction mnemonics listed in each body document the emitted call
// sequence whose start must land on the alignment boundary.
876 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
877 //lui
878 //ori
879 //dsll
880 //ori
882 //jalr
883 //nop
885 return round_to(current_offset, alignment_required()) - current_offset;
886 }
888 // The address of the call instruction needs to be 16-byte aligned to
889 // ensure that it does not span a cache line so that it can be patched.
890 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
891 //li64 <--- skip
893 //lui
894 //ori
895 //dsll
896 //ori
898 //jalr
899 //nop
// The leading li64 (six 4-byte instructions loading the IC klass) is not
// part of the patchable call, so alignment is computed past it.
901 current_offset += 4 * 6; // skip li64
902 return round_to(current_offset, alignment_required()) - current_offset;
903 }
905 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
906 //lui
907 //ori
908 //dsll
909 //ori
911 //jalr
912 //nop
914 return round_to(current_offset, alignment_required()) - current_offset;
915 }
917 int CallLeafDirectNode::compute_padding(int current_offset) const {
918 //lui
919 //ori
920 //dsll
921 //ori
923 //jalr
924 //nop
926 return round_to(current_offset, alignment_required()) - current_offset;
927 }
929 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
930 //lui
931 //ori
932 //dsll
933 //ori
935 //jalr
936 //nop
938 return round_to(current_offset, alignment_required()) - current_offset;
939 }
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = false;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// The ecx parameter to rep stos for the ClearArray node is in dwords.
const bool Matcher::init_array_count_is_in_bytes = false;

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs.
const bool Matcher::clone_shift_expressions = false;
// Indicate if the safepoint node needs the polling page as an input.
// Since MIPS doesn't have absolute addressing, it needs the poll address
// materialized in a register, hence 'true'.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The return address immediately follows the 6-instruction call sequence
  // (24 bytes, as pinned by the assert above):
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop
  return NativeCall::instruction_size;
}
991 int MachCallDynamicJavaNode::ret_addr_offset() {
992 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
994 // return NativeCall::instruction_size;
995 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
996 //The value ought to be 4 + 16 bytes.
997 //lui IC_Klass,
998 //ori IC_Klass,
999 //dsll IC_Klass
1000 //ori IC_Klass
1001 //lui T9
1002 //ori T9
1003 //dsll T9
1004 //ori T9
1005 //jalr T9
1006 //nop
1007 return 6 * 4 + NativeCall::instruction_size;
1009 }
1011 /*
1012 // EMIT_OPCODE()
1013 void emit_opcode(CodeBuffer &cbuf, int code) {
1014 *(cbuf.code_end()) = (unsigned char)code;
1015 cbuf.set_code_end(cbuf.code_end() + 1);
1016 }
1017 */
// Emit a 32-bit value into the instruction stream and record a relocation
// of the given type/format at the current insts_mark.
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
        int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}
1025 //=============================================================================
1027 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1028 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1029 static enum RC rc_class( OptoReg::Name reg ) {
1030 if( !OptoReg::is_valid(reg) ) return rc_bad;
1031 if (OptoReg::is_stack(reg)) return rc_stack;
1032 VMReg r = OptoReg::as_VMReg(reg);
1033 if (r->is_Register()) return rc_int;
1034 assert(r->is_FloatRegister(), "must be");
1035 return rc_float;
1036 }
// Spill-copy workhorse shared by format()/emit()/size() below.
// Moves the value of in(1) into this node's assigned location, covering
// all combinations of {stack, gpr, fpr} x {stack, gpr, fpr} and both
// 32-bit and 64-bit widths.  Modes:
//   cbuf != NULL            -> emit the instructions
//   cbuf == NULL, !do_size  -> print the assembly to 'st' (non-PRODUCT)
//   cbuf == NULL, do_size   -> only compute the size
// Returns the byte size of the copy code.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: go through AT since MIPS has no mem-to-mem move.
      assert(src_second != dst_first, "overlap");
      // An even first register paired with the following one means a
      // 64-bit value; this test recurs throughout the routine.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Sign-extend ints, zero-extend everything else 32-bit.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw [SP + #%d], %s\t# spill 9",
                      offset,
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s, %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s, %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s, %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s, %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Every legal src/dst class combination returns above.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Pretty-print the spill copy (nothing is emitted).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill-copy instructions into cbuf.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size in bytes of the spill copy (do_size mode: no emit, no print).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1470 //=============================================================================
1471 #
#ifndef PRODUCT
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif

// Emit a breakpoint (debugger trap) instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}

uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1489 //=============================================================================
1490 #ifndef PRODUCT
1491 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1492 Compile *C = ra_->C;
1493 int framesize = C->frame_size_in_bytes();
1495 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1497 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1498 st->cr(); st->print("\t");
1499 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -8);
1500 st->cr(); st->print("\t");
1501 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -16);
1503 if( do_polling() && C->is_method_compilation() ) {
1504 st->print("Poll Safepoint # MachEpilogNode");
1505 }
1506 }
1507 #endif
// Emit the method epilog: pop the frame, restore RA/FP and, for method
// compilations, touch the polling page so a safepoint can be taken at
// return.  The poll load is tagged with a poll_return relocation, which
// must be emitted immediately before the load it describes.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame, then reload RA/FP from their save slots which now
  // sit just below the restored SP.
  __ daddiu(SP, SP, framesize);
  __ ld(RA, SP, -wordSize );
  __ ld(FP, SP, -wordSize*2 );

  /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    __ li48(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Offset of the safepoint poll within the epilog; 0 here.
int MachEpilogNode::safepoint_offset() const { return 0; }
1548 //=============================================================================
#ifndef PRODUCT
// BoxLock materializes the address of a stack lock slot: reg = SP + offset.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One 'addi' instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}

// Compute the stack address of the lock slot into the assigned register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  __ addi(as_Register(reg), SP, offset);
  // Retained x86 reference implementation (dead code, for comparison):
  /*
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
1586 //static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
  // The return address immediately follows the 6-instruction call sequence
  // (24 bytes, as pinned by the assert below):
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1604 //=============================================================================
1605 #ifndef PRODUCT
1606 void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
1607 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
1608 }
1609 #endif
1611 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1612 MacroAssembler _masm(&cbuf);
1613 int i = 0;
1614 for(i = 0; i < _count; i++)
1615 __ nop();
1616 }
1618 uint MachNopNode::size(PhaseRegAlloc *) const {
1619 return 4 * _count;
1620 }
1621 const Pipeline* MachNopNode::pipeline() const {
1622 return MachNode::pipeline_class();
1623 }
1625 //=============================================================================
1627 //=============================================================================
#ifndef PRODUCT
// Unverified Entry Point: pseudo-code of the inline-cache check emitted below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified Entry Point: compare the receiver's klass (receiver in T0)
// with the expected klass in the inline-cache register; on mismatch jump
// to the shared IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();  // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
     8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1671 //=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

// Constants are addressed absolutely on this port, so the constant table
// base needs no offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}

// No code needed: the constant base is not materialized.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("# MachConstantBaseNode (empty encoding)");
}
#endif
1699 //=============================================================================
#ifndef PRODUCT
// Pretty-print the method prolog: optional stack bang, RA/FP save and
// frame push (mirrors MachPrologNode::emit).
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  st->print("sd RA, (SP)-8 @ MachPrologNode\n\t");
  st->print("sd FP, (SP)-16 \n\t");
  st->print("daddiu FP, SP, -16 \n\t");
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1724 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1725 Compile* C = ra_->C;
1726 MacroAssembler _masm(&cbuf);
1728 int framesize = C->frame_size_in_bytes();
1729 int bangsize = C->bang_size_in_bytes();
1731 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1733 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1735 if (C->need_stack_bang(framesize)) {
1736 __ generate_stack_overflow_check(framesize);
1737 }
1739 __ sd(RA, SP, -wordSize);
1740 __ sd(FP, SP, -wordSize*2);
1741 __ daddiu(FP, SP, -wordSize*2);
1742 __ daddiu(SP, SP, -framesize);
1743 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1744 __ nop();
1746 C->set_frame_complete(cbuf.insts_size());
1747 if (C->has_mach_constant_base_node()) {
1748 // NOTE: We set the table base offset here because users might be
1749 // emitted before MachConstantBaseNode.
1750 Compile::ConstantTable& constant_table = C->constant_table();
1751 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1752 }
1754 }
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1766 %}
1768 //----------ENCODING BLOCK-----------------------------------------------------
1769 // This block specifies the encoding classes used by the compiler to output
1770 // byte streams. Encoding classes generate functions which are called by
1771 // Machine Instruction Nodes in order to generate the bit encoding of the
1772 // instruction. Operands specify their base encoding interface with the
1773 // interface keyword. There are currently supported four interfaces,
1774 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1775 // operand to generate a function which returns its register number when
1776 // queried. CONST_INTER causes an operand to generate a function which
1777 // returns the value of the constant when queried. MEMORY_INTER causes an
1778 // operand to generate four functions which return the Base Register, the
1779 // Index Register, the Scale Value, and the Offset Value of the operand when
1780 // queried. COND_INTER causes an operand to generate six functions which
1781 // return the encoding code (ie - encoding bits for the instruction)
1782 // associated with each basic boolean condition for a conditional instruction.
1783 // Instructions specify two basic values for encoding. They use the
1784 // ins_encode keyword to specify their encoding class (which must be one of
1785 // the class names specified in the encoding block), and they use the
1786 // opcode keyword to specify, in order, their primary, secondary, and
1787 // tertiary opcode. Only the opcode sections which a particular instruction
1788 // needs for encoding need to be specified.
1789 encode %{
1790 /*
1791 Alias:
1792 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1793 118 B14: # B19 B15 <- B13 Freq: 0.899955
1794 118 add S1, S2, V0 #@addP_reg_reg
1795 11c lb S0, [S1 + #-8257524] #@loadB
1796 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1797 */
1798 //Load byte signed
1799 enc_class load_B_enc (mRegI dst, memory mem) %{
1800 MacroAssembler _masm(&cbuf);
1801 int dst = $dst$$reg;
1802 int base = $mem$$base;
1803 int index = $mem$$index;
1804 int scale = $mem$$scale;
1805 int disp = $mem$$disp;
1807 if( index != 0 ) {
1808 if( Assembler::is_simm16(disp) ) {
1809 if( 0 ) {
1810 if (scale == 0) {
1811 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1812 } else {
1813 __ dsll(AT, as_Register(index), scale);
1814 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1815 }
1816 } else {
1817 if (scale == 0) {
1818 __ addu(AT, as_Register(base), as_Register(index));
1819 } else {
1820 __ dsll(AT, as_Register(index), scale);
1821 __ addu(AT, as_Register(base), AT);
1822 }
1823 __ lb(as_Register(dst), AT, disp);
1824 }
1825 } else {
1826 if (scale == 0) {
1827 __ addu(AT, as_Register(base), as_Register(index));
1828 } else {
1829 __ dsll(AT, as_Register(index), scale);
1830 __ addu(AT, as_Register(base), AT);
1831 }
1832 __ move(T9, disp);
1833 if( 0 ) {
1834 __ gslbx(as_Register(dst), AT, T9, 0);
1835 } else {
1836 __ addu(AT, AT, T9);
1837 __ lb(as_Register(dst), AT, 0);
1838 }
1839 }
1840 } else {
1841 if( Assembler::is_simm16(disp) ) {
1842 __ lb(as_Register(dst), as_Register(base), disp);
1843 } else {
1844 __ move(T9, disp);
1845 if( 0 ) {
1846 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1847 } else {
1848 __ addu(AT, as_Register(base), T9);
1849 __ lb(as_Register(dst), AT, 0);
1850 }
1851 }
1852 }
1853 %}
1855 //Load byte unsigned
1856 enc_class load_UB_enc (mRegI dst, memory mem) %{
1857 MacroAssembler _masm(&cbuf);
1858 int dst = $dst$$reg;
1859 int base = $mem$$base;
1860 int index = $mem$$index;
1861 int scale = $mem$$scale;
1862 int disp = $mem$$disp;
1864 if( index != 0 ) {
1865 if (scale == 0) {
1866 __ daddu(AT, as_Register(base), as_Register(index));
1867 } else {
1868 __ dsll(AT, as_Register(index), scale);
1869 __ daddu(AT, as_Register(base), AT);
1870 }
1871 if( Assembler::is_simm16(disp) ) {
1872 __ lbu(as_Register(dst), AT, disp);
1873 } else {
1874 __ move(T9, disp);
1875 __ daddu(AT, AT, T9);
1876 __ lbu(as_Register(dst), AT, 0);
1877 }
1878 } else {
1879 if( Assembler::is_simm16(disp) ) {
1880 __ lbu(as_Register(dst), as_Register(base), disp);
1881 } else {
1882 __ move(T9, disp);
1883 __ daddu(AT, as_Register(base), T9);
1884 __ lbu(as_Register(dst), AT, 0);
1885 }
1886 }
1887 %}
  // Store Byte (low 8 bits of a GP register) to memory.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // reg + reg(<<scale) [+ disp]: form the address in AT.
      // NOTE(review): uses 32-bit addu here while the unsigned-byte load
      // uses daddu for the same computation -- confirm addresses fit the
      // sign-extended 32-bit range.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sb(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sb(as_Register(src), AT, 0);
      }
    }
  %}
  // Store Byte immediate to memory.  A zero immediate is stored directly
  // from R0; otherwise the value is materialized in a scratch register.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // reg + reg(<<scale) [+ disp]: form the address in AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  %}
  // Store an 8-bit immediate to memory followed by a full memory barrier.
  // Code generation is identical to store_B_immI_enc above; the trailing
  // sync() orders the store with subsequent memory accesses (used for
  // volatile byte stores -- presumably; confirm against the matching rules).
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);          // zero comes straight from R0
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // Displacement too large for a 16-bit offset: add it via T9.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    __ sync();   // barrier: make the store globally visible before later accesses
  %}
2039 // Load Short (16bit signed)
2040 enc_class load_S_enc (mRegI dst, memory mem) %{
2041 MacroAssembler _masm(&cbuf);
2042 int dst = $dst$$reg;
2043 int base = $mem$$base;
2044 int index = $mem$$index;
2045 int scale = $mem$$scale;
2046 int disp = $mem$$disp;
2048 if( index != 0 ) {
2049 if (scale == 0) {
2050 __ daddu(AT, as_Register(base), as_Register(index));
2051 } else {
2052 __ dsll(AT, as_Register(index), scale);
2053 __ daddu(AT, as_Register(base), AT);
2054 }
2055 if( Assembler::is_simm16(disp) ) {
2056 __ lh(as_Register(dst), AT, disp);
2057 } else {
2058 __ move(T9, disp);
2059 __ addu(AT, AT, T9);
2060 __ lh(as_Register(dst), AT, 0);
2061 }
2062 } else {
2063 if( Assembler::is_simm16(disp) ) {
2064 __ lh(as_Register(dst), as_Register(base), disp);
2065 } else {
2066 __ move(T9, disp);
2067 __ addu(AT, as_Register(base), T9);
2068 __ lh(as_Register(dst), AT, 0);
2069 }
2070 }
2071 %}
2073 // Load Char (16bit unsigned)
2074 enc_class load_C_enc (mRegI dst, memory mem) %{
2075 MacroAssembler _masm(&cbuf);
2076 int dst = $dst$$reg;
2077 int base = $mem$$base;
2078 int index = $mem$$index;
2079 int scale = $mem$$scale;
2080 int disp = $mem$$disp;
2082 if( index != 0 ) {
2083 if (scale == 0) {
2084 __ daddu(AT, as_Register(base), as_Register(index));
2085 } else {
2086 __ dsll(AT, as_Register(index), scale);
2087 __ daddu(AT, as_Register(base), AT);
2088 }
2089 if( Assembler::is_simm16(disp) ) {
2090 __ lhu(as_Register(dst), AT, disp);
2091 } else {
2092 __ move(T9, disp);
2093 __ addu(AT, AT, T9);
2094 __ lhu(as_Register(dst), AT, 0);
2095 }
2096 } else {
2097 if( Assembler::is_simm16(disp) ) {
2098 __ lhu(as_Register(dst), as_Register(base), disp);
2099 } else {
2100 __ move(T9, disp);
2101 __ daddu(AT, as_Register(base), T9);
2102 __ lhu(as_Register(dst), AT, 0);
2103 }
2104 }
2105 %}
2107 // Store Char (16bit unsigned)
2108 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2109 MacroAssembler _masm(&cbuf);
2110 int src = $src$$reg;
2111 int base = $mem$$base;
2112 int index = $mem$$index;
2113 int scale = $mem$$scale;
2114 int disp = $mem$$disp;
2116 if( index != 0 ) {
2117 if( Assembler::is_simm16(disp) ) {
2118 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2119 if (scale == 0) {
2120 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2121 } else {
2122 __ dsll(AT, as_Register(index), scale);
2123 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2124 }
2125 } else {
2126 if (scale == 0) {
2127 __ addu(AT, as_Register(base), as_Register(index));
2128 } else {
2129 __ dsll(AT, as_Register(index), scale);
2130 __ addu(AT, as_Register(base), AT);
2131 }
2132 __ sh(as_Register(src), AT, disp);
2133 }
2134 } else {
2135 if (scale == 0) {
2136 __ addu(AT, as_Register(base), as_Register(index));
2137 } else {
2138 __ dsll(AT, as_Register(index), scale);
2139 __ addu(AT, as_Register(base), AT);
2140 }
2141 __ move(T9, disp);
2142 if( UseLoongsonISA ) {
2143 __ gsshx(as_Register(src), AT, T9, 0);
2144 } else {
2145 __ addu(AT, AT, T9);
2146 __ sh(as_Register(src), AT, 0);
2147 }
2148 }
2149 } else {
2150 if( Assembler::is_simm16(disp) ) {
2151 __ sh(as_Register(src), as_Register(base), disp);
2152 } else {
2153 __ move(T9, disp);
2154 if( UseLoongsonISA ) {
2155 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2156 } else {
2157 __ addu(AT, as_Register(base), T9);
2158 __ sh(as_Register(src), AT, 0);
2159 }
2160 }
2161 }
2162 %}
2164 enc_class load_I_enc (mRegI dst, memory mem) %{
2165 MacroAssembler _masm(&cbuf);
2166 int dst = $dst$$reg;
2167 int base = $mem$$base;
2168 int index = $mem$$index;
2169 int scale = $mem$$scale;
2170 int disp = $mem$$disp;
2172 if( index != 0 ) {
2173 if( Assembler::is_simm16(disp) ) {
2174 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2175 if (scale == 0) {
2176 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2177 } else {
2178 __ dsll(AT, as_Register(index), scale);
2179 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2180 }
2181 } else {
2182 if (scale == 0) {
2183 __ addu(AT, as_Register(base), as_Register(index));
2184 } else {
2185 __ dsll(AT, as_Register(index), scale);
2186 __ addu(AT, as_Register(base), AT);
2187 }
2188 __ lw(as_Register(dst), AT, disp);
2189 }
2190 } else {
2191 if (scale == 0) {
2192 __ addu(AT, as_Register(base), as_Register(index));
2193 } else {
2194 __ dsll(AT, as_Register(index), scale);
2195 __ addu(AT, as_Register(base), AT);
2196 }
2197 __ move(T9, disp);
2198 if( UseLoongsonISA ) {
2199 __ gslwx(as_Register(dst), AT, T9, 0);
2200 } else {
2201 __ addu(AT, AT, T9);
2202 __ lw(as_Register(dst), AT, 0);
2203 }
2204 }
2205 } else {
2206 if( Assembler::is_simm16(disp) ) {
2207 __ lw(as_Register(dst), as_Register(base), disp);
2208 } else {
2209 __ move(T9, disp);
2210 if( UseLoongsonISA ) {
2211 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2212 } else {
2213 __ addu(AT, as_Register(base), T9);
2214 __ lw(as_Register(dst), AT, 0);
2215 }
2216 }
2217 }
2218 %}
2220 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2221 MacroAssembler _masm(&cbuf);
2222 int src = $src$$reg;
2223 int base = $mem$$base;
2224 int index = $mem$$index;
2225 int scale = $mem$$scale;
2226 int disp = $mem$$disp;
2228 if( index != 0 ) {
2229 if( Assembler::is_simm16(disp) ) {
2230 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2231 if (scale == 0) {
2232 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2233 } else {
2234 __ dsll(AT, as_Register(index), scale);
2235 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2236 }
2237 } else {
2238 if (scale == 0) {
2239 __ addu(AT, as_Register(base), as_Register(index));
2240 } else {
2241 __ dsll(AT, as_Register(index), scale);
2242 __ addu(AT, as_Register(base), AT);
2243 }
2244 __ sw(as_Register(src), AT, disp);
2245 }
2246 } else {
2247 if (scale == 0) {
2248 __ addu(AT, as_Register(base), as_Register(index));
2249 } else {
2250 __ dsll(AT, as_Register(index), scale);
2251 __ addu(AT, as_Register(base), AT);
2252 }
2253 __ move(T9, disp);
2254 if( UseLoongsonISA ) {
2255 __ gsswx(as_Register(src), AT, T9, 0);
2256 } else {
2257 __ addu(AT, AT, T9);
2258 __ sw(as_Register(src), AT, 0);
2259 }
2260 }
2261 } else {
2262 if( Assembler::is_simm16(disp) ) {
2263 __ sw(as_Register(src), as_Register(base), disp);
2264 } else {
2265 __ move(T9, disp);
2266 if( UseLoongsonISA ) {
2267 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2268 } else {
2269 __ addu(AT, as_Register(base), T9);
2270 __ sw(as_Register(src), AT, 0);
2271 }
2272 }
2273 }
2274 %}
2276 enc_class store_I_immI_enc (memory mem, immI src) %{
2277 MacroAssembler _masm(&cbuf);
2278 int base = $mem$$base;
2279 int index = $mem$$index;
2280 int scale = $mem$$scale;
2281 int disp = $mem$$disp;
2282 int value = $src$$constant;
2284 if( index != 0 ) {
2285 if (scale == 0) {
2286 __ daddu(AT, as_Register(base), as_Register(index));
2287 } else {
2288 __ dsll(AT, as_Register(index), scale);
2289 __ daddu(AT, as_Register(base), AT);
2290 }
2291 if( Assembler::is_simm16(disp) ) {
2292 if (value == 0) {
2293 __ sw(R0, AT, disp);
2294 } else {
2295 __ move(T9, value);
2296 __ sw(T9, AT, disp);
2297 }
2298 } else {
2299 if (value == 0) {
2300 __ move(T9, disp);
2301 __ addu(AT, AT, T9);
2302 __ sw(R0, AT, 0);
2303 } else {
2304 __ move(T9, disp);
2305 __ addu(AT, AT, T9);
2306 __ move(T9, value);
2307 __ sw(T9, AT, 0);
2308 }
2309 }
2310 } else {
2311 if( Assembler::is_simm16(disp) ) {
2312 if (value == 0) {
2313 __ sw(R0, as_Register(base), disp);
2314 } else {
2315 __ move(AT, value);
2316 __ sw(AT, as_Register(base), disp);
2317 }
2318 } else {
2319 if (value == 0) {
2320 __ move(T9, disp);
2321 __ addu(AT, as_Register(base), T9);
2322 __ sw(R0, AT, 0);
2323 } else {
2324 __ move(T9, disp);
2325 __ addu(AT, as_Register(base), T9);
2326 __ move(T9, value);
2327 __ sw(T9, AT, 0);
2328 }
2329 }
2330 }
2331 %}
2333 enc_class load_N_enc (mRegN dst, memory mem) %{
2334 MacroAssembler _masm(&cbuf);
2335 int dst = $dst$$reg;
2336 int base = $mem$$base;
2337 int index = $mem$$index;
2338 int scale = $mem$$scale;
2339 int disp = $mem$$disp;
2340 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2341 assert(disp_reloc == relocInfo::none, "cannot have disp");
2343 if( index != 0 ) {
2344 if (scale == 0) {
2345 __ daddu(AT, as_Register(base), as_Register(index));
2346 } else {
2347 __ dsll(AT, as_Register(index), scale);
2348 __ daddu(AT, as_Register(base), AT);
2349 }
2350 if( Assembler::is_simm16(disp) ) {
2351 __ lwu(as_Register(dst), AT, disp);
2352 } else {
2353 __ li(T9, disp);
2354 __ daddu(AT, AT, T9);
2355 __ lwu(as_Register(dst), AT, 0);
2356 }
2357 } else {
2358 if( Assembler::is_simm16(disp) ) {
2359 __ lwu(as_Register(dst), as_Register(base), disp);
2360 } else {
2361 __ li(T9, disp);
2362 __ daddu(AT, as_Register(base), T9);
2363 __ lwu(as_Register(dst), AT, 0);
2364 }
2365 }
2367 %}
2370 enc_class load_P_enc (mRegP dst, memory mem) %{
2371 MacroAssembler _masm(&cbuf);
2372 int dst = $dst$$reg;
2373 int base = $mem$$base;
2374 int index = $mem$$index;
2375 int scale = $mem$$scale;
2376 int disp = $mem$$disp;
2377 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2378 assert(disp_reloc == relocInfo::none, "cannot have disp");
2380 if( index != 0 ) {
2381 if (scale == 0) {
2382 __ daddu(AT, as_Register(base), as_Register(index));
2383 } else {
2384 __ dsll(AT, as_Register(index), scale);
2385 __ daddu(AT, as_Register(base), AT);
2386 }
2387 if( Assembler::is_simm16(disp) ) {
2388 __ ld(as_Register(dst), AT, disp);
2389 } else {
2390 __ li(T9, disp);
2391 __ daddu(AT, AT, T9);
2392 __ ld(as_Register(dst), AT, 0);
2393 }
2394 } else {
2395 if( Assembler::is_simm16(disp) ) {
2396 __ ld(as_Register(dst), as_Register(base), disp);
2397 } else {
2398 __ li(T9, disp);
2399 __ daddu(AT, as_Register(base), T9);
2400 __ ld(as_Register(dst), AT, 0);
2401 }
2402 }
2403 // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2404 %}
2406 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2407 MacroAssembler _masm(&cbuf);
2408 int src = $src$$reg;
2409 int base = $mem$$base;
2410 int index = $mem$$index;
2411 int scale = $mem$$scale;
2412 int disp = $mem$$disp;
2414 if( index != 0 ) {
2415 if (scale == 0) {
2416 __ daddu(AT, as_Register(base), as_Register(index));
2417 } else {
2418 __ dsll(AT, as_Register(index), scale);
2419 __ daddu(AT, as_Register(base), AT);
2420 }
2421 if( Assembler::is_simm16(disp) ) {
2422 __ sd(as_Register(src), AT, disp);
2423 } else {
2424 __ move(T9, disp);
2425 __ daddu(AT, AT, T9);
2426 __ sd(as_Register(src), AT, 0);
2427 }
2428 } else {
2429 if( Assembler::is_simm16(disp) ) {
2430 __ sd(as_Register(src), as_Register(base), disp);
2431 } else {
2432 __ move(T9, disp);
2433 __ daddu(AT, as_Register(base), T9);
2434 __ sd(as_Register(src), AT, 0);
2435 }
2436 }
2437 %}
2439 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2440 MacroAssembler _masm(&cbuf);
2441 int src = $src$$reg;
2442 int base = $mem$$base;
2443 int index = $mem$$index;
2444 int scale = $mem$$scale;
2445 int disp = $mem$$disp;
2447 if( index != 0 ) {
2448 if (scale == 0) {
2449 __ daddu(AT, as_Register(base), as_Register(index));
2450 } else {
2451 __ dsll(AT, as_Register(index), scale);
2452 __ daddu(AT, as_Register(base), AT);
2453 }
2454 if( Assembler::is_simm16(disp) ) {
2455 __ sw(as_Register(src), AT, disp);
2456 } else {
2457 __ move(T9, disp);
2458 __ addu(AT, AT, T9);
2459 __ sw(as_Register(src), AT, 0);
2460 }
2461 } else {
2462 if( Assembler::is_simm16(disp) ) {
2463 __ sw(as_Register(src), as_Register(base), disp);
2464 } else {
2465 __ move(T9, disp);
2466 __ addu(AT, as_Register(base), T9);
2467 __ sw(as_Register(src), AT, 0);
2468 }
2469 }
2470 %}
2472 enc_class store_P_immP_enc (memory mem, immP31 src) %{
2473 MacroAssembler _masm(&cbuf);
2474 int base = $mem$$base;
2475 int index = $mem$$index;
2476 int scale = $mem$$scale;
2477 int disp = $mem$$disp;
2478 long value = $src$$constant;
2480 if( index != 0 ) {
2481 if (scale == 0) {
2482 __ daddu(AT, as_Register(base), as_Register(index));
2483 } else {
2484 __ dsll(AT, as_Register(index), scale);
2485 __ daddu(AT, as_Register(base), AT);
2486 }
2487 if( Assembler::is_simm16(disp) ) {
2488 if (value == 0) {
2489 __ sd(R0, AT, disp);
2490 } else {
2491 __ move(T9, value);
2492 __ sd(T9, AT, disp);
2493 }
2494 } else {
2495 if (value == 0) {
2496 __ move(T9, disp);
2497 __ daddu(AT, AT, T9);
2498 __ sd(R0, AT, 0);
2499 } else {
2500 __ move(T9, disp);
2501 __ daddu(AT, AT, T9);
2502 __ move(T9, value);
2503 __ sd(T9, AT, 0);
2504 }
2505 }
2506 } else {
2507 if( Assembler::is_simm16(disp) ) {
2508 if (value == 0) {
2509 __ sd(R0, as_Register(base), disp);
2510 } else {
2511 __ move(AT, value);
2512 __ sd(AT, as_Register(base), disp);
2513 }
2514 } else {
2515 if (value == 0) {
2516 __ move(T9, disp);
2517 __ daddu(AT, as_Register(base), T9);
2518 __ sd(R0, AT, 0);
2519 } else {
2520 __ move(T9, disp);
2521 __ daddu(AT, as_Register(base), T9);
2522 __ move(T9, value);
2523 __ sd(T9, AT, 0);
2524 }
2525 }
2526 }
2527 %}
2529 /*
2530 * 1d4 storeImmN [S0 + #16 (8-bit)], narrowoop: spec/benchmarks/_213_javac/Identifier:exact *
2531 * # compressed ptr ! Field: spec/benchmarks/_213_javac/Identifier.value
2532 * 0x00000055648065d4: daddu at, s0, zero
2533 * 0x00000055648065d8: lui t9, 0x0 ; {oop(a 'spec/benchmarks/_213_javac/Identifier')}
2534 * 0x00000055648065dc: ori t9, t9, 0xfffff610
2535 * 0x00000055648065e0: dsll t9, t9, 16
2536 * 0x00000055648065e4: ori t9, t9, 0xffffc628
2537 * 0x00000055648065e8: sw t9, 0x10(at)
2538 */
2539 enc_class storeImmN_enc (memory mem, immN src) %{
2540 MacroAssembler _masm(&cbuf);
2541 int base = $mem$$base;
2542 int index = $mem$$index;
2543 int scale = $mem$$scale;
2544 int disp = $mem$$disp;
2545 long * value = (long *)$src$$constant;
2547 if (value == NULL) {
2548 guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
2549 if (index == 0) {
2550 __ sw(R0, as_Register(base), disp);
2551 } else {
2552 if (scale == 0) {
2553 __ daddu(AT, as_Register(base), as_Register(index));
2554 } else {
2555 __ dsll(AT, as_Register(index), scale);
2556 __ daddu(AT, as_Register(base), AT);
2557 }
2558 __ sw(R0, AT, disp);
2559 }
2561 return;
2562 }
2564 int oop_index = __ oop_recorder()->find_index((jobject)value);
2565 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2567 guarantee(scale == 0, "FIXME: scale is not zero !");
2568 guarantee(value != 0, "FIXME: value is zero !");
2570 if (index != 0) {
2571 if (scale == 0) {
2572 __ daddu(AT, as_Register(base), as_Register(index));
2573 } else {
2574 __ dsll(AT, as_Register(index), scale);
2575 __ daddu(AT, as_Register(base), AT);
2576 }
2577 if( Assembler::is_simm16(disp) ) {
2578 if(rspec.type() != relocInfo::none) {
2579 __ relocate(rspec, Assembler::narrow_oop_operand);
2580 __ li48(T9, oop_index);
2581 } else {
2582 __ move(T9, oop_index);
2583 }
2584 __ sw(T9, AT, disp);
2585 } else {
2586 __ move(T9, disp);
2587 __ addu(AT, AT, T9);
2589 if(rspec.type() != relocInfo::none) {
2590 __ relocate(rspec, Assembler::narrow_oop_operand);
2591 __ li48(T9, oop_index);
2592 } else {
2593 __ move(T9, oop_index);
2594 }
2595 __ sw(T9, AT, 0);
2596 }
2597 }
2598 else {
2599 if( Assembler::is_simm16(disp) ) {
2600 if($src->constant_reloc() != relocInfo::none) {
2601 __ relocate(rspec, Assembler::narrow_oop_operand);
2602 __ li48(T9, oop_index);
2603 }
2604 else {
2605 __ li48(T9, oop_index);
2606 }
2607 __ sw(T9, as_Register(base), disp);
2608 } else {
2609 __ move(T9, disp);
2610 __ daddu(AT, as_Register(base), T9);
2612 if($src->constant_reloc() != relocInfo::none){
2613 __ relocate(rspec, Assembler::narrow_oop_operand);
2614 __ li48(T9, oop_index);
2615 } else {
2616 __ li48(T9, oop_index);
2617 }
2618 __ sw(T9, AT, 0);
2619 }
2620 }
2621 %}
2623 enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
2624 MacroAssembler _masm(&cbuf);
2626 assert (UseCompressedOops, "should only be used for compressed headers");
2627 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
2629 int base = $mem$$base;
2630 int index = $mem$$index;
2631 int scale = $mem$$scale;
2632 int disp = $mem$$disp;
2633 long value = $src$$constant;
2635 int klass_index = __ oop_recorder()->find_index((Klass*)value);
2636 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
2637 long narrowp = Klass::encode_klass((Klass*)value);
2639 if(index!=0){
2640 if (scale == 0) {
2641 __ daddu(AT, as_Register(base), as_Register(index));
2642 } else {
2643 __ dsll(AT, as_Register(index), scale);
2644 __ daddu(AT, as_Register(base), AT);
2645 }
2647 if( Assembler::is_simm16(disp) ) {
2648 if(rspec.type() != relocInfo::none){
2649 __ relocate(rspec, Assembler::narrow_oop_operand);
2650 __ li48(T9, narrowp);
2651 }
2652 else {
2653 __ li48(T9, narrowp);
2654 }
2655 __ sw(T9, AT, disp);
2656 } else {
2657 __ move(T9, disp);
2658 __ daddu(AT, AT, T9);
2660 if(rspec.type() != relocInfo::none){
2661 __ relocate(rspec, Assembler::narrow_oop_operand);
2662 __ li48(T9, narrowp);
2663 }
2664 else {
2665 __ li48(T9, narrowp);
2666 }
2668 __ sw(T9, AT, 0);
2669 }
2670 }
2671 else {
2672 if( Assembler::is_simm16(disp) ) {
2673 if(rspec.type() != relocInfo::none){
2674 __ relocate(rspec, Assembler::narrow_oop_operand);
2675 __ li48(T9, narrowp);
2676 }
2677 else {
2678 __ li48(T9, narrowp);
2679 }
2680 __ sw(T9, as_Register(base), disp);
2681 } else {
2682 __ move(T9, disp);
2683 __ daddu(AT, as_Register(base), T9);
2685 if(rspec.type() != relocInfo::none){
2686 __ relocate(rspec, Assembler::narrow_oop_operand);
2687 __ li48(T9, narrowp);
2688 }
2689 else {
2690 __ li48(T9, narrowp);
2691 }
2692 __ sw(T9, AT, 0);
2693 }
2694 }
2695 %}
2697 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2698 MacroAssembler _masm(&cbuf);
2699 int base = $mem$$base;
2700 int index = $mem$$index;
2701 int scale = $mem$$scale;
2702 int disp = $mem$$disp;
2704 if(index!=0){
2705 if (scale == 0) {
2706 __ daddu(AT, as_Register(base), as_Register(index));
2707 } else {
2708 __ dsll(AT, as_Register(index), scale);
2709 __ daddu(AT, as_Register(base), AT);
2710 }
2712 if( Assembler::is_simm16(disp) ) {
2713 __ sw(R0, AT, disp);
2714 } else {
2715 __ move(T9, disp);
2716 __ daddu(AT, AT, T9);
2717 __ sw(R0, AT, 0);
2718 }
2719 }
2720 else {
2721 if( Assembler::is_simm16(disp) ) {
2722 __ sw(R0, as_Register(base), disp);
2723 } else {
2724 __ move(T9, disp);
2725 __ daddu(AT, as_Register(base), T9);
2726 __ sw(R0, AT, 0);
2727 }
2728 }
2729 %}
  // Load Long: 64-bit load from [base + (index << scale) + disp] into dst.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    // Dummy load from [base + 0] whose only purpose is to fault here (the
    // first instruction of the pattern) when base is null; AT is clobbered.
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     * 337  b  java.util.Arrays::sort1 (401 bytes)
     * B73:
     *      d34   lw  T4.lo, [T4 + #16]  #@loadL-lo
     *            lw  T4.hi, [T4 + #16]+4  #@loadL-hi
     *
     * The original instructions generated here are :
     *    __ lw(dst_lo, as_Register(base), disp);
     *    __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/
    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move(AT, base) looks redundant -- ld could use
        // base directly; kept as-is since it may relate to the single-load
        // fix documented above.  Confirm before simplifying.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
2786 enc_class store_L_reg_enc (memory mem, mRegL src) %{
2787 MacroAssembler _masm(&cbuf);
2788 int base = $mem$$base;
2789 int index = $mem$$index;
2790 int scale = $mem$$scale;
2791 int disp = $mem$$disp;
2792 Register src_reg = as_Register($src$$reg);
2794 if( index != 0 ) {
2795 if (scale == 0) {
2796 __ daddu(AT, as_Register(base), as_Register(index));
2797 } else {
2798 __ dsll(AT, as_Register(index), scale);
2799 __ daddu(AT, as_Register(base), AT);
2800 }
2801 if( Assembler::is_simm16(disp) ) {
2802 __ sd(src_reg, AT, disp);
2803 } else {
2804 __ move(T9, disp);
2805 __ daddu(AT, AT, T9);
2806 __ sd(src_reg, AT, 0);
2807 }
2808 } else {
2809 if( Assembler::is_simm16(disp) ) {
2810 __ move(AT, as_Register(base));
2811 __ sd(src_reg, AT, disp);
2812 } else {
2813 __ move(T9, disp);
2814 __ daddu(AT, as_Register(base), T9);
2815 __ sd(src_reg, AT, 0);
2816 }
2817 }
2818 %}
2820 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2821 MacroAssembler _masm(&cbuf);
2822 int base = $mem$$base;
2823 int index = $mem$$index;
2824 int scale = $mem$$scale;
2825 int disp = $mem$$disp;
2827 if( index != 0 ) {
2828 if (scale == 0) {
2829 __ daddu(AT, as_Register(base), as_Register(index));
2830 } else {
2831 __ dsll(AT, as_Register(index), scale);
2832 __ daddu(AT, as_Register(base), AT);
2833 }
2834 if( Assembler::is_simm16(disp) ) {
2835 __ sd(R0, AT, disp);
2836 } else {
2837 __ move(T9, disp);
2838 __ addu(AT, AT, T9);
2839 __ sd(R0, AT, 0);
2840 }
2841 } else {
2842 if( Assembler::is_simm16(disp) ) {
2843 __ move(AT, as_Register(base));
2844 __ sd(R0, AT, disp);
2845 } else {
2846 __ move(T9, disp);
2847 __ addu(AT, as_Register(base), T9);
2848 __ sd(R0, AT, 0);
2849 }
2850 }
2851 %}
2853 enc_class store_L_immL_enc (memory mem, immL src) %{
2854 MacroAssembler _masm(&cbuf);
2855 int base = $mem$$base;
2856 int index = $mem$$index;
2857 int scale = $mem$$scale;
2858 int disp = $mem$$disp;
2859 long imm = $src$$constant;
2861 if( index != 0 ) {
2862 if (scale == 0) {
2863 __ daddu(AT, as_Register(base), as_Register(index));
2864 } else {
2865 __ dsll(AT, as_Register(index), scale);
2866 __ daddu(AT, as_Register(base), AT);
2867 }
2868 if( Assembler::is_simm16(disp) ) {
2869 __ li(T9, imm);
2870 __ sd(T9, AT, disp);
2871 } else {
2872 __ move(T9, disp);
2873 __ addu(AT, AT, T9);
2874 __ li(T9, imm);
2875 __ sd(T9, AT, 0);
2876 }
2877 } else {
2878 if( Assembler::is_simm16(disp) ) {
2879 __ move(AT, as_Register(base));
2880 __ li(T9, imm);
2881 __ sd(T9, AT, disp);
2882 } else {
2883 __ move(T9, disp);
2884 __ addu(AT, as_Register(base), T9);
2885 __ li(T9, imm);
2886 __ sd(T9, AT, 0);
2887 }
2888 }
2889 %}
2891 enc_class load_F_enc (regF dst, memory mem) %{
2892 MacroAssembler _masm(&cbuf);
2893 int base = $mem$$base;
2894 int index = $mem$$index;
2895 int scale = $mem$$scale;
2896 int disp = $mem$$disp;
2897 FloatRegister dst = $dst$$FloatRegister;
2899 if( index != 0 ) {
2900 if( Assembler::is_simm16(disp) ) {
2901 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2902 if (scale == 0) {
2903 __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
2904 } else {
2905 __ dsll(AT, as_Register(index), scale);
2906 __ gslwxc1(dst, as_Register(base), AT, disp);
2907 }
2908 } else {
2909 if (scale == 0) {
2910 __ daddu(AT, as_Register(base), as_Register(index));
2911 } else {
2912 __ dsll(AT, as_Register(index), scale);
2913 __ daddu(AT, as_Register(base), AT);
2914 }
2915 __ lwc1(dst, AT, disp);
2916 }
2917 } else {
2918 if (scale == 0) {
2919 __ daddu(AT, as_Register(base), as_Register(index));
2920 } else {
2921 __ dsll(AT, as_Register(index), scale);
2922 __ daddu(AT, as_Register(base), AT);
2923 }
2924 __ move(T9, disp);
2925 if( UseLoongsonISA ) {
2926 __ gslwxc1(dst, AT, T9, 0);
2927 } else {
2928 __ daddu(AT, AT, T9);
2929 __ lwc1(dst, AT, 0);
2930 }
2931 }
2932 } else {
2933 if( Assembler::is_simm16(disp) ) {
2934 __ lwc1(dst, as_Register(base), disp);
2935 } else {
2936 __ move(T9, disp);
2937 if( UseLoongsonISA ) {
2938 __ gslwxc1(dst, as_Register(base), T9, 0);
2939 } else {
2940 __ daddu(AT, as_Register(base), T9);
2941 __ lwc1(dst, AT, 0);
2942 }
2943 }
2944 }
2945 %}
2947 enc_class store_F_reg_enc (memory mem, regF src) %{
2948 MacroAssembler _masm(&cbuf);
2949 int base = $mem$$base;
2950 int index = $mem$$index;
2951 int scale = $mem$$scale;
2952 int disp = $mem$$disp;
2953 FloatRegister src = $src$$FloatRegister;
2955 if( index != 0 ) {
2956 if( Assembler::is_simm16(disp) ) {
2957 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2958 if (scale == 0) {
2959 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2960 } else {
2961 __ dsll(AT, as_Register(index), scale);
2962 __ gsswxc1(src, as_Register(base), AT, disp);
2963 }
2964 } else {
2965 if (scale == 0) {
2966 __ daddu(AT, as_Register(base), as_Register(index));
2967 } else {
2968 __ dsll(AT, as_Register(index), scale);
2969 __ daddu(AT, as_Register(base), AT);
2970 }
2971 __ swc1(src, AT, disp);
2972 }
2973 } else {
2974 if (scale == 0) {
2975 __ daddu(AT, as_Register(base), as_Register(index));
2976 } else {
2977 __ dsll(AT, as_Register(index), scale);
2978 __ daddu(AT, as_Register(base), AT);
2979 }
2980 __ move(T9, disp);
2981 if( UseLoongsonISA ) {
2982 __ gsswxc1(src, AT, T9, 0);
2983 } else {
2984 __ daddu(AT, AT, T9);
2985 __ swc1(src, AT, 0);
2986 }
2987 }
2988 } else {
2989 if( Assembler::is_simm16(disp) ) {
2990 __ swc1(src, as_Register(base), disp);
2991 } else {
2992 __ move(T9, disp);
2993 if( UseLoongsonISA ) {
2994 __ gslwxc1(src, as_Register(base), T9, 0);
2995 } else {
2996 __ daddu(AT, as_Register(base), T9);
2997 __ swc1(src, AT, 0);
2998 }
2999 }
3000 }
3001 %}
3003 enc_class load_D_enc (regD dst, memory mem) %{
3004 MacroAssembler _masm(&cbuf);
3005 int base = $mem$$base;
3006 int index = $mem$$index;
3007 int scale = $mem$$scale;
3008 int disp = $mem$$disp;
3009 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3011 if( index != 0 ) {
3012 if( Assembler::is_simm16(disp) ) {
3013 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3014 if (scale == 0) {
3015 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3016 } else {
3017 __ dsll(AT, as_Register(index), scale);
3018 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3019 }
3020 } else {
3021 if (scale == 0) {
3022 __ daddu(AT, as_Register(base), as_Register(index));
3023 } else {
3024 __ dsll(AT, as_Register(index), scale);
3025 __ daddu(AT, as_Register(base), AT);
3026 }
3027 __ ldc1(dst_reg, AT, disp);
3028 }
3029 } else {
3030 if (scale == 0) {
3031 __ daddu(AT, as_Register(base), as_Register(index));
3032 } else {
3033 __ dsll(AT, as_Register(index), scale);
3034 __ daddu(AT, as_Register(base), AT);
3035 }
3036 __ move(T9, disp);
3037 if( UseLoongsonISA ) {
3038 __ gsldxc1(dst_reg, AT, T9, 0);
3039 } else {
3040 __ addu(AT, AT, T9);
3041 __ ldc1(dst_reg, AT, 0);
3042 }
3043 }
3044 } else {
3045 if( Assembler::is_simm16(disp) ) {
3046 __ ldc1(dst_reg, as_Register(base), disp);
3047 } else {
3048 __ move(T9, disp);
3049 if( UseLoongsonISA ) {
3050 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3051 } else {
3052 __ addu(AT, as_Register(base), T9);
3053 __ ldc1(dst_reg, AT, 0);
3054 }
3055 }
3056 }
3057 %}
3059 enc_class store_D_reg_enc (memory mem, regD src) %{
3060 MacroAssembler _masm(&cbuf);
3061 int base = $mem$$base;
3062 int index = $mem$$index;
3063 int scale = $mem$$scale;
3064 int disp = $mem$$disp;
3065 FloatRegister src_reg = as_FloatRegister($src$$reg);
3067 if( index != 0 ) {
3068 if( Assembler::is_simm16(disp) ) {
3069 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3070 if (scale == 0) {
3071 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3072 } else {
3073 __ dsll(AT, as_Register(index), scale);
3074 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3075 }
3076 } else {
3077 if (scale == 0) {
3078 __ daddu(AT, as_Register(base), as_Register(index));
3079 } else {
3080 __ dsll(AT, as_Register(index), scale);
3081 __ daddu(AT, as_Register(base), AT);
3082 }
3083 __ sdc1(src_reg, AT, disp);
3084 }
3085 } else {
3086 if (scale == 0) {
3087 __ daddu(AT, as_Register(base), as_Register(index));
3088 } else {
3089 __ dsll(AT, as_Register(index), scale);
3090 __ daddu(AT, as_Register(base), AT);
3091 }
3092 __ move(T9, disp);
3093 if( UseLoongsonISA ) {
3094 __ gssdxc1(src_reg, AT, T9, 0);
3095 } else {
3096 __ addu(AT, AT, T9);
3097 __ sdc1(src_reg, AT, 0);
3098 }
3099 }
3100 } else {
3101 if( Assembler::is_simm16(disp) ) {
3102 __ sdc1(src_reg, as_Register(base), disp);
3103 } else {
3104 __ move(T9, disp);
3105 if( UseLoongsonISA ) {
3106 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3107 } else {
3108 __ addu(AT, as_Register(base), T9);
3109 __ sdc1(src_reg, AT, 0);
3110 }
3111 }
3112 }
3113 %}
  // Call from compiled Java code into the VM runtime (or a runtime leaf).
  // Emits a runtime_call relocation, materializes the target with li48 into
  // T9, and calls via jalr; the nop fills the branch-delay slot.
  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);   // li48: fixed-length sequence, patchable
    __ jalr(T9);
    __ nop();                           // branch-delay slot
  %}
  // Static (monomorphic) Java call.  Chooses the relocation type from the
  // call-site kind: runtime stub when no method is known, optimized-virtual
  // for devirtualized calls, plain static otherwise.  The call itself goes
  // through T9 via jalr; a java-to-interpreter stub is emitted for real
  // methods so the site can be patched before the callee is compiled.
  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    if ( !_method ) {
      // Runtime stub target (no Java method): plain runtime-call reloc.
      __ relocate(relocInfo::runtime_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
                    //   runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
                    //              opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      __ relocate(relocInfo::static_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
            //        static_call_Relocation::spec(), RELOC_IMM32 );
    }

    __ li(T9, $meth$$method);   // NOTE(review): li may emit a variable-length
                                // sequence; confirm patching assumes li48 here.
    __ jalr(T9);
    __ nop();                   // branch-delay slot
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
3156 /*
3157 * [Ref: LIR_Assembler::ic_call() ]
3158 */
3159  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    // Inline-cache (virtual) call: ic_call emits the IC setup and the
    // relocated call to the target/resolver (see the LIR_Assembler::ic_call
    // reference in the comment above).
3160    MacroAssembler _masm(&cbuf);
3161    __ block_comment("Java_Dynamic_Call");
3162    __ ic_call((address)$meth$$method);
3163  %}
3165  enc_class call_epilog %{
    // Intentionally empty on this port: the emitted call epilog is nothing.
    // The block below is the x86 VerifyStackAtCalls "majik cookie" stack
    // check kept for reference only; it was never ported to MIPS (per the
    // in_preserve_stack_slots comment, VerifyStackAtCalls is defined false).
3166    /*
3167    if( VerifyStackAtCalls ) {
3168    // Check that stack depth is unchanged: find majik cookie on stack
3169    int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
3170    if(framesize >= 128) {
3171    emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
3172    emit_d8(cbuf,0xBC);
3173    emit_d8(cbuf,0x24);
3174    emit_d32(cbuf,framesize); // Find majik cookie from ESP
3175    emit_d32(cbuf, 0xbadb100d);
3176    }
3177    else {
3178    emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
3179    emit_d8(cbuf,0x7C);
3180    emit_d8(cbuf,0x24);
3181    emit_d8(cbuf,framesize); // Find majik cookie from ESP
3182    emit_d32(cbuf, 0xbadb100d);
3183    }
3184    // jmp EQ around INT3
3185    // QQQ TODO
3186    const int jump_around = 5; // size of call to breakpoint, 1 for CC
3187    emit_opcode(cbuf,0x74);
3188    emit_d8(cbuf, jump_around);
3189    // QQQ temporary
3190    emit_break(cbuf);
3191    // Die if stack mismatch
3192    // emit_opcode(cbuf,0xCC);
3193    }
3194    */
3195  %}
3199  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    // Convert the AT result of a preceding fast_lock/fast_unlock into the
    // modeled flags register: flags = 0 when AT == 0, else 0xFFFFFFFF.
3200    Register flags = $cr$$Register;
3201    Label L;
3203    MacroAssembler _masm(&cbuf);
    // flags = 0 (addu of R0 + R0 zeroes the destination).
3205    __ addu(flags, R0, R0);
3206    __ beq(AT, R0, L);
    // Branch-delay slot.
3207    __ delayed()->nop();
3208    __ move(flags, 0xFFFFFFFF);
3209    __ bind(L);
3210  %}
3212  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    // Slow-path subtype check: result := 0 when 'sub' is a subtype of
    // 'super', 1 otherwise. Clobbers $tmp ('length') and T9 as scratch.
3213    Register result = $result$$Register;
3214    Register sub = $sub$$Register;
3215    Register super = $super$$Register;
3216    Register length = $tmp$$Register;
3217    Register tmp = T9;
3218    Label miss;
3220    /* 2012/9/28 Jin: result may be the same as sub
3221     * 47c   B40: #	B21 B41 <- B20  Freq: 0.155379
3222     * 47c   	partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
3223     * 4bc   	mov   S2, NULL #@loadConP
3224     * 4c0   	beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
3225     */
3226    MacroAssembler _masm(&cbuf);
3227    Label done;
    // Falls through on success (subtype hit); branches to 'miss' on failure.
3228    __ check_klass_subtype_slow_path(sub, super, length, tmp,
3229                                     NULL, &miss,
3230                                     /*set_cond_codes:*/ true);
3231    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    // 0 == hit, matching the x86_64 convention (RDI == 0 on success).
3232    __ move(result, 0);
3233    __ b(done);
    // Branch-delay slot.
3234    __ nop();
3236    __ bind(miss);
3237    __ move(result, 1);
3238    __ bind(done);
3239  %}
3241 %}
3244 //---------MIPS FRAME--------------------------------------------------------------
3245 // Definition of frame structure and management information.
3246 //
3247 // S T A C K L A Y O U T Allocators stack-slot number
3248 // | (to get allocators register number
3249 // G Owned by | | v add SharedInfo::stack0)
3250 // r CALLER | |
3251 // o | +--------+ pad to even-align allocators stack-slot
3252 // w V | pad0 | numbers; owned by CALLER
3253 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3254 // h ^ | in | 5
3255 // | | args | 4 Holes in incoming args owned by SELF
3256 // | | old | | 3
3257 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3258 // v | | ret | 3 return address
3259 // Owned by +--------+
3260 // Self | pad2 | 2 pad to align old SP
3261 // | +--------+ 1
3262 // | | locks | 0
3263 // | +--------+----> SharedInfo::stack0, even aligned
3264 // | | pad1 | 11 pad to align new SP
3265 // | +--------+
3266 // | | | 10
3267 // | | spills | 9 spills
3268 // V | | 8 (pad0 slot for callee)
3269 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3270 // ^ | out | 7
3271 // | | args | 6 Holes in outgoing args owned by CALLEE
3272 // Owned by new | |
3273 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3274 // | |
3275 //
3276 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3277 // known from SELF's arguments and the Java calling convention.
3278 // Region 6-7 is determined per call site.
3279 // Note 2: If the calling convention leaves holes in the incoming argument
3280 // area, those holes are owned by SELF. Holes in the outgoing area
3281 // are owned by the CALLEE. Holes should not be necessary in the
3282 // incoming area, as the Java calling convention is completely under
3283 // the control of the AD file. Doubles can be sorted and packed to
3284 // avoid holes. Holes in the outgoing arguments may be necessary for
3285 // varargs C calling conventions.
3286 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3287 // even aligned with pad0 as needed.
3288 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3289 // region 6-11 is even aligned; it may be padded out more so that
3290 // the region from SP to FP meets the minimum stack alignment.
3291 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3292 // alignment. Region 11, pad1, may be dynamically extended so that
3293 // SP meets the minimum alignment.
3296 frame %{
    // Frame-management declarations consumed by the ADLC to generate
    // Matcher/Compile frame queries; see the stack-layout diagram above.
3298   stack_direction(TOWARDS_LOW);
3300   // These two registers define part of the calling convention
3301   // between compiled code and the interpreter.
3302   // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3303   // for more information. by yjl 3/16/2006
3305   inline_cache_reg(T1);                // Inline Cache Register
3306   interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
3307   /*
3308   inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
3309   interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
3310   */
3312   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3313   cisc_spilling_operand_name(indOffset32);
3315   // Number of stack slots consumed by locking an object
3316   // generate Compile::sync_stack_slots
  // Two 32-bit slots in LP64 since a lock record entry is pointer-sized.
3317 #ifdef _LP64
3318   sync_stack_slots(2);
3319 #else
3320   sync_stack_slots(1);
3321 #endif
3323   frame_pointer(SP);
3325   // Interpreter stores its frame pointer in a register which is
3326   // stored to the stack by I2CAdaptors.
3327   // I2CAdaptors convert from interpreted java to compiled java.
3329   interpreter_frame_pointer(FP);
3331   // generate Matcher::stack_alignment
3332   stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3334   // Number of stack slots between incoming argument block and the start of
3335   // a new frame.  The PROLOG must add this many slots to the stack.  The
3336   // EPILOG must remove this many slots.  Intel needs one slot for
3337   // return address.
3338   // generate Matcher::in_preserve_stack_slots
3339   //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3340   in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3342   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3343   // for calls to C.  Supports the var-args backing area for register parms.
3344   varargs_C_out_slots_killed(0);
3346   // The after-PROLOG location of the return address.  Location of
3347   // return address specifies a type (REG or STACK) and a number
3348   // representing the register number (i.e. - use a register name) or
3349   // stack slot.
3350   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3351   // Otherwise, it is above the locks and verification slot and alignment word
3352   //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3353   return_addr(REG RA);
3355   // Body of function which returns an integer array locating
3356   // arguments either in registers or in stack slots.  Passed an array
3357   // of ideal registers called "sig" and a "length" count.  Stack-slot
3358   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3359   // arguments for a CALLEE.  Incoming stack arguments are
3360   // automatically biased by the preserve_stack_slots field above.
3363   // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3364   // StartNode::calling_convention call this. by yjl 3/16/2006
3365   calling_convention %{
3366     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3367   %}
3372   // Body of function which returns an integer array locating
3373   // arguments either in registers or in stack slots.  Passed an array
3374   // of ideal registers called "sig" and a "length" count.  Stack-slot
3375   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3376   // arguments for a CALLEE.  Incoming stack arguments are
3377   // automatically biased by the preserve_stack_slots field above.
3380   // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3381   c_calling_convention %{
3382     (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3383   %}
3386   // Location of C & interpreter return values
3387   // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3388   // SEE Matcher::match. by yjl 3/16/2006
  // Tables are indexed by ideal register kind; hi == OptoReg::Bad marks a
  // value that occupies a single 32-bit slot. Integer/pointer results come
  // back in V0(/V0_H), floating-point results in F0(/F0_H).
3389   c_return_value %{
3390     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3391                                                   /* --  , --  , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3392     static int lo[Op_RegL+1] = { 0, 0, V0_num,       V0_num,       V0_num,      F0_num,      F0_num,     V0_num };
3393     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num,    OptoReg::Bad, F0_H_num,   V0_H_num };
3394     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3395   %}
3397   // Location of return values
3398   // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3399   // SEE Matcher::match. by yjl 3/16/2006
  // Same register assignment as c_return_value above (Java and C calls
  // return results in the same registers on this port).
3401   return_value %{
3402     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3403                                                   /* --  , --  , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3404     static int lo[Op_RegL+1] = { 0, 0, V0_num,       V0_num,       V0_num,      F0_num,      F0_num,     V0_num };
3405     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num,    OptoReg::Bad, F0_H_num,   V0_H_num};
3406     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3407   %}
3409 %}
3411 //----------ATTRIBUTES---------------------------------------------------------
3412 //----------Operand Attributes-------------------------------------------------
3413 op_attrib op_cost(0); // Required cost attribute
3415 //----------Instruction Attributes---------------------------------------------
3416 ins_attrib ins_cost(100); // Required cost attribute
3417 ins_attrib ins_size(32); // Required size attribute (in bits)
3418 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3419 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3420 // non-matching short branch variant of some
3421 // long branch?
3422 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3423 // specifies the alignment that some part of the instruction (not
3424 // necessarily the start) requires. If > 1, a compute_padding()
3425 // function must be provided for the instruction
3427 //----------OPERANDS-----------------------------------------------------------
3428 // Operand definitions must precede instruction definitions for correct parsing
3429 // in the ADLC because operands constitute user defined types which are used in
3430 // instruction definitions.
3433 // Flags register, used as output of compare instructions
3434 operand FlagsReg() %{
  // MIPS has no hardware condition-flags register; "flags" are modeled as a
  // general register drawn from the mips_flags class. The "EFLAGS" format
  // string is kept for x86-style debug dumps only.
3435   constraint(ALLOC_IN_RC(mips_flags));
3436   match(RegFlags);
3438   format %{ "EFLAGS" %}
3439   interface(REG_INTER);
3440 %}
3442 //----------Simple Operands----------------------------------------------------
3443 //TODO: Should we need to define some more special immediate number ?
3444 // Immediate Operands
3445 // Integer Immediate
3446 operand immI() %{
3447 match(ConI);
3448 //TODO: should not match immI8 here LEE
3449 match(immI8);
3451 op_cost(20);
3452 format %{ %}
3453 interface(CONST_INTER);
3454 %}
3456 // Long Immediate 8-bit
3457 operand immL8()
3458 %{
3459 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3460 match(ConL);
3462 op_cost(5);
3463 format %{ %}
3464 interface(CONST_INTER);
3465 %}
3467 // Constant for test vs zero
3468 operand immI0() %{
3469 predicate(n->get_int() == 0);
3470 match(ConI);
3472 op_cost(0);
3473 format %{ %}
3474 interface(CONST_INTER);
3475 %}
3477 // Constant for increment
3478 operand immI1() %{
3479 predicate(n->get_int() == 1);
3480 match(ConI);
3482 op_cost(0);
3483 format %{ %}
3484 interface(CONST_INTER);
3485 %}
3487 // Constant for decrement
3488 operand immI_M1() %{
3489 predicate(n->get_int() == -1);
3490 match(ConI);
3492 op_cost(0);
3493 format %{ %}
3494 interface(CONST_INTER);
3495 %}
3497 // Valid scale values for addressing modes
3498 operand immI2() %{
3499 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3500 match(ConI);
3502 format %{ %}
3503 interface(CONST_INTER);
3504 %}
3506 operand immI8() %{
3507 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3508 match(ConI);
3510 op_cost(5);
3511 format %{ %}
3512 interface(CONST_INTER);
3513 %}
3515 operand immI16() %{
3516 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3517 match(ConI);
3519 op_cost(10);
3520 format %{ %}
3521 interface(CONST_INTER);
3522 %}
3524 // Constant for long shifts
3525 operand immI_32() %{
3526 predicate( n->get_int() == 32 );
3527 match(ConI);
3529 op_cost(0);
3530 format %{ %}
3531 interface(CONST_INTER);
3532 %}
3534 operand immI_0_31() %{
3535 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3536 match(ConI);
3538 op_cost(0);
3539 format %{ %}
3540 interface(CONST_INTER);
3541 %}
3543 operand immI_32_63() %{
3544 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3545 match(ConI);
3546 op_cost(0);
3548 format %{ %}
3549 interface(CONST_INTER);
3550 %}
3552 operand immI16_sub() %{
  // Signed 16-bit immediate excluding -32768 — presumably so the negated
  // value also fits in a simm16 field (-(-32768) overflows); confirm at the
  // instruct definitions that use this operand for subtract-as-add patterns.
3553   predicate((-32767 <= n->get_int()) && (n->get_int() <= 32767));
3554   match(ConI);
3556   op_cost(10);
3557   format %{ %}
3558   interface(CONST_INTER);
3559 %}
3561 operand immI_0_65535() %{
3562 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3563 match(ConI);
3564 op_cost(0);
3566 format %{ %}
3567 interface(CONST_INTER);
3568 %}
3570 operand immI_1() %{
3571 predicate( n->get_int() == 1 );
3572 match(ConI);
3574 op_cost(0);
3575 format %{ %}
3576 interface(CONST_INTER);
3577 %}
3579 operand immI_2() %{
3580 predicate( n->get_int() == 2 );
3581 match(ConI);
3583 op_cost(0);
3584 format %{ %}
3585 interface(CONST_INTER);
3586 %}
3588 operand immI_3() %{
3589 predicate( n->get_int() == 3 );
3590 match(ConI);
3592 op_cost(0);
3593 format %{ %}
3594 interface(CONST_INTER);
3595 %}
3597 // Immediates for special shifts (sign extend)
3599 // Constants for increment
3600 operand immI_16() %{
3601 predicate( n->get_int() == 16 );
3602 match(ConI);
3604 format %{ %}
3605 interface(CONST_INTER);
3606 %}
3608 operand immI_24() %{
3609 predicate( n->get_int() == 24 );
3610 match(ConI);
3612 format %{ %}
3613 interface(CONST_INTER);
3614 %}
3616 // Constant for byte-wide masking
3617 operand immI_255() %{
3618 predicate( n->get_int() == 255 );
3619 match(ConI);
3621 format %{ %}
3622 interface(CONST_INTER);
3623 %}
3625 // Pointer Immediate
3626 operand immP() %{
3627 match(ConP);
3629 op_cost(10);
3630 format %{ %}
3631 interface(CONST_INTER);
3632 %}
3634 operand immP31()
3635 %{
  // Pointer constant with no relocation entry whose value fits in the low
  // 31 bits (high bits all zero), so it can be materialized directly
  // without relocation support.
3636   predicate(n->as_Type()->type()->reloc() == relocInfo::none
3637             && (n->get_ptr() >> 31) == 0);
3638   match(ConP);
3640   op_cost(5);
3641   format %{ %}
3642   interface(CONST_INTER);
3643 %}
3645 // NULL Pointer Immediate
3646 operand immP0() %{
3647 predicate( n->get_ptr() == 0 );
3648 match(ConP);
3649 op_cost(0);
3651 format %{ %}
3652 interface(CONST_INTER);
3653 %}
3655 // Pointer Immediate
3656 operand immN() %{
3657 match(ConN);
3659 op_cost(10);
3660 format %{ %}
3661 interface(CONST_INTER);
3662 %}
3664 operand immNKlass() %{
3665 match(ConNKlass);
3667 op_cost(10);
3668 format %{ %}
3669 interface(CONST_INTER);
3670 %}
3672 // NULL Pointer Immediate
3673 operand immN0() %{
3674 predicate(n->get_narrowcon() == 0);
3675 match(ConN);
3677 op_cost(5);
3678 format %{ %}
3679 interface(CONST_INTER);
3680 %}
3682 // Long Immediate
3683 operand immL() %{
3684 match(ConL);
3686 op_cost(20);
3687 format %{ %}
3688 interface(CONST_INTER);
3689 %}
3691 // Long Immediate zero
3692 operand immL0() %{
3693 predicate( n->get_long() == 0L );
3694 match(ConL);
3695 op_cost(0);
3697 format %{ %}
3698 interface(CONST_INTER);
3699 %}
3701 // Long Immediate zero
3702 operand immL_M1() %{
3703 predicate( n->get_long() == -1L );
3704 match(ConL);
3705 op_cost(0);
3707 format %{ %}
3708 interface(CONST_INTER);
3709 %}
3711 // Long immediate from 0 to 127.
3712 // Used for a shorter form of long mul by 10.
3713 operand immL_127() %{
3714 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3715 match(ConL);
3716 op_cost(0);
3718 format %{ %}
3719 interface(CONST_INTER);
3720 %}
3722 operand immL_0_65535() %{
3723 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
3724 match(ConL);
3725 op_cost(0);
3727 format %{ %}
3728 interface(CONST_INTER);
3729 %}
3731 operand immL16() %{
3732 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
3733 match(ConL);
3735 op_cost(10);
3736 format %{ %}
3737 interface(CONST_INTER);
3738 %}
3740 // Long Immediate: low 32-bit mask
3741 // Long Immediate: low 32-bit mask
  // Exactly 0xFFFFFFFF — matched by patterns that implement zero-extension
  // of a 32-bit value via an AND with this mask.
3742 operand immL_32bits() %{
3743   predicate(n->get_long() == 0xFFFFFFFFL);
3744   match(ConL);
3745   op_cost(20);
3746   format %{ %}
3747   interface(CONST_INTER);
3748 %}
3750 // Long Immediate 32-bit signed
3751 operand immL32()
3752 %{
  // 64-bit constant equal to the sign-extension of its own low 32 bits,
  // i.e. representable as a signed 32-bit immediate.
3753   predicate(n->get_long() == (int) (n->get_long()));
3754   match(ConL);
3756   op_cost(15);
3757   format %{ %}
3758   interface(CONST_INTER);
3759 %}
3762 //single-precision floating-point zero
3763 operand immF0() %{
3764 predicate(jint_cast(n->getf()) == 0);
3765 match(ConF);
3767 op_cost(5);
3768 format %{ %}
3769 interface(CONST_INTER);
3770 %}
3772 //single-precision floating-point immediate
3773 operand immF() %{
3774 match(ConF);
3776 op_cost(20);
3777 format %{ %}
3778 interface(CONST_INTER);
3779 %}
3781 //double-precision floating-point zero
3782 operand immD0() %{
3783 predicate(jlong_cast(n->getd()) == 0);
3784 match(ConD);
3786 op_cost(5);
3787 format %{ %}
3788 interface(CONST_INTER);
3789 %}
3791 //double-precision floating-point immediate
3792 operand immD() %{
3793 match(ConD);
3795 op_cost(20);
3796 format %{ %}
3797 interface(CONST_INTER);
3798 %}
3800 // Register Operands
3801 // Integer Register
3802 operand mRegI() %{
3803 constraint(ALLOC_IN_RC(int_reg));
3804 match(RegI);
3806 format %{ %}
3807 interface(REG_INTER);
3808 %}
3810 operand no_Ax_mRegI() %{
3811 constraint(ALLOC_IN_RC(no_Ax_int_reg));
3812 match(RegI);
3813 match(mRegI);
3815 format %{ %}
3816 interface(REG_INTER);
3817 %}
3819 operand mS0RegI() %{
3820 constraint(ALLOC_IN_RC(s0_reg));
3821 match(RegI);
3822 match(mRegI);
3824 format %{ "S0" %}
3825 interface(REG_INTER);
3826 %}
3828 operand mS1RegI() %{
3829 constraint(ALLOC_IN_RC(s1_reg));
3830 match(RegI);
3831 match(mRegI);
3833 format %{ "S1" %}
3834 interface(REG_INTER);
3835 %}
3837 operand mS2RegI() %{
3838 constraint(ALLOC_IN_RC(s2_reg));
3839 match(RegI);
3840 match(mRegI);
3842 format %{ "S2" %}
3843 interface(REG_INTER);
3844 %}
3846 operand mS3RegI() %{
3847 constraint(ALLOC_IN_RC(s3_reg));
3848 match(RegI);
3849 match(mRegI);
3851 format %{ "S3" %}
3852 interface(REG_INTER);
3853 %}
3855 operand mS4RegI() %{
3856 constraint(ALLOC_IN_RC(s4_reg));
3857 match(RegI);
3858 match(mRegI);
3860 format %{ "S4" %}
3861 interface(REG_INTER);
3862 %}
3864 operand mS5RegI() %{
3865 constraint(ALLOC_IN_RC(s5_reg));
3866 match(RegI);
3867 match(mRegI);
3869 format %{ "S5" %}
3870 interface(REG_INTER);
3871 %}
3873 operand mS6RegI() %{
3874 constraint(ALLOC_IN_RC(s6_reg));
3875 match(RegI);
3876 match(mRegI);
3878 format %{ "S6" %}
3879 interface(REG_INTER);
3880 %}
3882 operand mS7RegI() %{
3883 constraint(ALLOC_IN_RC(s7_reg));
3884 match(RegI);
3885 match(mRegI);
3887 format %{ "S7" %}
3888 interface(REG_INTER);
3889 %}
3892 operand mT0RegI() %{
3893 constraint(ALLOC_IN_RC(t0_reg));
3894 match(RegI);
3895 match(mRegI);
3897 format %{ "T0" %}
3898 interface(REG_INTER);
3899 %}
3901 operand mT1RegI() %{
3902 constraint(ALLOC_IN_RC(t1_reg));
3903 match(RegI);
3904 match(mRegI);
3906 format %{ "T1" %}
3907 interface(REG_INTER);
3908 %}
3910 operand mT2RegI() %{
3911 constraint(ALLOC_IN_RC(t2_reg));
3912 match(RegI);
3913 match(mRegI);
3915 format %{ "T2" %}
3916 interface(REG_INTER);
3917 %}
3919 operand mT3RegI() %{
3920 constraint(ALLOC_IN_RC(t3_reg));
3921 match(RegI);
3922 match(mRegI);
3924 format %{ "T3" %}
3925 interface(REG_INTER);
3926 %}
3928 operand mT8RegI() %{
3929 constraint(ALLOC_IN_RC(t8_reg));
3930 match(RegI);
3931 match(mRegI);
3933 format %{ "T8" %}
3934 interface(REG_INTER);
3935 %}
3937 operand mT9RegI() %{
3938 constraint(ALLOC_IN_RC(t9_reg));
3939 match(RegI);
3940 match(mRegI);
3942 format %{ "T9" %}
3943 interface(REG_INTER);
3944 %}
3946 operand mA0RegI() %{
3947 constraint(ALLOC_IN_RC(a0_reg));
3948 match(RegI);
3949 match(mRegI);
3951 format %{ "A0" %}
3952 interface(REG_INTER);
3953 %}
3955 operand mA1RegI() %{
3956 constraint(ALLOC_IN_RC(a1_reg));
3957 match(RegI);
3958 match(mRegI);
3960 format %{ "A1" %}
3961 interface(REG_INTER);
3962 %}
3964 operand mA2RegI() %{
3965 constraint(ALLOC_IN_RC(a2_reg));
3966 match(RegI);
3967 match(mRegI);
3969 format %{ "A2" %}
3970 interface(REG_INTER);
3971 %}
3973 operand mA3RegI() %{
3974 constraint(ALLOC_IN_RC(a3_reg));
3975 match(RegI);
3976 match(mRegI);
3978 format %{ "A3" %}
3979 interface(REG_INTER);
3980 %}
3982 operand mA4RegI() %{
3983 constraint(ALLOC_IN_RC(a4_reg));
3984 match(RegI);
3985 match(mRegI);
3987 format %{ "A4" %}
3988 interface(REG_INTER);
3989 %}
3991 operand mA5RegI() %{
3992 constraint(ALLOC_IN_RC(a5_reg));
3993 match(RegI);
3994 match(mRegI);
3996 format %{ "A5" %}
3997 interface(REG_INTER);
3998 %}
4000 operand mA6RegI() %{
4001 constraint(ALLOC_IN_RC(a6_reg));
4002 match(RegI);
4003 match(mRegI);
4005 format %{ "A6" %}
4006 interface(REG_INTER);
4007 %}
4009 operand mA7RegI() %{
4010 constraint(ALLOC_IN_RC(a7_reg));
4011 match(RegI);
4012 match(mRegI);
4014 format %{ "A7" %}
4015 interface(REG_INTER);
4016 %}
4018 operand mV0RegI() %{
4019 constraint(ALLOC_IN_RC(v0_reg));
4020 match(RegI);
4021 match(mRegI);
4023 format %{ "V0" %}
4024 interface(REG_INTER);
4025 %}
4027 operand mV1RegI() %{
4028 constraint(ALLOC_IN_RC(v1_reg));
4029 match(RegI);
4030 match(mRegI);
4032 format %{ "V1" %}
4033 interface(REG_INTER);
4034 %}
4036 operand mRegN() %{
4037 constraint(ALLOC_IN_RC(int_reg));
4038 match(RegN);
4040 format %{ %}
4041 interface(REG_INTER);
4042 %}
4044 operand t0_RegN() %{
4045 constraint(ALLOC_IN_RC(t0_reg));
4046 match(RegN);
4047 match(mRegN);
4049 format %{ %}
4050 interface(REG_INTER);
4051 %}
4053 operand t1_RegN() %{
4054 constraint(ALLOC_IN_RC(t1_reg));
4055 match(RegN);
4056 match(mRegN);
4058 format %{ %}
4059 interface(REG_INTER);
4060 %}
4062 operand t2_RegN() %{
4063 constraint(ALLOC_IN_RC(t2_reg));
4064 match(RegN);
4065 match(mRegN);
4067 format %{ %}
4068 interface(REG_INTER);
4069 %}
4071 operand t3_RegN() %{
4072 constraint(ALLOC_IN_RC(t3_reg));
4073 match(RegN);
4074 match(mRegN);
4076 format %{ %}
4077 interface(REG_INTER);
4078 %}
4080 operand t8_RegN() %{
4081 constraint(ALLOC_IN_RC(t8_reg));
4082 match(RegN);
4083 match(mRegN);
4085 format %{ %}
4086 interface(REG_INTER);
4087 %}
4089 operand t9_RegN() %{
4090 constraint(ALLOC_IN_RC(t9_reg));
4091 match(RegN);
4092 match(mRegN);
4094 format %{ %}
4095 interface(REG_INTER);
4096 %}
4098 operand a0_RegN() %{
4099 constraint(ALLOC_IN_RC(a0_reg));
4100 match(RegN);
4101 match(mRegN);
4103 format %{ %}
4104 interface(REG_INTER);
4105 %}
4107 operand a1_RegN() %{
4108 constraint(ALLOC_IN_RC(a1_reg));
4109 match(RegN);
4110 match(mRegN);
4112 format %{ %}
4113 interface(REG_INTER);
4114 %}
4116 operand a2_RegN() %{
4117 constraint(ALLOC_IN_RC(a2_reg));
4118 match(RegN);
4119 match(mRegN);
4121 format %{ %}
4122 interface(REG_INTER);
4123 %}
4125 operand a3_RegN() %{
4126 constraint(ALLOC_IN_RC(a3_reg));
4127 match(RegN);
4128 match(mRegN);
4130 format %{ %}
4131 interface(REG_INTER);
4132 %}
4134 operand a4_RegN() %{
4135 constraint(ALLOC_IN_RC(a4_reg));
4136 match(RegN);
4137 match(mRegN);
4139 format %{ %}
4140 interface(REG_INTER);
4141 %}
4143 operand a5_RegN() %{
4144 constraint(ALLOC_IN_RC(a5_reg));
4145 match(RegN);
4146 match(mRegN);
4148 format %{ %}
4149 interface(REG_INTER);
4150 %}
4152 operand a6_RegN() %{
4153 constraint(ALLOC_IN_RC(a6_reg));
4154 match(RegN);
4155 match(mRegN);
4157 format %{ %}
4158 interface(REG_INTER);
4159 %}
4161 operand a7_RegN() %{
4162 constraint(ALLOC_IN_RC(a7_reg));
4163 match(RegN);
4164 match(mRegN);
4166 format %{ %}
4167 interface(REG_INTER);
4168 %}
4170 operand s0_RegN() %{
4171 constraint(ALLOC_IN_RC(s0_reg));
4172 match(RegN);
4173 match(mRegN);
4175 format %{ %}
4176 interface(REG_INTER);
4177 %}
4179 operand s1_RegN() %{
4180 constraint(ALLOC_IN_RC(s1_reg));
4181 match(RegN);
4182 match(mRegN);
4184 format %{ %}
4185 interface(REG_INTER);
4186 %}
4188 operand s2_RegN() %{
4189 constraint(ALLOC_IN_RC(s2_reg));
4190 match(RegN);
4191 match(mRegN);
4193 format %{ %}
4194 interface(REG_INTER);
4195 %}
4197 operand s3_RegN() %{
4198 constraint(ALLOC_IN_RC(s3_reg));
4199 match(RegN);
4200 match(mRegN);
4202 format %{ %}
4203 interface(REG_INTER);
4204 %}
4206 operand s4_RegN() %{
4207 constraint(ALLOC_IN_RC(s4_reg));
4208 match(RegN);
4209 match(mRegN);
4211 format %{ %}
4212 interface(REG_INTER);
4213 %}
4215 operand s5_RegN() %{
4216 constraint(ALLOC_IN_RC(s5_reg));
4217 match(RegN);
4218 match(mRegN);
4220 format %{ %}
4221 interface(REG_INTER);
4222 %}
4224 operand s6_RegN() %{
4225 constraint(ALLOC_IN_RC(s6_reg));
4226 match(RegN);
4227 match(mRegN);
4229 format %{ %}
4230 interface(REG_INTER);
4231 %}
4233 operand s7_RegN() %{
4234 constraint(ALLOC_IN_RC(s7_reg));
4235 match(RegN);
4236 match(mRegN);
4238 format %{ %}
4239 interface(REG_INTER);
4240 %}
4242 operand v0_RegN() %{
4243 constraint(ALLOC_IN_RC(v0_reg));
4244 match(RegN);
4245 match(mRegN);
4247 format %{ %}
4248 interface(REG_INTER);
4249 %}
4251 operand v1_RegN() %{
4252 constraint(ALLOC_IN_RC(v1_reg));
4253 match(RegN);
4254 match(mRegN);
4256 format %{ %}
4257 interface(REG_INTER);
4258 %}
4260 // Pointer Register
4261 operand mRegP() %{
4262 constraint(ALLOC_IN_RC(p_reg));
4263 match(RegP);
4265 format %{ %}
4266 interface(REG_INTER);
4267 %}
4269 operand no_T8_mRegP() %{
4270 constraint(ALLOC_IN_RC(no_T8_p_reg));
4271 match(RegP);
4272 match(mRegP);
4274 format %{ %}
4275 interface(REG_INTER);
4276 %}
4278 operand s0_RegP()
4279 %{
4280 constraint(ALLOC_IN_RC(s0_long_reg));
4281 match(RegP);
4282 match(mRegP);
4283 match(no_T8_mRegP);
4285 format %{ %}
4286 interface(REG_INTER);
4287 %}
4289 operand s1_RegP()
4290 %{
4291 constraint(ALLOC_IN_RC(s1_long_reg));
4292 match(RegP);
4293 match(mRegP);
4294 match(no_T8_mRegP);
4296 format %{ %}
4297 interface(REG_INTER);
4298 %}
4300 operand s2_RegP()
4301 %{
4302 constraint(ALLOC_IN_RC(s2_long_reg));
4303 match(RegP);
4304 match(mRegP);
4305 match(no_T8_mRegP);
4307 format %{ %}
4308 interface(REG_INTER);
4309 %}
4311 operand s3_RegP()
4312 %{
4313 constraint(ALLOC_IN_RC(s3_long_reg));
4314 match(RegP);
4315 match(mRegP);
4316 match(no_T8_mRegP);
4318 format %{ %}
4319 interface(REG_INTER);
4320 %}
4322 operand s4_RegP()
4323 %{
4324 constraint(ALLOC_IN_RC(s4_long_reg));
4325 match(RegP);
4326 match(mRegP);
4327 match(no_T8_mRegP);
4329 format %{ %}
4330 interface(REG_INTER);
4331 %}
4333 operand s5_RegP()
4334 %{
4335 constraint(ALLOC_IN_RC(s5_long_reg));
4336 match(RegP);
4337 match(mRegP);
4338 match(no_T8_mRegP);
4340 format %{ %}
4341 interface(REG_INTER);
4342 %}
4344 operand s6_RegP()
4345 %{
4346 constraint(ALLOC_IN_RC(s6_long_reg));
4347 match(RegP);
4348 match(mRegP);
4349 match(no_T8_mRegP);
4351 format %{ %}
4352 interface(REG_INTER);
4353 %}
4355 operand s7_RegP()
4356 %{
4357 constraint(ALLOC_IN_RC(s7_long_reg));
4358 match(RegP);
4359 match(mRegP);
4360 match(no_T8_mRegP);
4362 format %{ %}
4363 interface(REG_INTER);
4364 %}
4366 operand t0_RegP()
4367 %{
4368 constraint(ALLOC_IN_RC(t0_long_reg));
4369 match(RegP);
4370 match(mRegP);
4371 match(no_T8_mRegP);
4373 format %{ %}
4374 interface(REG_INTER);
4375 %}
4377 operand t1_RegP()
4378 %{
4379 constraint(ALLOC_IN_RC(t1_long_reg));
4380 match(RegP);
4381 match(mRegP);
4382 match(no_T8_mRegP);
4384 format %{ %}
4385 interface(REG_INTER);
4386 %}
4388 operand t2_RegP()
4389 %{
4390 constraint(ALLOC_IN_RC(t2_long_reg));
4391 match(RegP);
4392 match(mRegP);
4393 match(no_T8_mRegP);
4395 format %{ %}
4396 interface(REG_INTER);
4397 %}
4399 operand t3_RegP()
4400 %{
4401 constraint(ALLOC_IN_RC(t3_long_reg));
4402 match(RegP);
4403 match(mRegP);
4404 match(no_T8_mRegP);
4406 format %{ %}
4407 interface(REG_INTER);
4408 %}
4410 operand t8_RegP()
4411 %{
4412 constraint(ALLOC_IN_RC(t8_long_reg));
4413 match(RegP);
4414 match(mRegP);
4416 format %{ %}
4417 interface(REG_INTER);
4418 %}
4420 operand t9_RegP()
4421 %{
4422 constraint(ALLOC_IN_RC(t9_long_reg));
4423 match(RegP);
4424 match(mRegP);
4425 match(no_T8_mRegP);
4427 format %{ %}
4428 interface(REG_INTER);
4429 %}
4431 operand a0_RegP()
4432 %{
4433 constraint(ALLOC_IN_RC(a0_long_reg));
4434 match(RegP);
4435 match(mRegP);
4436 match(no_T8_mRegP);
4438 format %{ %}
4439 interface(REG_INTER);
4440 %}
4442 operand a1_RegP()
4443 %{
4444 constraint(ALLOC_IN_RC(a1_long_reg));
4445 match(RegP);
4446 match(mRegP);
4447 match(no_T8_mRegP);
4449 format %{ %}
4450 interface(REG_INTER);
4451 %}
4453 operand a2_RegP()
4454 %{
4455 constraint(ALLOC_IN_RC(a2_long_reg));
4456 match(RegP);
4457 match(mRegP);
4458 match(no_T8_mRegP);
4460 format %{ %}
4461 interface(REG_INTER);
4462 %}
4464 operand a3_RegP()
4465 %{
4466 constraint(ALLOC_IN_RC(a3_long_reg));
4467 match(RegP);
4468 match(mRegP);
4469 match(no_T8_mRegP);
4471 format %{ %}
4472 interface(REG_INTER);
4473 %}
4475 operand a4_RegP()
4476 %{
4477 constraint(ALLOC_IN_RC(a4_long_reg));
4478 match(RegP);
4479 match(mRegP);
4480 match(no_T8_mRegP);
4482 format %{ %}
4483 interface(REG_INTER);
4484 %}
4487 operand a5_RegP()
4488 %{
4489 constraint(ALLOC_IN_RC(a5_long_reg));
4490 match(RegP);
4491 match(mRegP);
4492 match(no_T8_mRegP);
4494 format %{ %}
4495 interface(REG_INTER);
4496 %}
4498 operand a6_RegP()
4499 %{
4500 constraint(ALLOC_IN_RC(a6_long_reg));
4501 match(RegP);
4502 match(mRegP);
4503 match(no_T8_mRegP);
4505 format %{ %}
4506 interface(REG_INTER);
4507 %}
4509 operand a7_RegP()
4510 %{
4511 constraint(ALLOC_IN_RC(a7_long_reg));
4512 match(RegP);
4513 match(mRegP);
4514 match(no_T8_mRegP);
4516 format %{ %}
4517 interface(REG_INTER);
4518 %}
4520 operand v0_RegP()
4521 %{
4522 constraint(ALLOC_IN_RC(v0_long_reg));
4523 match(RegP);
4524 match(mRegP);
4525 match(no_T8_mRegP);
4527 format %{ %}
4528 interface(REG_INTER);
4529 %}
4531 operand v1_RegP()
4532 %{
4533 constraint(ALLOC_IN_RC(v1_long_reg));
4534 match(RegP);
4535 match(mRegP);
4536 match(no_T8_mRegP);
4538 format %{ %}
4539 interface(REG_INTER);
4540 %}
4542 /*
4543 operand mSPRegP(mRegP reg) %{
4544 constraint(ALLOC_IN_RC(sp_reg));
4545 match(reg);
4547 format %{ "SP" %}
4548 interface(REG_INTER);
4549 %}
4551 operand mFPRegP(mRegP reg) %{
4552 constraint(ALLOC_IN_RC(fp_reg));
4553 match(reg);
4555 format %{ "FP" %}
4556 interface(REG_INTER);
4557 %}
4558 */
// Long (64-bit integer) register operands: the generic class first,
// then one operand per specific register so instructions can demand
// a fixed register (e.g. call conventions, div/mod fixups).
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands
// Single-precision float register operand.
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double Precision Floating register operands
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Indirect Memory Operand: plain [reg] addressing.
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand: [reg + imm8]
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset] where the index is an int widened to long
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
// The predicate walks the match tree down to the ConvI2L input and
// requires its value range to be non-negative, so dropping the
// sign-extension when scaling is safe.
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
// [base + index<<scale + offset] for compressed Oops.
// Only valid when compressed oops decode with a zero shift; the second
// predicate term requires the (ConvI2L index) value range to be
// non-negative (same condition as basePosIndexScaleOffset8).
operand indPosIndexScaleOffset8Narrow(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  // Label now matches the operand name (was "indPosIndexScaleOffsetNarrow").
  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
//FIXME: I think it's better to limit the immI to be 16-bit at most!
// Indirect Memory Plus Long Offset Operand: [reg + imm32]
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register: [addr + index]
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}
// Memory operands whose base is a compressed klass pointer.  All are
// guarded on narrow_klass_shift() == 0, i.e. decoding is a plain
// base-register add (or identity), so the narrow value can feed the
// address computation directly.
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Operand with a compressed-oop base (zero shift only).
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}
//----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use it's address expression again after loading
// the first word of the long. If the load-long destination overlaps with
// registers used in the addressing expression, the 2nd half will be loaded
// from a clobbered address. Fix this by requiring that load-long use
// address registers that do not overlap with the load-long target.

// load-long support: high op_cost discourages use outside load-long.
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long: [reg]
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand: [reg + imm32]
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}


// Comparison Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// NOTE: encodings are identical to cmpOp; unsignedness is carried by
// the comparison instruction itself (see prose above).
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
5097 /*
5098 // Comparison Code, unsigned compare. Used by FP also, with
5099 // C2 (unordered) turned into GT or LT already. The other bits
5100 // C0 and C3 are turned into Carry & Zero flags.
5101 operand cmpOpU() %{
5102 match(Bool);
5104 format %{ "" %}
5105 interface(COND_INTER) %{
5106 equal(0x4);
5107 not_equal(0x5);
5108 less(0x2);
5109 greater_equal(0x3);
5110 less_equal(0x6);
5111 greater(0x7);
5112 %}
5113 %}
5114 */
5115 /*
5116 // Comparison Code for FP conditional move
5117 operand cmpOp_fcmov() %{
5118 match(Bool);
5120 format %{ "" %}
5121 interface(COND_INTER) %{
5122 equal (0x01);
5123 not_equal (0x02);
5124 greater (0x03);
5125 greater_equal(0x04);
5126 less (0x05);
5127 less_equal (0x06);
5128 %}
5129 %}
// Comparison Code used in long compares
5132 operand cmpOp_commute() %{
5133 match(Bool);
5135 format %{ "" %}
5136 interface(COND_INTER) %{
5137 equal(0x4);
5138 not_equal(0x5);
5139 less(0xF);
5140 greater_equal(0xE);
5141 less_equal(0xD);
5142 greater(0xC);
5143 %}
5144 %}
5145 */
5147 /*
5148 //----------Special Memory Operands--------------------------------------------
5149 // Stack Slot Operand - This operand is used for loading and storing temporary
5150 // values on the stack where a match requires a value to
5151 // flow through memory.
5152 operand stackSlotP(sRegP reg) %{
5153 constraint(ALLOC_IN_RC(stack_slots));
5154 // No match rule because this operand is only generated in matching
5155 op_cost(50);
5156 format %{ "[$reg]" %}
5157 interface(MEMORY_INTER) %{
5158 base(0x1d); // SP
5159 index(0x0); // No Index
5160 scale(0x0); // No Scale
5161 disp($reg); // Stack Offset
5162 %}
5163 %}
5165 operand stackSlotI(sRegI reg) %{
5166 constraint(ALLOC_IN_RC(stack_slots));
5167 // No match rule because this operand is only generated in matching
5168 op_cost(50);
5169 format %{ "[$reg]" %}
5170 interface(MEMORY_INTER) %{
5171 base(0x1d); // SP
5172 index(0x0); // No Index
5173 scale(0x0); // No Scale
5174 disp($reg); // Stack Offset
5175 %}
5176 %}
5178 operand stackSlotF(sRegF reg) %{
5179 constraint(ALLOC_IN_RC(stack_slots));
5180 // No match rule because this operand is only generated in matching
5181 op_cost(50);
5182 format %{ "[$reg]" %}
5183 interface(MEMORY_INTER) %{
5184 base(0x1d); // SP
5185 index(0x0); // No Index
5186 scale(0x0); // No Scale
5187 disp($reg); // Stack Offset
5188 %}
5189 %}
5191 operand stackSlotD(sRegD reg) %{
5192 constraint(ALLOC_IN_RC(stack_slots));
5193 // No match rule because this operand is only generated in matching
5194 op_cost(50);
5195 format %{ "[$reg]" %}
5196 interface(MEMORY_INTER) %{
5197 base(0x1d); // SP
5198 index(0x0); // No Index
5199 scale(0x0); // No Scale
5200 disp($reg); // Stack Offset
5201 %}
5202 %}
5204 operand stackSlotL(sRegL reg) %{
5205 constraint(ALLOC_IN_RC(stack_slots));
5206 // No match rule because this operand is only generated in matching
5207 op_cost(50);
5208 format %{ "[$reg]" %}
5209 interface(MEMORY_INTER) %{
5210 base(0x1d); // SP
5211 index(0x0); // No Index
5212 scale(0x0); // No Scale
5213 disp($reg); // Stack Offset
5214 %}
5215 %}
5216 */
//------------------------OPERAND CLASSES--------------------------------------
//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// All addressing forms accepted wherever an instruction takes a "memory" operand.
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, basePosIndexScaleOffset8, indPosIndexScaleOffset8Narrow);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;          // Fixed size instructions
  branch_has_delay_slot;            // branch have delay slot in gs2
  max_instructions_per_bundle = 1;  // 1 instruction per bundle
  max_bundles_per_cycle = 4;        // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;        // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;      // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);


//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+1;
  DECODE : ID;
  ALU : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  ALU2 : CA;
%}

// Long multiply: longer result latency than 32-bit mult.
pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  MEM : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//no.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}


//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

// int -> double conversion.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// double -> int conversion.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// Move from FP register to GP register (mfc1).
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD;
%}

// Move from GP register to FP register (mtc1).
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

// Multi-instruction memory sequence (e.g. atomics); serializes issue.
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Piple slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
5524 //----------INSTRUCTIONS-------------------------------------------------------
5525 //
5526 // match -- States which machine-independent subtree may be replaced
5527 // by this instruction.
5528 // ins_cost -- The estimated cost of this instruction is used by instruction
5529 // selection to identify a minimum cost tree of machine
5530 // instructions that matches a tree of machine-independent
5531 // instructions.
5532 // format -- A string providing the disassembly for this instruction.
5533 // The value of an instruction's operand may be inserted
5534 // by referring to it with a '$' prefix.
5535 // opcode -- Three instruction opcodes may be provided. These are referred
5536 // to within an encode class as $primary, $secondary, and $tertiary
5537 // respectively. The primary opcode is commonly used to
5538 // indicate the type of machine instruction, while secondary
5539 // and tertiary are often used for prefix options or addressing
5540 // modes.
5541 // ins_encode -- A list of encode classes with parameters. The encode class
5542 // name must have been defined in an 'enc_class' specification
5543 // in the encode section of the architecture description.
// Load Integer (32-bit, sign-extended by lw)
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer widened to Long: lw already sign-extends, so the
// ConvI2L is folded into the load.
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Long.
instruct loadL(mRegL dst, memory mem) %{
// predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Load Long - UNaligned
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
// Store Long (non-atomic path; atomic variant is storeL_reg_atomic)
instruct storeL_reg(memory mem, mRegL src) %{
  predicate(!((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
//FIXME:volatile! atomic!
// Volatile (atomic) Store Long.  On this 64-bit port a single sd is
// naturally atomic; the encoding materializes the effective address
// into AT and performs one 64-bit store.
// NOTE(review): the original comment described the x86 FP-TOS/FIST
// scheme, which does not apply to this encoding.
instruct storeL_reg_atomic(memory mem, mRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));

  ins_cost(200);
  // The encoding below emits sd (64-bit store); the label previously
  // said "sw" (32-bit), which was misleading.
  format %{ "sd $mem, $src #@storeL_reg_atomic\n" %}
  ins_encode %{
    Register src = as_Register($src$$reg);

    int  base  = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp  = $mem$$disp;

    if( index != 0 ) {
      // AT <- base + (index << scale); then store at small disp,
      // or add a materialized 32-bit disp first.
      if( Assembler::is_simm16(disp) ) {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sd(src, AT, disp);
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sd(src, AT, 0);
      }
    } else {
      // No index register.  NOTE(review): the move of base into AT is
      // redundant when disp fits simm16, but is kept for behavioral
      // fidelity with the original encoding.
      if( Assembler::is_simm16(disp) ) {
        __ move(AT, as_Register(base));
        __ sd(src, AT, disp);
      } else {
        __ move(AT, as_Register(base));
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sd(src, AT, 0);
      }
    }

  %}
  ins_pipe( ialu_storeL );
%}
// Store Long zero: sd of the zero register, no source register needed.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}

// Store Long immediate.
// NOTE(review): format says "sw" but this is a 64-bit store by match
// rule; encoder store_L_immL_enc is not visible here -- confirm label.
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sw $mem, $src #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer (32-bit, zero-extended)
instruct loadN(mRegN dst, memory mem)
%{
   match(Set dst (LoadN mem));

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
   //TODO: Address should be implemented
   /*
   ins_encode %{
     __ lwu($dst$$Register, $mem$$Address);
   %}
   */
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}

// Load Pointer (full 64-bit)
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Klass Pointer (uncompressed; same encoding as loadP)
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load narrow Klass Pointer
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  /*
  ins_encode %{
    __ lwu($dst$$Register, $mem$$Address);
  %}
  */
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant (32-bit int immediate into register)
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int    value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}


// Load Constant (64-bit long immediate; li may expand to several insns)
instruct loadConL(mRegL dst, immL src) %{
  match(Set dst src);
  // effect(ILL cr);
  ins_cost(200);
  format %{ "li $dst, $src #@loadConL\t"
  %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ li(dst_reg, (long)$src$$constant);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Load Range (array length; plain 32-bit load)
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}


// Store Pointer (full 64-bit)
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
5772 /*
5773 [Ref: loadConP]
5775 Error:
5776 0x2d4b6d40: lui t9, 0x4f <--- handle
5777 0x2d4b6d44: addiu t9, t9, 0xffff808c
5778 0x2d4b6d48: sw t9, 0x4(s2)
5780 OK:
5781 0x2cc5ed40: lui t9, 0x336a <--- klass
5782 0x2cc5ed44: addiu t9, t9, 0x5a10
5783 0x2cc5ed48: sw t9, 0x4(s2)
5784 */
// Store Pointer Immediate; null pointers or constant oops that do not
// need card-mark barriers.

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP(memory mem, immP31 src) %{
  match(Set mem (StoreP mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmP" %}
  ins_encode(store_P_immP_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Byte Immediate (8-bit constant)
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
5808 // Store Compressed Pointer
5809 instruct storeN(memory mem, mRegN src)
5810 %{
5811 match(Set mem (StoreN mem src));
5813 ins_cost(125); // XXX
5814 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
5815 ins_encode(store_N_reg_enc(mem, src));
5816 ins_pipe( ialu_storeI );
5817 %}
5819 instruct storeNKlass(memory mem, mRegN src)
5820 %{
5821 match(Set mem (StoreNKlass mem src));
5823 ins_cost(125); // XXX
5824 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
5825 ins_encode(store_N_reg_enc(mem, src));
5826 ins_pipe( ialu_storeI );
5827 %}
// Store compressed NULL pointer. Only valid when both the narrow-oop and
// narrow-klass heap bases are NULL, so encoded NULL is the literal 0.
5829 instruct storeImmN0(memory mem, immN0 zero)
5830 %{
5831 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
5832 match(Set mem (StoreN mem zero));
5834 ins_cost(125); // XXX
5835 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
5836 ins_encode(storeImmN0_enc(mem, zero));
5837 ins_pipe( ialu_storeI );
5838 %}
// Store a compressed-oop immediate constant to memory.
5840 instruct storeImmN(memory mem, immN src)
5841 %{
5842 match(Set mem (StoreN mem src));
5844 ins_cost(150); // XXX
5845 format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
5846 ins_encode(storeImmN_enc(mem, src));
5847 ins_pipe( ialu_storeI );
5848 %}
// Store a compressed-klass immediate constant to memory.
5850 instruct storeImmNKlass(memory mem, immNKlass src)
5851 %{
5852 match(Set mem (StoreNKlass mem src));
5854 ins_cost(150); // XXX
5855 format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
5856 ins_encode(storeImmNKlass_enc(mem, src));
5857 ins_pipe( ialu_storeI );
5858 %}
5860 // Store Byte
// Store Byte: store the low 8 bits of an integer register ("sb").
5861 instruct storeB(memory mem, mRegI src) %{
5862 match(Set mem (StoreB mem src));
5864 ins_cost(125);
5865 format %{ "sb $src, $mem #@storeB" %}
5866 ins_encode(store_B_reg_enc(mem, src));
5867 ins_pipe( ialu_storeI );
5868 %}
5870 // Load Byte (8bit signed)
// Load Byte (8-bit signed), sign-extended into an int register ("lb").
5871 instruct loadB(mRegI dst, memory mem) %{
5872 match(Set dst (LoadB mem));
5874 ins_cost(125);
5875 format %{ "lb $dst, $mem #@loadB" %}
5876 ins_encode(load_B_enc(dst, mem));
5877 ins_pipe( ialu_loadI );
5878 %}
// Load signed byte folded with ConvI2L: the "lb" result already fills the
// long register, so no separate extension node is needed.
5880 instruct loadB_convI2L(mRegL dst, memory mem) %{
5881 match(Set dst (ConvI2L (LoadB mem)));
5883 ins_cost(125);
5884 format %{ "lb $dst, $mem #@loadB_convI2L" %}
5885 ins_encode(load_B_enc(dst, mem));
5886 ins_pipe( ialu_loadI );
5887 %}
5889 // Load Byte (8bit UNsigned)
// Load Byte (8-bit unsigned), zero-extended ("lbu").
5890 instruct loadUB(mRegI dst, memory mem) %{
5891 match(Set dst (LoadUB mem));
5893 ins_cost(125);
5894 format %{ "lbu $dst, $mem #@loadUB" %}
5895 ins_encode(load_UB_enc(dst, mem));
5896 ins_pipe( ialu_loadI );
5897 %}
// Load unsigned byte folded with ConvI2L ("lbu" zero-extends into the long).
5899 instruct loadUB_convI2L(mRegL dst, memory mem) %{
5900 match(Set dst (ConvI2L (LoadUB mem)));
5902 ins_cost(125);
5903 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
5904 ins_encode(load_UB_enc(dst, mem));
5905 ins_pipe( ialu_loadI );
5906 %}
5908 // Load Short (16bit signed)
// Load Short (16-bit signed), sign-extended ("lh").
5909 instruct loadS(mRegI dst, memory mem) %{
5910 match(Set dst (LoadS mem));
5912 ins_cost(125);
5913 format %{ "lh $dst, $mem #@loadS" %}
5914 ins_encode(load_S_enc(dst, mem));
5915 ins_pipe( ialu_loadI );
5916 %}
// Load signed short folded with ConvI2L ("lh" sign-extends into the long).
5918 instruct loadS_convI2L(mRegL dst, memory mem) %{
5919 match(Set dst (ConvI2L (LoadS mem)));
5921 ins_cost(125);
5922 format %{ "lh $dst, $mem #@loadS_convI2L" %}
5923 ins_encode(load_S_enc(dst, mem));
5924 ins_pipe( ialu_loadI );
5925 %}
// Prefetch-for-allocation: intentionally emits nothing on this port. The
// sync() that used to be emitted here was removed as redundant (see commit
// "[C2] Remove redundant sync operations"); the commented call is kept as a
// marker of that decision.
5927 instruct prefetchAllocNTA( memory mem ) %{
5928 match(PrefetchAllocation mem);
5929 ins_cost(400);
5930 format %{ "PREFETCHNTA $mem\t# Prefetch allocation to non-temporal cache for write (empty)" %}
5931 ins_encode %{
5932 // __ sync();
5933 %}
5934 ins_pipe(empty);
5935 %}
5937 // Store Integer Immediate
// Store Integer Immediate: materialize the constant, then store it.
5938 instruct storeImmI(memory mem, immI src) %{
5939 match(Set mem (StoreI mem src));
5941 ins_cost(150);
5942 format %{ "mov $mem, $src #@storeImmI" %}
5943 ins_encode(store_I_immI_enc(mem, src));
5944 ins_pipe( ialu_storeI );
5945 %}
5947 // Store Integer
// Store Integer: 32-bit register store ("sw").
5948 instruct storeI(memory mem, mRegI src) %{
5949 match(Set mem (StoreI mem src));
5951 ins_cost(125);
5952 format %{ "sw $mem, $src #@storeI" %}
5953 ins_encode(store_I_reg_enc(mem, src));
5954 ins_pipe( ialu_storeI );
5955 %}
// Store of a long truncated to int: "sw" stores the low 32 bits, so the
// ConvL2I is folded away and the same encoding as storeI is used.
5957 instruct storeI_convL2I(memory mem, mRegL src) %{
5958 match(Set mem (StoreI mem (ConvL2I src)));
5960 ins_cost(125);
5961 format %{ "sw $mem, $src #@storeI_convL2I" %}
5962 ins_encode(store_I_reg_enc(mem, src));
5963 ins_pipe( ialu_storeI );
5964 %}
5966 // Load Float
// Load Float: load a 32-bit float from memory into an FPU register.
5967 instruct loadF(regF dst, memory mem) %{
5968 match(Set dst (LoadF mem));
5970 ins_cost(150);
5971 format %{ "loadF $dst, $mem #@loadF" %}
5972 ins_encode(load_F_enc(dst, mem));
5973 ins_pipe( ialu_loadI );
5974 %}
// Load Pointer Constant into a register via li48 (48-bit immediate load).
// Three cases:
//  1. relocatable metadata constant -> record metadata relocation, then li48;
//  2. relocatable oop constant      -> record oop relocation, then li48;
//  3. plain constant -> li48 directly, with a special case for the polling
//     page address (lui-only form when OPT_SAFEPOINT is enabled).
// The relocation must precede the li48 so GC can patch the embedded address.
5976 instruct loadConP(mRegP dst, immP src) %{
5977 match(Set dst src);
5979 format %{ "li $dst, $src #@loadConP" %}
5981 ins_encode %{
5982 Register dst = $dst$$Register;
5983 long* value = (long*)$src$$constant;
5984 bool is_need_reloc = $src->constant_reloc() != relocInfo::none;
5986 /* During GC, klassOop may be moved to new position in the heap.
5987 * It must be relocated.
5988 * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
5989 */
5990 if (is_need_reloc) {
5991 if($src->constant_reloc() == relocInfo::metadata_type){
5992 int klass_index = __ oop_recorder()->find_index((Klass*)value);
5993 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
5995 __ relocate(rspec);
5996 __ li48(dst, (long)value);
5997 }
5999 if($src->constant_reloc() == relocInfo::oop_type){
6000 int oop_index = __ oop_recorder()->find_index((jobject)value);
6001 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6003 __ relocate(rspec);
6004 __ li48(dst, (long)value);
6005 }
6006 } else {
6007 if((long)value == (long)os::get_polling_page()) {
6008 #ifndef OPT_SAFEPOINT
6009 __ li48(dst, (long)os::get_polling_page());
6010 #else
6011 __ lui(dst, Assembler::split_high((intptr_t)os::get_polling_page()));
6012 #endif
6013 } else {
6014 __ li48(dst, (long)value);
6015 }
6016 }
6017 %}
6019 ins_pipe( ialu_regI_regI );
6020 %}
// Load NULL pointer constant: just copy the hard-wired zero register R0.
6022 instruct loadConP0(mRegP dst, immP0 src)
6023 %{
6024 match(Set dst src);
6026 ins_cost(50);
6027 format %{ "mov $dst, R0\t# ptr" %}
6028 ins_encode %{
6029 Register dst_reg = $dst$$Register;
6030 __ move(dst_reg, R0);
6031 %}
6032 ins_pipe( ialu_regI_regI );
6033 %}
// Load compressed NULL pointer constant: copy R0.
6035 instruct loadConN0(mRegN dst, immN0 src) %{
6036 match(Set dst src);
6037 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6038 ins_encode %{
6039 __ move($dst$$Register, R0);
6040 %}
6041 ins_pipe( ialu_regI_regI );
6042 %}
// Load non-NULL compressed oop constant. Records a narrow-oop relocation and
// loads the oop index via li48 so the patching code can rewrite it.
// NOTE(review): both branches of the rspec.type() test emit the same li48;
// only the relocate() call differs — presumably kept explicit for clarity.
6044 instruct loadConN(mRegN dst, immN src) %{
6045 match(Set dst src);
6047 ins_cost(125);
6048 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6049 ins_encode %{
6050 address con = (address)$src$$constant;
6051 if (con == NULL) {
6052 ShouldNotReachHere();
6053 } else {
6054 assert (UseCompressedOops, "should only be used for compressed headers");
6055 assert (Universe::heap() != NULL, "java heap should be initialized");
6056 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
6058 Register dst = $dst$$Register;
6059 long* value = (long*)$src$$constant;
6060 int oop_index = __ oop_recorder()->find_index((jobject)value);
6061 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6062 if(rspec.type()!=relocInfo::none){
6063 __ relocate(rspec, Assembler::narrow_oop_operand);
6064 __ li48(dst, oop_index);
6065 }
6066 else {
6067 __ li48(dst, oop_index);
6068 }
6069 }
6070 %}
6071 ins_pipe( ialu_regI_regI ); // XXX
6072 %}
// Load compressed klass pointer constant: records a metadata relocation and
// loads the pre-encoded narrow klass value via li48.
6074 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6075 match(Set dst src);
6077 ins_cost(125);
6078 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6079 ins_encode %{
6080 address con = (address)$src$$constant;
6081 if (con == NULL) {
6082 ShouldNotReachHere();
6083 } else {
6084 Register dst = $dst$$Register;
6085 long* value = (long*)$src$$constant;
6087 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6088 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6089 long narrowp = (long)Klass::encode_klass((Klass*)value);
6091 if(rspec.type()!=relocInfo::none){
6092 __ relocate(rspec, Assembler::narrow_oop_operand);
6093 __ li48(dst, narrowp);
6094 }
6095 else {
6096 __ li48(dst, narrowp);
6097 }
6098 }
6099 %}
6100 ins_pipe( ialu_regI_regI ); // XXX
6101 %}
6103 /*
6104 // Load Stack Slot
6105 instruct loadSSI(mRegI dst, stackSlotI src) %{
6106 match(Set dst src);
6107 ins_cost(25);
6109 format %{ "MOV $dst,$src #@loadSSI" %}
6110 ins_encode %{
6111 Register dst = $dst$$Register;
6113 int base = $src$$base;
6114 int index = $src$$index;
6115 int scale = $src$$scale;
6116 int disp = $src$$disp;
6118 fprintf(stderr, "\n?????????????????????????\n");//fujie debug
6119 if( scale != 0 ) Unimplemented();
6120 if( index != 0 ) {
6121 __ add(AT, as_Register(base), as_Register(index));
6122 __ lw(dst, AT, disp);
6123 } else {
6124 __ lw(dst, as_Register(base), disp);
6125 }
6127 %}
6129 ins_pipe( ialu_reg_mem );
6130 %}
6132 // Load Stack Slot
6133 instruct loadSSP(mRegP dst, stackSlotP src) %{
6134 match(Set dst src);
6135 ins_cost(25);
6137 format %{ "MOV $dst,$src #@loadSSP" %}
6138 ins_encode %{
6139 Register dst = $dst$$Register;
6141 int base = $src$$base;
6142 int index = $src$$index;
6143 int scale = $src$$scale;
6144 int disp = $src$$disp;
6146 fprintf(stderr, "\n?????????????????????????\n");//fujie debug
6147 if( scale != 0 ) Unimplemented();
6148 if( index != 0 ) {
6149 __ add(AT, as_Register(base), as_Register(index));
6150 __ lw(dst, AT, disp);
6151 } else {
6152 __ lw(dst, as_Register(base), disp);
6153 }
6155 %}
6157 ins_pipe( ialu_reg_mem );
6158 %}
6159 */
6161 //FIXME
6162 // Tail Call; Jump from runtime stub to Java code.
6163 // Also known as an 'interprocedural jump'.
6164 // Target of jump will eventually return to caller.
6165 // TailJump below removes the return address.
// Tail Call: indirect jump from a runtime stub into Java code. RA is pushed
// first because generate_forward_exception() expects it on the stack; the
// method oop is passed in S3, then we jump through the target register.
// The trailing nop fills the MIPS branch delay slot.
6166 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6167 match(TailCall jump_target method_oop );
6168 ins_cost(300);
6169 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6171 ins_encode %{
6172 Register target = $jump_target$$Register;
6173 Register oop = $method_oop$$Register;
6175 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6176 __ push(RA);
6178 __ move(S3, oop);
6179 __ jr(target);
6180 __ nop();
6181 %}
6183 ins_pipe( pipe_jump );
6184 %}
6186 // Create exception oop: created by stack-crawling runtime code.
6187 // Created exception is now available to this handler, and is setup
6188 // just prior to jumping to this handler. No code emitted.
// Create exception oop: the stack-crawling runtime has already placed the
// exception oop in A0 before jumping here, so no code is emitted — only a
// block comment for the disassembly.
6189 instruct CreateException( a0_RegP ex_oop )
6190 %{
6191 match(Set ex_oop (CreateEx));
6193 // use the following format syntax
6194 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6195 ins_encode %{
6196 /* Jin: X86 leaves this function empty */
6197 __ block_comment("CreateException is empty in X86/MIPS");
6198 %}
6199 ins_pipe( empty );
6200 // ins_pipe( pipe_jump );
6201 %}
6204 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6206 - Common try/catch:
6207 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6208 |- V0, V1 are created
6209 |- T9 <= SharedRuntime::exception_handler_for_return_address
6210 `- jr T9
6211 `- the caller's exception_handler
6212 `- jr OptoRuntime::exception_blob
6213 `- here
6214 - Rethrow(e.g. 'unwind'):
6215 * The callee:
6216 |- an exception is triggered during execution
6217 `- exits the callee method through RethrowException node
6218 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6219 `- The callee jumps to OptoRuntime::rethrow_stub()
6220 * In OptoRuntime::rethrow_stub:
6221 |- The VM calls _rethrow_Java to determine the return address in the caller method
6222 `- exits the stub with tailjmpInd
6223 |- pops exception_oop(V0) and exception_pc(V1)
6224 `- jumps to the return address(usually an exception_handler)
6225 * The caller:
6226 `- continues processing the exception_blob with V0/V1
6227 */
6229 /*
6230 Disassembling OptoRuntime::rethrow_stub()
6232 ; locals
6233 0x2d3bf320: addiu sp, sp, 0xfffffff8
6234 0x2d3bf324: sw ra, 0x4(sp)
6235 0x2d3bf328: sw fp, 0x0(sp)
6236 0x2d3bf32c: addu fp, sp, zero
6237 0x2d3bf330: addiu sp, sp, 0xfffffff0
6238 0x2d3bf334: sw ra, 0x8(sp)
6239 0x2d3bf338: sw t0, 0x4(sp)
6240 0x2d3bf33c: sw sp, 0x0(sp)
6242 ; get_thread(S2)
6243 0x2d3bf340: addu s2, sp, zero
6244 0x2d3bf344: srl s2, s2, 12
6245 0x2d3bf348: sll s2, s2, 2
6246 0x2d3bf34c: lui at, 0x2c85
6247 0x2d3bf350: addu at, at, s2
6248 0x2d3bf354: lw s2, 0xffffcc80(at)
6250 0x2d3bf358: lw s0, 0x0(sp)
6251 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6252 0x2d3bf360: sw s2, 0xc(sp)
6254 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6255 0x2d3bf364: lw a0, 0x4(sp)
6256 0x2d3bf368: lw a1, 0xc(sp)
6257 0x2d3bf36c: lw a2, 0x8(sp)
6258 ;; Java_To_Runtime
6259 0x2d3bf370: lui t9, 0x2c34
6260 0x2d3bf374: addiu t9, t9, 0xffff8a48
6261 0x2d3bf378: jalr t9
6262 0x2d3bf37c: nop
6264 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6266 0x2d3bf384: lw s0, 0xc(sp)
6267 0x2d3bf388: sw zero, 0x118(s0)
6268 0x2d3bf38c: sw zero, 0x11c(s0)
6269 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6270 0x2d3bf394: addu s2, s0, zero
6271 0x2d3bf398: sw zero, 0x144(s2)
6272 0x2d3bf39c: lw s0, 0x4(s2)
6273 0x2d3bf3a0: addiu s4, zero, 0x0
6274 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6275 0x2d3bf3a8: nop
6276 0x2d3bf3ac: addiu sp, sp, 0x10
6277 0x2d3bf3b0: addiu sp, sp, 0x8
6278 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6279 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6280 0x2d3bf3bc: lui at, 0x2b48
6281 0x2d3bf3c0: lw at, 0x100(at)
6283 ; tailjmpInd: Restores exception_oop & exception_pc
6284 0x2d3bf3c4: addu v1, ra, zero
6285 0x2d3bf3c8: addu v0, s1, zero
6286 0x2d3bf3cc: jr s3
6287 0x2d3bf3d0: nop
6288 ; Exception:
6289 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6290 0x2d3bf3d8: addiu s1, s1, 0x40
6291 0x2d3bf3dc: addiu s2, zero, 0x0
6292 0x2d3bf3e0: addiu sp, sp, 0x10
6293 0x2d3bf3e4: addiu sp, sp, 0x8
6294 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6295 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6296 0x2d3bf3f0: lui at, 0x2b48
6297 0x2d3bf3f4: lw at, 0x100(at)
6298 ; TailCalljmpInd
6299 __ push(RA); ; to be used in generate_forward_exception()
6300 0x2d3bf3f8: addu t7, s2, zero
6301 0x2d3bf3fc: jr s1
6302 0x2d3bf400: nop
6303 */
6304 // Rethrow exception:
6305 // The exception oop will come in the first argument position.
6306 // Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: jump (not call) to OptoRuntime::rethrow_stub(), which
// finds the exception handler in the parent method. A runtime-call relocation
// is recorded at the instruction mark so the stub address can be patched.
6307 instruct RethrowException()
6308 %{
6309 match(Rethrow);
6311 // use the following format syntax
6312 format %{ "JMP rethrow_stub #@RethrowException" %}
6313 ins_encode %{
6314 __ block_comment("@ RethrowException");
6316 cbuf.set_insts_mark();
6317 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6319 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6320 __ li(T9, OptoRuntime::rethrow_stub());
6321 __ jr(T9);
6322 __ nop();
6323 %}
6324 ins_pipe( pipe_jump );
6325 %}
// Conditional branch: pointer compared against NULL (only eq/ne make sense;
// the unsigned orderings are commented out). The trailing nop fills the
// MIPS branch delay slot.
// NOTE(review): "if (&L)" tests the address of a reference bound to
// *($labl$$label) — in standard C++ this is always true; the pattern relies
// on a null $labl$$label producing a null reference. Used file-wide; confirm
// before changing.
6327 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6328 match(If cmp (CmpP op1 zero));
6329 effect(USE labl);
6331 ins_cost(180);
6332 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6334 ins_encode %{
6335 Register op1 = $op1$$Register;
6336 Register op2 = R0;
6337 Label &L = *($labl$$label);
6338 int flag = $cmp$$cmpcode;
6340 switch(flag)
6341 {
6342 case 0x01: //equal
6343 if (&L)
6344 __ beq(op1, op2, L);
6345 else
6346 __ beq(op1, op2, (int)0);
6347 break;
6348 case 0x02: //not_equal
6349 if (&L)
6350 __ bne(op1, op2, L);
6351 else
6352 __ bne(op1, op2, (int)0);
6353 break;
6354 /*
6355 case 0x03: //above
6356 __ sltu(AT, op2, op1);
6357 if(&L)
6358 __ bne(R0, AT, L);
6359 else
6360 __ bne(R0, AT, (int)0);
6361 break;
6362 case 0x04: //above_equal
6363 __ sltu(AT, op1, op2);
6364 if(&L)
6365 __ beq(AT, R0, L);
6366 else
6367 __ beq(AT, R0, (int)0);
6368 break;
6369 case 0x05: //below
6370 __ sltu(AT, op1, op2);
6371 if(&L)
6372 __ bne(R0, AT, L);
6373 else
6374 __ bne(R0, AT, (int)0);
6375 break;
6376 case 0x06: //below_equal
6377 __ sltu(AT, op2, op1);
6378 if(&L)
6379 __ beq(AT, R0, L);
6380 else
6381 __ beq(AT, R0, (int)0);
6382 break;
6383 */
6384 default:
6385 Unimplemented();
6386 }
6387 __ nop();
6388 %}
6390 ins_pc_relative(1);
6391 ins_pipe( pipe_alu_branch );
6392 %}
// Conditional branch: pointer vs pointer. Unsigned orderings are synthesized
// with sltu into AT, then tested against R0. The trailing nop fills the
// branch delay slot. (See branchConP_zero for the "if (&L)" idiom.)
6395 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6396 match(If cmp (CmpP op1 op2));
6397 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6398 effect(USE labl);
6400 ins_cost(200);
6401 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6403 ins_encode %{
6404 Register op1 = $op1$$Register;
6405 Register op2 = $op2$$Register;
6406 Label &L = *($labl$$label);
6407 int flag = $cmp$$cmpcode;
6409 switch(flag)
6410 {
6411 case 0x01: //equal
6412 if (&L)
6413 __ beq(op1, op2, L);
6414 else
6415 __ beq(op1, op2, (int)0);
6416 break;
6417 case 0x02: //not_equal
6418 if (&L)
6419 __ bne(op1, op2, L);
6420 else
6421 __ bne(op1, op2, (int)0);
6422 break;
6423 case 0x03: //above
6424 __ sltu(AT, op2, op1);
6425 if(&L)
6426 __ bne(R0, AT, L);
6427 else
6428 __ bne(R0, AT, (int)0);
6429 break;
6430 case 0x04: //above_equal
6431 __ sltu(AT, op1, op2);
6432 if(&L)
6433 __ beq(AT, R0, L);
6434 else
6435 __ beq(AT, R0, (int)0);
6436 break;
6437 case 0x05: //below
6438 __ sltu(AT, op1, op2);
6439 if(&L)
6440 __ bne(R0, AT, L);
6441 else
6442 __ bne(R0, AT, (int)0);
6443 break;
6444 case 0x06: //below_equal
6445 __ sltu(AT, op2, op1);
6446 if(&L)
6447 __ beq(AT, R0, L);
6448 else
6449 __ beq(AT, R0, (int)0);
6450 break;
6451 default:
6452 Unimplemented();
6453 }
6454 __ nop();
6455 %}
6457 ins_pc_relative(1);
6458 ins_pipe( pipe_alu_branch );
6459 %}
// Conditional branch: compressed oop vs NULL — only eq/ne are implemented.
// Trailing nop fills the branch delay slot.
6461 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6462 match(If cmp (CmpN op1 null));
6463 effect(USE labl);
6465 ins_cost(180);
6466 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6467 "BP$cmp $labl @ cmpN_null_branch" %}
6468 ins_encode %{
6469 Register op1 = $op1$$Register;
6470 Register op2 = R0;
6471 Label &L = *($labl$$label);
6472 int flag = $cmp$$cmpcode;
6474 switch(flag)
6475 {
6476 case 0x01: //equal
6477 if (&L)
6478 __ beq(op1, op2, L);
6479 else
6480 __ beq(op1, op2, (int)0);
6481 break;
6482 case 0x02: //not_equal
6483 if (&L)
6484 __ bne(op1, op2, L);
6485 else
6486 __ bne(op1, op2, (int)0);
6487 break;
6488 default:
6489 Unimplemented();
6490 }
6491 __ nop();
6492 %}
6493 //TODO: pipe_branchP or create pipe_branchN LEE
6494 ins_pc_relative(1);
6495 ins_pipe( pipe_alu_branch );
6496 %}
// Conditional branch: compressed oop vs compressed oop. Unsigned orderings
// via sltu into AT. Trailing nop fills the branch delay slot.
6498 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6499 match(If cmp (CmpN op1 op2));
6500 effect(USE labl);
6502 ins_cost(180);
6503 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6504 "BP$cmp $labl" %}
6505 ins_encode %{
6506 Register op1_reg = $op1$$Register;
6507 Register op2_reg = $op2$$Register;
6508 Label &L = *($labl$$label);
6509 int flag = $cmp$$cmpcode;
6511 switch(flag)
6512 {
6513 case 0x01: //equal
6514 if (&L)
6515 __ beq(op1_reg, op2_reg, L);
6516 else
6517 __ beq(op1_reg, op2_reg, (int)0);
6518 break;
6519 case 0x02: //not_equal
6520 if (&L)
6521 __ bne(op1_reg, op2_reg, L);
6522 else
6523 __ bne(op1_reg, op2_reg, (int)0);
6524 break;
6525 case 0x03: //above
6526 __ sltu(AT, op2_reg, op1_reg);
6527 if(&L)
6528 __ bne(R0, AT, L);
6529 else
6530 __ bne(R0, AT, (int)0);
6531 break;
6532 case 0x04: //above_equal
6533 __ sltu(AT, op1_reg, op2_reg);
6534 if(&L)
6535 __ beq(AT, R0, L);
6536 else
6537 __ beq(AT, R0, (int)0);
6538 break;
6539 case 0x05: //below
6540 __ sltu(AT, op1_reg, op2_reg);
6541 if(&L)
6542 __ bne(R0, AT, L);
6543 else
6544 __ bne(R0, AT, (int)0);
6545 break;
6546 case 0x06: //below_equal
6547 __ sltu(AT, op2_reg, op1_reg);
6548 if(&L)
6549 __ beq(AT, R0, L);
6550 else
6551 __ beq(AT, R0, (int)0);
6552 break;
6553 default:
6554 Unimplemented();
6555 }
6556 __ nop();
6557 %}
6558 ins_pc_relative(1);
6559 ins_pipe( pipe_alu_branch );
6560 %}
// Conditional branch: unsigned int compare, register vs register
// (sltu + beq/bne on AT for the ordered cases). Trailing nop fills the
// branch delay slot.
6562 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6563 match( If cmp (CmpU src1 src2) );
6564 effect(USE labl);
6565 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6567 ins_encode %{
6568 Register op1 = $src1$$Register;
6569 Register op2 = $src2$$Register;
6570 Label &L = *($labl$$label);
6571 int flag = $cmp$$cmpcode;
6573 switch(flag)
6574 {
6575 case 0x01: //equal
6576 if (&L)
6577 __ beq(op1, op2, L);
6578 else
6579 __ beq(op1, op2, (int)0);
6580 break;
6581 case 0x02: //not_equal
6582 if (&L)
6583 __ bne(op1, op2, L);
6584 else
6585 __ bne(op1, op2, (int)0);
6586 break;
6587 case 0x03: //above
6588 __ sltu(AT, op2, op1);
6589 if(&L)
6590 __ bne(AT, R0, L);
6591 else
6592 __ bne(AT, R0, (int)0);
6593 break;
6594 case 0x04: //above_equal
6595 __ sltu(AT, op1, op2);
6596 if(&L)
6597 __ beq(AT, R0, L);
6598 else
6599 __ beq(AT, R0, (int)0);
6600 break;
6601 case 0x05: //below
6602 __ sltu(AT, op1, op2);
6603 if(&L)
6604 __ bne(AT, R0, L);
6605 else
6606 __ bne(AT, R0, (int)0);
6607 break;
6608 case 0x06: //below_equal
6609 __ sltu(AT, op2, op1);
6610 if(&L)
6611 __ beq(AT, R0, L);
6612 else
6613 __ beq(AT, R0, (int)0);
6614 break;
6615 default:
6616 Unimplemented();
6617 }
6618 __ nop();
6619 %}
6621 ins_pc_relative(1);
6622 ins_pipe( pipe_alu_branch );
6623 %}
// Conditional branch: unsigned int compare, register vs general immediate.
// The immediate is first materialized into AT; note the ordered cases then
// reuse AT as both comparand and sltu result. Trailing nop fills the delay
// slot.
6626 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6627 match( If cmp (CmpU src1 src2) );
6628 effect(USE labl);
6629 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6631 ins_encode %{
6632 Register op1 = $src1$$Register;
6633 int val = $src2$$constant;
6634 Label &L = *($labl$$label);
6635 int flag = $cmp$$cmpcode;
6637 __ move(AT, val);
6638 switch(flag)
6639 {
6640 case 0x01: //equal
6641 if (&L)
6642 __ beq(op1, AT, L);
6643 else
6644 __ beq(op1, AT, (int)0);
6645 break;
6646 case 0x02: //not_equal
6647 if (&L)
6648 __ bne(op1, AT, L);
6649 else
6650 __ bne(op1, AT, (int)0);
6651 break;
6652 case 0x03: //above
6653 __ sltu(AT, AT, op1);
6654 if(&L)
6655 __ bne(R0, AT, L);
6656 else
6657 __ bne(R0, AT, (int)0);
6658 break;
6659 case 0x04: //above_equal
6660 __ sltu(AT, op1, AT);
6661 if(&L)
6662 __ beq(AT, R0, L);
6663 else
6664 __ beq(AT, R0, (int)0);
6665 break;
6666 case 0x05: //below
6667 __ sltu(AT, op1, AT);
6668 if(&L)
6669 __ bne(R0, AT, L);
6670 else
6671 __ bne(R0, AT, (int)0);
6672 break;
6673 case 0x06: //below_equal
6674 __ sltu(AT, AT, op1);
6675 if(&L)
6676 __ beq(AT, R0, L);
6677 else
6678 __ beq(AT, R0, (int)0);
6679 break;
6680 default:
6681 Unimplemented();
6682 }
6683 __ nop();
6684 %}
6686 ins_pc_relative(1);
6687 ins_pipe( pipe_alu_branch );
6688 %}
// Conditional branch: signed int compare, register vs register
// (slt for the ordered cases). Trailing nop fills the branch delay slot.
6690 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6691 match( If cmp (CmpI src1 src2) );
6692 effect(USE labl);
6693 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6695 ins_encode %{
6696 Register op1 = $src1$$Register;
6697 Register op2 = $src2$$Register;
6698 Label &L = *($labl$$label);
6699 int flag = $cmp$$cmpcode;
6701 switch(flag)
6702 {
6703 case 0x01: //equal
6704 if (&L)
6705 __ beq(op1, op2, L);
6706 else
6707 __ beq(op1, op2, (int)0);
6708 break;
6709 case 0x02: //not_equal
6710 if (&L)
6711 __ bne(op1, op2, L);
6712 else
6713 __ bne(op1, op2, (int)0);
6714 break;
6715 case 0x03: //above
6716 __ slt(AT, op2, op1);
6717 if(&L)
6718 __ bne(R0, AT, L);
6719 else
6720 __ bne(R0, AT, (int)0);
6721 break;
6722 case 0x04: //above_equal
6723 __ slt(AT, op1, op2);
6724 if(&L)
6725 __ beq(AT, R0, L);
6726 else
6727 __ beq(AT, R0, (int)0);
6728 break;
6729 case 0x05: //below
6730 __ slt(AT, op1, op2);
6731 if(&L)
6732 __ bne(R0, AT, L);
6733 else
6734 __ bne(R0, AT, (int)0);
6735 break;
6736 case 0x06: //below_equal
6737 __ slt(AT, op2, op1);
6738 if(&L)
6739 __ beq(AT, R0, L);
6740 else
6741 __ beq(AT, R0, (int)0);
6742 break;
6743 default:
6744 Unimplemented();
6745 }
6746 __ nop();
6747 %}
6749 ins_pc_relative(1);
6750 ins_pipe( pipe_alu_branch );
6751 %}
// Conditional branch: signed int compared against zero. Uses the dedicated
// compare-with-zero branches (bgtz/bgez/bltz/blez), so no scratch register
// is needed. Trailing nop fills the branch delay slot.
6753 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
6754 match( If cmp (CmpI src1 src2) );
6755 effect(USE labl);
6756 ins_cost(170);
6757 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
6759 ins_encode %{
6760 Register op1 = $src1$$Register;
6761 // int val = $src2$$constant;
6762 Label &L = *($labl$$label);
6763 int flag = $cmp$$cmpcode;
6765 //__ move(AT, val);
6766 switch(flag)
6767 {
6768 case 0x01: //equal
6769 if (&L)
6770 __ beq(op1, R0, L);
6771 else
6772 __ beq(op1, R0, (int)0);
6773 break;
6774 case 0x02: //not_equal
6775 if (&L)
6776 __ bne(op1, R0, L);
6777 else
6778 __ bne(op1, R0, (int)0);
6779 break;
6780 case 0x03: //greater
6781 if(&L)
6782 __ bgtz(op1, L);
6783 else
6784 __ bgtz(op1, (int)0);
6785 break;
6786 case 0x04: //greater_equal
6787 if(&L)
6788 __ bgez(op1, L);
6789 else
6790 __ bgez(op1, (int)0);
6791 break;
6792 case 0x05: //less
6793 if(&L)
6794 __ bltz(op1, L);
6795 else
6796 __ bltz(op1, (int)0);
6797 break;
6798 case 0x06: //less_equal
6799 if(&L)
6800 __ blez(op1, L);
6801 else
6802 __ blez(op1, (int)0);
6803 break;
6804 default:
6805 Unimplemented();
6806 }
6807 __ nop();
6808 %}
6810 ins_pc_relative(1);
6811 ins_pipe( pipe_alu_branch );
6812 %}
// Conditional branch: signed int compare, register vs general immediate.
// The immediate is materialized into AT up front; ordered cases overwrite AT
// with the slt result. Trailing nop fills the branch delay slot.
6815 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
6816 match( If cmp (CmpI src1 src2) );
6817 effect(USE labl);
6818 ins_cost(200);
6819 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
6821 ins_encode %{
6822 Register op1 = $src1$$Register;
6823 int val = $src2$$constant;
6824 Label &L = *($labl$$label);
6825 int flag = $cmp$$cmpcode;
6827 __ move(AT, val);
6828 switch(flag)
6829 {
6830 case 0x01: //equal
6831 if (&L)
6832 __ beq(op1, AT, L);
6833 else
6834 __ beq(op1, AT, (int)0);
6835 break;
6836 case 0x02: //not_equal
6837 if (&L)
6838 __ bne(op1, AT, L);
6839 else
6840 __ bne(op1, AT, (int)0);
6841 break;
6842 case 0x03: //greater
6843 __ slt(AT, AT, op1);
6844 if(&L)
6845 __ bne(R0, AT, L);
6846 else
6847 __ bne(R0, AT, (int)0);
6848 break;
6849 case 0x04: //greater_equal
6850 __ slt(AT, op1, AT);
6851 if(&L)
6852 __ beq(AT, R0, L);
6853 else
6854 __ beq(AT, R0, (int)0);
6855 break;
6856 case 0x05: //less
6857 __ slt(AT, op1, AT);
6858 if(&L)
6859 __ bne(R0, AT, L);
6860 else
6861 __ bne(R0, AT, (int)0);
6862 break;
6863 case 0x06: //less_equal
6864 __ slt(AT, AT, op1);
6865 if(&L)
6866 __ beq(AT, R0, L);
6867 else
6868 __ beq(AT, R0, (int)0);
6869 break;
6870 default:
6871 Unimplemented();
6872 }
6873 __ nop();
6874 %}
6876 ins_pc_relative(1);
6877 ins_pipe( pipe_alu_branch );
6878 %}
// Conditional branch: UNSIGNED int compared against zero. The degenerate
// cases are resolved statically:
//  - above (> 0u)        == not-equal-to-zero  -> bne
//  - above_equal (>= 0u) is always true        -> unconditional beq(R0,R0)
//  - below (< 0u)        is always false       -> emit nothing ("return")
//  - below_equal (<= 0u) == equal-to-zero      -> beq
// NOTE(review): the bare "return;" for case 0x05 exits the encoding before
// the trailing delay-slot nop is emitted — confirm no branch was emitted
// earlier in this path (none is, so this looks intentional).
6880 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
6881 match( If cmp (CmpU src1 zero) );
6882 effect(USE labl);
6883 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
6885 ins_encode %{
6886 Register op1 = $src1$$Register;
6887 Label &L = *($labl$$label);
6888 int flag = $cmp$$cmpcode;
6890 switch(flag)
6891 {
6892 case 0x01: //equal
6893 if (&L)
6894 __ beq(op1, R0, L);
6895 else
6896 __ beq(op1, R0, (int)0);
6897 break;
6898 case 0x02: //not_equal
6899 if (&L)
6900 __ bne(op1, R0, L);
6901 else
6902 __ bne(op1, R0, (int)0);
6903 break;
6904 case 0x03: //above
6905 if(&L)
6906 __ bne(R0, op1, L);
6907 else
6908 __ bne(R0, op1, (int)0);
6909 break;
6910 case 0x04: //above_equal
6911 if(&L)
6912 __ beq(R0, R0, L);
6913 else
6914 __ beq(R0, R0, (int)0);
6915 break;
6916 case 0x05: //below
6917 return;
6918 break;
6919 case 0x06: //below_equal
6920 if(&L)
6921 __ beq(op1, R0, L);
6922 else
6923 __ beq(op1, R0, (int)0);
6924 break;
6925 default:
6926 Unimplemented();
6927 }
6928 __ nop();
6929 %}
6931 ins_pc_relative(1);
6932 ins_pipe( pipe_alu_branch );
6933 %}
// Conditional branch: unsigned int compare, register vs 16-bit immediate.
// Cheaper than branchConIU_reg_imm because above_equal/below can use sltiu
// with the immediate directly; the other cases still materialize it into AT.
// Trailing nop fills the branch delay slot.
6936 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
6937 match( If cmp (CmpU src1 src2) );
6938 effect(USE labl);
6939 ins_cost(180);
6940 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
6942 ins_encode %{
6943 Register op1 = $src1$$Register;
6944 int val = $src2$$constant;
6945 Label &L = *($labl$$label);
6946 int flag = $cmp$$cmpcode;
6948 switch(flag)
6949 {
6950 case 0x01: //equal
6951 __ move(AT, val);
6952 if (&L)
6953 __ beq(op1, AT, L);
6954 else
6955 __ beq(op1, AT, (int)0);
6956 break;
6957 case 0x02: //not_equal
6958 __ move(AT, val);
6959 if (&L)
6960 __ bne(op1, AT, L);
6961 else
6962 __ bne(op1, AT, (int)0);
6963 break;
6964 case 0x03: //above
6965 __ move(AT, val);
6966 __ sltu(AT, AT, op1);
6967 if(&L)
6968 __ bne(R0, AT, L);
6969 else
6970 __ bne(R0, AT, (int)0);
6971 break;
6972 case 0x04: //above_equal
6973 __ sltiu(AT, op1, val);
6974 if(&L)
6975 __ beq(AT, R0, L);
6976 else
6977 __ beq(AT, R0, (int)0);
6978 break;
6979 case 0x05: //below
6980 __ sltiu(AT, op1, val);
6981 if(&L)
6982 __ bne(R0, AT, L);
6983 else
6984 __ bne(R0, AT, (int)0);
6985 break;
6986 case 0x06: //below_equal
6987 __ move(AT, val);
6988 __ sltu(AT, AT, op1);
6989 if(&L)
6990 __ beq(AT, R0, L);
6991 else
6992 __ beq(AT, R0, (int)0);
6993 break;
6994 default:
6995 Unimplemented();
6996 }
6997 __ nop();
6998 %}
7000 ins_pc_relative(1);
7001 ins_pipe( pipe_alu_branch );
7002 %}
// Conditional branch: signed long compare, register vs register. Each case
// emits its own delayed()->nop() for the branch delay slot (unlike the int
// variants, which share a single trailing nop).
7005 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7006 match( If cmp (CmpL src1 src2) );
7007 effect(USE labl);
7008 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7009 ins_cost(250);
7011 ins_encode %{
7012 Register opr1_reg = as_Register($src1$$reg);
7013 Register opr2_reg = as_Register($src2$$reg);
7015 Label &target = *($labl$$label);
7016 int flag = $cmp$$cmpcode;
7018 switch(flag)
7019 {
7020 case 0x01: //equal
7021 if (&target)
7022 __ beq(opr1_reg, opr2_reg, target);
7023 else
7024 __ beq(opr1_reg, opr2_reg, (int)0);
7025 __ delayed()->nop();
7026 break;
7028 case 0x02: //not_equal
7029 if(&target)
7030 __ bne(opr1_reg, opr2_reg, target);
7031 else
7032 __ bne(opr1_reg, opr2_reg, (int)0);
7033 __ delayed()->nop();
7034 break;
7036 case 0x03: //greater
7037 __ slt(AT, opr2_reg, opr1_reg);
7038 if(&target)
7039 __ bne(AT, R0, target);
7040 else
7041 __ bne(AT, R0, (int)0);
7042 __ delayed()->nop();
7043 break;
7045 case 0x04: //greater_equal
7046 __ slt(AT, opr1_reg, opr2_reg);
7047 if(&target)
7048 __ beq(AT, R0, target);
7049 else
7050 __ beq(AT, R0, (int)0);
7051 __ delayed()->nop();
7053 break;
7055 case 0x05: //less
7056 __ slt(AT, opr1_reg, opr2_reg);
7057 if(&target)
7058 __ bne(AT, R0, target);
7059 else
7060 __ bne(AT, R0, (int)0);
7061 __ delayed()->nop();
7063 break;
7065 case 0x06: //less_equal
7066 __ slt(AT, opr2_reg, opr1_reg);
7068 if(&target)
7069 __ beq(AT, R0, target);
7070 else
7071 __ beq(AT, R0, (int)0);
7072 __ delayed()->nop();
7074 break;
7076 default:
7077 Unimplemented();
7078 }
7079 %}
7082 ins_pc_relative(1);
7083 ins_pipe( pipe_alu_branch );
7084 %}
// Conditional branch: signed int vs immediate, optimized by subtracting the
// immediate (addiu32 with -val) into AT, then branching on AT's relation to
// zero with bgtz/bgez/bltz/blez. Valid only for immI16_sub operands where
// the subtraction cannot overflow the comparison. Trailing nop fills the
// branch delay slot.
7086 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7087 match( If cmp (CmpI src1 src2) );
7088 effect(USE labl);
7089 ins_cost(180);
7090 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7092 ins_encode %{
7093 Register op1 = $src1$$Register;
7094 int val = $src2$$constant;
7095 Label &L = *($labl$$label);
7096 int flag = $cmp$$cmpcode;
7098 __ addiu32(AT, op1, -1 * val);
7099 switch(flag)
7100 {
7101 case 0x01: //equal
7102 if (&L)
7103 __ beq(R0, AT, L);
7104 else
7105 __ beq(R0, AT, (int)0);
7106 break;
7107 case 0x02: //not_equal
7108 if (&L)
7109 __ bne(R0, AT, L);
7110 else
7111 __ bne(R0, AT, (int)0);
7112 break;
7113 case 0x03: //greater
7114 if(&L)
7115 __ bgtz(AT, L);
7116 else
7117 __ bgtz(AT, (int)0);
7118 break;
7119 case 0x04: //greater_equal
7120 if(&L)
7121 __ bgez(AT, L);
7122 else
7123 __ bgez(AT, (int)0);
7124 break;
7125 case 0x05: //less
7126 if(&L)
7127 __ bltz(AT, L);
7128 else
7129 __ bltz(AT, (int)0);
7130 break;
7131 case 0x06: //less_equal
7132 if(&L)
7133 __ blez(AT, L);
7134 else
7135 __ blez(AT, (int)0);
7136 break;
7137 default:
7138 Unimplemented();
7139 }
7140 __ nop();
7141 %}
7143 ins_pc_relative(1);
7144 ins_pipe( pipe_alu_branch );
7145 %}
// Conditional branch: signed long compared against zero. Uses the dedicated
// compare-with-zero branches where available; "less" goes through slt since
// bltz is not used here. A single delayed()->nop() after the switch fills
// the branch delay slot.
7147 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7148 match( If cmp (CmpL src1 zero) );
7149 effect(USE labl);
7150 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7151 ins_cost(220);
7153 ins_encode %{
7154 Register opr1_reg = as_Register($src1$$reg);
7155 Label &target = *($labl$$label);
7156 int flag = $cmp$$cmpcode;
7158 switch(flag)
7159 {
7160 case 0x01: //equal
7161 if (&target)
7162 __ beq(opr1_reg, R0, target);
7163 else
7164 __ beq(opr1_reg, R0, int(0));
7165 break;
7167 case 0x02: //not_equal
7168 if(&target)
7169 __ bne(opr1_reg, R0, target);
7170 else
7171 __ bne(opr1_reg, R0, (int)0);
7172 break;
7174 case 0x03: //greater
7175 if(&target)
7176 __ bgtz(opr1_reg, target);
7177 else
7178 __ bgtz(opr1_reg, (int)0);
7179 break;
7181 case 0x04: //greater_equal
7182 if(&target)
7183 __ bgez(opr1_reg, target);
7184 else
7185 __ bgez(opr1_reg, (int)0);
7186 break;
7188 case 0x05: //less
7189 __ slt(AT, opr1_reg, R0);
7190 if(&target)
7191 __ bne(AT, R0, target);
7192 else
7193 __ bne(AT, R0, (int)0);
7194 break;
7196 case 0x06: //less_equal
7197 if (&target)
7198 __ blez(opr1_reg, target);
7199 else
7200 __ blez(opr1_reg, int(0));
7201 break;
7203 default:
7204 Unimplemented();
7205 }
7206 __ delayed()->nop();
7207 %}
7210 ins_pc_relative(1);
7211 ins_pipe( pipe_alu_branch );
7212 %}
7214 /*
7215 // Conditional Direct Branch
7216 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
7217 match(If cmp icc);
7218 effect(USE labl);
7220 size(8);
7221 ins_cost(BRANCH_COST);
7222 format %{ "BP$cmp $icc,$labl" %}
7223 // Prim = bits 24-22, Secnd = bits 31-30
7224 ins_encode( enc_bp( labl, cmp, icc ) );
7225 ins_pc_relative(1);
7226 ins_pipe(br_cc);
7227 %}
7228 */
7230 //FIXME
// Conditional branch on a single-precision float compare.  Uses the FPU
// condition flag: c.cond.s sets the flag, bc1t/bc1f branch on it.  Inverted
// (unordered) compares + bc1f are used so that NaN operands fall through on
// greater / greater_equal.  The trailing nop fills the branch delay slot.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // Pointer, not reference: the old `if (&L)` after *($labl$$label)
    // dereferenced a possibly-NULL pointer (UB) before checking it.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);         // NaN compares not-equal, so bc1f on c_eq is correct
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);  // true if src1 <= src2 or unordered
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);  // true if src1 < src2 or unordered
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Conditional branch on a double-precision float compare; mirrors
// branchConF_reg_reg but with the .d compare forms.  The trailing nop fills
// the branch delay slot.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // Pointer, not reference: the old `if (&L)` after *($labl$$label)
    // dereferenced a possibly-NULL pointer (UB) before checking it.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs.  c_eq_d + bc1f handles NaN correctly.
        __ c_eq_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);  // true if src1 <= src2 or unordered
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);  // true if src1 < src2 or unordered
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7363 // Call Runtime Instruction
// Direct call into the VM runtime (runtime stubs / leaf calls).
// Encoding is shared via the Java_To_Runtime encode class.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  // NOTE(review): call site is 16-byte aligned — presumably required for
  // call-site patching; confirm against the Java_To_Runtime encoding.
  ins_alignment(16);
%}
7377 //------------------------MemBar Instructions-------------------------------
7378 //Memory barrier flavors
// Acquire barrier (MemBarAcquire / LoadFence).
// MIPS has no finer-grained barrier in this port, so a full sync is emitted.
instruct membar_acquire() %{
  match(MemBarAcquire);
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR-acquire" %}
  // ins_encode( enc_membar_acquire );
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with lock acquisition (MemBarAcquireLock).
// The prior CMPXCHG in FastLock already serializes; a plain sync suffices.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(400);
  format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock just sync)" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Release barrier (MemBarRelease / StoreFence): orders all prior memory
// accesses before the subsequent store.  Full sync on this port.
instruct membar_release() %{
  match(MemBarRelease);
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR-release" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with lock release (MemBarReleaseLock).
// A FastUnlock follows, so a plain sync suffices.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(400);
  format %{ "MEMBAR-release (a FastiUnlock follows so just sync)" %}

  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Full (StoreLoad) barrier for volatile accesses (MemBarVolatile).
// Elided entirely on uniprocessor machines.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  /* ins_encode( enc_membar_volatile ); */
  ins_encode %{
    if( !os::is_MP() ) return; // Not needed on single CPU
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// MemBarVolatile that the matcher has proven redundant (a store-load barrier
// already follows, per Matcher::post_store_load_barrier).
// NOTE(review): a full sync is still emitted even though the predicate marks
// the barrier unnecessary — likely conservative for MIPS's relaxed memory
// model; confirm whether this sync can be dropped.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(400);
  format %{ "MEMBAR-volatile (unnecessary so just sync)" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// StoreStore barrier (MemBarStoreStore): orders prior stores before
// subsequent stores.  Implemented as a full sync on this port.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  format %{ "MEMBAR-storestore (sync)" %}
  ins_encode %{
    __ sync();
  %}
  ins_cost(400);
  ins_pipe(pipe_slow);
%}
7463 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long register as a pointer — a plain register move
// (or nothing at all when the allocator picked the same register).
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register from = $src$$Register;
    Register to   = $dst$$Register;
    // Only emit a move when source and destination actually differ.
    if (to != from) {
      __ move(to, from);
    }
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer register as a long — a plain register move
// (or nothing at all when the allocator picked the same register).
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register from = $src$$Register;
    Register to   = $dst$$Register;
    // Only emit a move when source and destination actually differ.
    if (to != from) {
      __ move(to, from);
    }
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: copy the raw 32-bit bit pattern of a float register into a GPR
// (no conversion) via mfc1.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    __ mfc1(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: copy the raw 32-bit bit pattern of a GPR into a float register
// (no conversion) via mtc1.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    __ mtc1(as_Register($src$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: copy the raw 64-bit bit pattern of a double register into a GPR
// (no conversion) via dmfc1.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    __ dmfc1(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: copy the raw 64-bit bit pattern of a GPR into a double register
// (no conversion) via dmtc1.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    __ dmtc1(as_Register($src$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe( pipe_slow );
%}
7548 //----------Conditional Move---------------------------------------------------
7549 // Conditional move
// Conditional move: dst = src when the signed int compare (tmp1 cop tmp2)
// holds, else dst is unchanged.  The condition is materialized into AT
// (AT is clobbered) and consumed by movz (move-if-zero) / movn (move-if-
// nonzero).
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);  // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);     // AT = (op2 < op1), signed
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move: dst = src when the unsigned pointer compare
// (tmp1 cop tmp2) holds.  Uses 64-bit subu/sltu since pointers are 64-bit.
// Clobbers AT.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);   // AT = (op2 < op1), unsigned
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move: dst = src when the unsigned narrow-oop compare
// (tmp1 cop tmp2) holds.  Narrow oops are 32-bit, hence subu32 for equality.
// Clobbers AT.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned narrow-oop compare.
// Narrow oops are 32-bit (subu32); clobbers AT.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned full-width
// pointer compare (64-bit subu/sltu).  Clobbers AT.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a double compare.  The FPU
// condition flag is set by c.cond.d and consumed by movt (move-if-true) /
// movf (move-if-false).  Inverted (c_ole/c_olt) compares + movf are used so
// NaN operands leave dst unchanged on greater / greater_equal.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // NaN compares not-equal; c_eq_d + movf keeps that property.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned narrow-oop
// compare (32-bit subu32 / sltu).  Clobbers AT.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned int compare (CmpU).
// NOTE(review): the equality test uses 64-bit subu — correct only if int
// operands are kept sign-extended in registers (the MIPS64 convention);
// confirm against the register-state invariants of this port.  Clobbers AT.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by a signed long compare
// (64-bit subu / slt).  Clobbers AT.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a signed long compare
// (64-bit subu / slt).  Clobbers AT.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by a double compare.  FPU condition
// flag set by c.cond.d, consumed by movt/movf; inverted compares + movf make
// NaN operands leave dst unchanged on greater / greater_equal.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg.  The change in
        // branchConD_reg_reg fixed a NaN bug; c_eq_d + movf mirrors it here.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned pointer compare
// (64-bit subu / sltu).  Clobbers AT.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a SIGNED int compare (cmpOp,
// slt).  Case comments previously said above/below (unsigned terms) — the
// generated code is signed; comments corrected.  Clobbers AT.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by a SIGNED int compare (cmpOp,
// slt).  Case comments previously said above/below (unsigned terms) — the
// generated code is signed; comments corrected.  Clobbers AT.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a signed int compare
// (subu32 / slt).  Clobbers AT.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a signed long compare
// (64-bit subu / slt).  Clobbers AT.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by an unsigned narrow-oop compare
// (32-bit subu32 / sltu).  Clobbers AT.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a double compare.  FPU condition
// flag set by c.cond.d, consumed by movt/movf; inverted compares + movf make
// NaN operands leave dst unchanged on greater / greater_equal.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // NaN compares not-equal; c_eq_d + movf keeps that property.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a double compare.  There is no
// FPR conditional-move pairing used here; instead the move is branched
// around: compare, branch (bc1f/bc1t) over the mov_d when the condition
// fails, nop for the branch delay slot, then mov_d.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);        // skip the move when not equal
        __ nop();          // branch delay slot
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg.  The change in
        // branchConD_reg_reg fixed a NaN bug; c_eq_d mirrors it here.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed off a 32-bit integer compare:
//   if (tmp1 <cop> tmp2) dst = src
// Implemented as a branch on the *negated* condition around the move.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    // Each nop fills the branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);      // skip the move unless op1 == op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);      // skip the move when op1 == op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);     // AT := (op1 > op2), signed
        __ beq(AT, R0, L);        // skip unless strictly greater
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);     // AT := (op1 < op2), signed
        __ bne(AT, R0, L);        // skip when strictly less
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);     // AT := (op1 < op2), signed
        __ beq(AT, R0, L);        // skip unless strictly less
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);     // AT := (op1 > op2), signed
        __ bne(AT, R0, L);        // skip when strictly greater
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed off a 32-bit integer compare:
//   if (tmp1 <cop> tmp2) dst = src
// Implemented as a branch on the *negated* condition around the move.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    // Each nop fills the branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);      // skip the move unless op1 == op2
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);      // skip the move when op1 == op2
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);     // AT := (op1 > op2), signed
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);     // AT := (op1 < op2), signed
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);     // AT := (op1 < op2), signed
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);     // AT := (op1 > op2), signed
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed off a pointer compare:
//   if (tmp1 <cop> tmp2) dst = src
//
// NOTE(review): the ordered cases (0x03-0x06) use the *signed* compare
// slt on pointer operands; raw pointer ordering would normally use the
// unsigned sltu. Confirm that C2 only emits eq/ne CmpP through this
// rule, or switch those cases to sltu.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    // Each nop fills the branch delay slot; branches are on the
    // negated condition, jumping over the move.
    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);   // signed compare -- see NOTE above
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);   // signed compare -- see NOTE above
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);   // signed compare -- see NOTE above
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);   // signed compare -- see NOTE above
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int keyed off a float compare:
//   if (tmp1 <cop> tmp2) dst = src
// Uses c.cond.s to set the FPU condition flag, then movt/movf (GPR
// moves conditional on that flag) -- branch-free, hence the low cost.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);   // cc := (tmp1 == tmp2)
        __ movt(dst, src);             // move when cc is set
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);             // move when cc clear, incl. unordered
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);  // cc := (tmp1 <= tmp2, ordered)
        __ movf(dst, src);             // move when tmp1 > tmp2 or unordered
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);  // cc := (tmp1 < tmp2, ordered)
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);  // cc := (tmp1 < tmp2 or unordered)
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);  // cc := (tmp1 <= tmp2 or unordered)
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float under a float compare:
//   if (tmp1 <cop> tmp2) dst = src
// c.cond.s sets the FPU condition flag, then a branch on the *negated*
// condition jumps over the move.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    // Every nop fills the branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        // Move only when tmp1 == tmp2.
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // Skip only when equal, so unordered (NaN) also takes the move.
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        // Move when !(tmp1 <= tmp2 ordered), i.e. greater or unordered.
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        // Move when !(tmp1 < tmp2 ordered).
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        // Move when tmp1 < tmp2 or unordered.
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        // Move when tmp1 <= tmp2 or unordered.
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
8899 // Manifest a CmpL result in an integer register. Very painful.
8900 // This is the test to avoid.
8901 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
8902 match(Set dst (CmpL3 src1 src2));
8903 ins_cost(1000);
8904 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
8905 ins_encode %{
8906 Register opr1 = as_Register($src1$$reg);
8907 Register opr2 = as_Register($src2$$reg);
8908 Register dst = as_Register($dst$$reg);
8910 Label p_one, done;
8912 __ subu(dst, opr1, opr2);
8914 __ beq(dst, R0, done);
8915 __ nop();
8917 __ bgtz(dst, done);
8918 __ delayed()->addiu32(dst, R0, 1);
8920 __ addiu32(dst, R0, -1);
8922 __ bind(done);
8923 %}
8924 ins_pipe( pipe_slow );
8925 %}
8927 //
8928 // less_rsult = -1
8929 // greater_result = 1
8930 // equal_result = 0
8931 // nan_result = -1
8932 //
// Flatten a float compare into -1/0/+1 in an integer register.
// Per the contract above: unordered (NaN) input yields -1.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label EQU, LESS, DONE;

    // Assume "greater" (+1), then test the other two outcomes.
    __ move(dst, 1);
    __ c_eq_s(src1, src2);    // cc := src1 == src2 (false when unordered)
    __ bc1t(EQU);
    __ nop();                 // branch delay slot
    __ c_ult_s(src1, src2);   // cc := src1 < src2 OR unordered -> NaN gives -1
    __ bc1t(LESS);
    __ nop();
    __ beq(R0, R0, DONE);     // unconditional: result stays +1
    __ nop();
    __ bind(EQU);
    __ move(dst, 0);
    __ beq(R0, R0, DONE);
    __ nop();
    __ bind(LESS);
    __ move(dst, -1);
    __ bind(DONE);
  %}
  ins_pipe( pipe_slow );
%}
// Flatten a double compare into -1/0/+1 in an integer register.
// Unordered (NaN) input yields -1, matching cmpF3_reg_reg above.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label EQU, LESS, DONE;

    // Assume "greater" (+1), then test the other two outcomes.
    __ move(dst, 1);
    __ c_eq_d(src1, src2);    // cc := src1 == src2 (false when unordered)
    __ bc1t(EQU);
    __ nop();                 // branch delay slot
    __ c_ult_d(src1, src2);   // cc := src1 < src2 OR unordered -> NaN gives -1
    __ bc1t(LESS);
    __ nop();
    __ beq(R0, R0, DONE);     // unconditional: result stays +1
    __ nop();
    __ bind(EQU);
    __ move(dst, 0);
    __ beq(R0, R0, DONE);
    __ nop();
    __ bind(LESS);
    __ move(dst, -1);
    __ bind(DONE);
  %}
  ins_pipe( pipe_slow );
%}
// Zero an array: store R0 into $cnt doublewords starting at $base.
//
// NOTE(review): the "number of bytes" comment below contradicts both
// the format string and the 2012 comment -- the loop advances by
// wordSize per store, so $cnt is treated as a doubleword count; the
// "bytes" comment appears stale.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    //Assume cnt is the number of bytes in an array to be cleared,
    //and base points to the starting address of the array.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);   // nothing to clear
    __ nop();               // branch delay slot
    __ move(AT, base);      // AT = running store pointer

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two char[] sequences.
// Result: difference of the first mismatching chars, or the length
// difference when one string is a prefix of the other. All four
// argument registers are clobbered (USE_KILL).
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);   // cnt1 := min(cnt1, cnt2)

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    //
    // NOTE(review): the lhu in the delay slot below executes even when
    // the loop exits, so 2 bytes at str1 are read even when cnt1 == 0;
    // presumably always backed by the array object -- verify.
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);               // common prefix exhausted
    __ delayed()->lhu(AT, str1, 0);;      // delay slot: load char from str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);         // mismatch: result from the chars
    __ delayed()->addi(str1, str1, 2);    // delay slot: advance str1
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);            // difference of mismatching chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9067 // intrinsic optimization
// char[] equality over $cnt chars: result := 1 if equal, 0 otherwise.
// str1/str2/cnt are consumed (USE_KILL); temp is a scratch register.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // Identical array reference -> trivially equal; the instruction
    // after each branch sits in the delay slot and sets result := 1.
    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);            // Loop begin
    // All chars consumed with no mismatch -> equal.
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1); // count == 0

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    // Mismatch -> not equal; delay slot sets result := 0.
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9108 //----------Arithmetic Instructions-------------------------------------------
9109 //----------Addition Instructions---------------------------------------------
9110 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9111 match(Set dst (AddI src1 src2));
9113 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9114 ins_encode %{
9115 Register dst = $dst$$Register;
9116 Register src1 = $src1$$Register;
9117 Register src2 = $src2$$Register;
9118 __ addu32(dst, src1, src2);
9119 %}
9120 ins_pipe( ialu_regI_regI );
9121 %}
9123 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9124 match(Set dst (AddI src1 src2));
9126 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9127 ins_encode %{
9128 Register dst = $dst$$Register;
9129 Register src1 = $src1$$Register;
9130 int imm = $src2$$constant;
9132 if(Assembler::is_simm16(imm)) {
9133 __ addiu32(dst, src1, imm);
9134 } else {
9135 __ move(AT, imm);
9136 __ addu32(dst, src1, AT);
9137 }
9138 %}
9139 ins_pipe( ialu_regI_regI );
9140 %}
9142 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9143 match(Set dst (AddP src1 src2));
9145 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9147 ins_encode %{
9148 Register dst = $dst$$Register;
9149 Register src1 = $src1$$Register;
9150 Register src2 = $src2$$Register;
9151 __ daddu(dst, src1, src2);
9152 %}
9154 ins_pipe( ialu_regI_regI );
9155 %}
9157 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9158 match(Set dst (AddP src1 (ConvI2L src2)));
9160 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9162 ins_encode %{
9163 Register dst = $dst$$Register;
9164 Register src1 = $src1$$Register;
9165 Register src2 = $src2$$Register;
9166 __ daddu(dst, src1, src2);
9167 %}
9169 ins_pipe( ialu_regI_regI );
9170 %}
9172 instruct addP_reg_imm(mRegP dst, mRegP src1, immL32 src2) %{
9173 match(Set dst (AddP src1 src2));
9174 // effect(KILL cr);
9176 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9177 ins_encode %{
9178 Register src1 = $src1$$Register;
9179 long src2 = $src2$$constant;
9180 Register dst = $dst$$Register;
9182 if(Assembler::is_simm16(src2)) {
9183 __ daddiu(dst, src1, src2);
9184 } else {
9185 __ li(AT, src2);
9186 __ daddu(dst, src1, AT);
9187 }
9188 %}
9189 ins_pipe( ialu_regI_imm16 );
9190 %}
9192 // Add Long Register with Register
9193 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9194 match(Set dst (AddL src1 src2));
9195 ins_cost(200);
9196 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9198 ins_encode %{
9199 Register dst_reg = as_Register($dst$$reg);
9200 Register src1_reg = as_Register($src1$$reg);
9201 Register src2_reg = as_Register($src2$$reg);
9203 __ daddu(dst_reg, src1_reg, src2_reg);
9204 %}
9206 ins_pipe( ialu_regL_regL );
9207 %}
9209 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9210 match(Set dst (AddL (ConvI2L src1) src2));
9211 ins_cost(200);
9212 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9214 ins_encode %{
9215 Register dst_reg = as_Register($dst$$reg);
9216 Register src1_reg = as_Register($src1$$reg);
9217 Register src2_reg = as_Register($src2$$reg);
9219 __ daddu(dst_reg, src1_reg, src2_reg);
9220 %}
9222 ins_pipe( ialu_regL_regL );
9223 %}
9225 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9226 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9227 ins_cost(200);
9228 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9230 ins_encode %{
9231 Register dst_reg = as_Register($dst$$reg);
9232 Register src1_reg = as_Register($src1$$reg);
9233 Register src2_reg = as_Register($src2$$reg);
9235 __ daddu(dst_reg, src1_reg, src2_reg);
9236 %}
9238 ins_pipe( ialu_regL_regL );
9239 %}
9241 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9242 match(Set dst (AddL src1 (ConvI2L src2)));
9243 ins_cost(200);
9244 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9246 ins_encode %{
9247 Register dst_reg = as_Register($dst$$reg);
9248 Register src1_reg = as_Register($src1$$reg);
9249 Register src2_reg = as_Register($src2$$reg);
9251 __ daddu(dst_reg, src1_reg, src2_reg);
9252 %}
9254 ins_pipe( ialu_regL_regL );
9255 %}
9257 //----------Subtraction Instructions-------------------------------------------
9258 // Integer Subtraction Instructions
9259 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9260 match(Set dst (SubI src1 src2));
9262 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9263 ins_encode %{
9264 Register dst = $dst$$Register;
9265 Register src1 = $src1$$Register;
9266 Register src2 = $src2$$Register;
9267 __ subu32(dst, src1, src2);
9268 %}
9269 ins_pipe( ialu_regI_regI );
9270 %}
9272 instruct subI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9273 match(Set dst (SubI src1 src2));
9275 format %{ "sub $dst, $src1, $src2 #@subI_Reg_imm" %}
9276 ins_encode %{
9277 Register dst = $dst$$Register;
9278 Register src1 = $src1$$Register;
9279 __ move(AT, -1 * $src2$$constant);
9280 __ addu32(dst, src1, AT);
9281 %}
9282 ins_pipe( ialu_regI_regI );
9283 %}
9285 // Subtract Long Register with Register.
9286 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9287 match(Set dst (SubL src1 src2));
9288 ins_cost(200);
9289 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9290 ins_encode %{
9291 Register dst = as_Register($dst$$reg);
9292 Register src1 = as_Register($src1$$reg);
9293 Register src2 = as_Register($src2$$reg);
9295 __ subu(dst, src1, src2);
9296 %}
9297 ins_pipe( ialu_regL_regL );
9298 %}
9300 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9301 match(Set dst (SubL src1 (ConvI2L src2)));
9302 ins_cost(200);
9303 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9304 ins_encode %{
9305 Register dst = as_Register($dst$$reg);
9306 Register src1 = as_Register($src1$$reg);
9307 Register src2 = as_Register($src2$$reg);
9309 __ subu(dst, src1, src2);
9310 %}
9311 ins_pipe( ialu_regL_regL );
9312 %}
9314 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9315 match(Set dst (SubL (ConvI2L src1) src2));
9316 ins_cost(200);
9317 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9318 ins_encode %{
9319 Register dst = as_Register($dst$$reg);
9320 Register src1 = as_Register($src1$$reg);
9321 Register src2 = as_Register($src2$$reg);
9323 __ subu(dst, src1, src2);
9324 %}
9325 ins_pipe( ialu_regL_regL );
9326 %}
9328 // Integer MOD with Register
// Integer remainder: dst = src1 % src2.
//
// NOTE(review): unlike divI_Reg_Reg below, no teq zero-divisor trap is
// emitted here -- presumably the zero check happens before ModI is
// reached; verify.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      // div leaves the quotient in LO and the remainder in HI.
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long remainder: dst = src1 % src2.
//
// NOTE(review): no zero-divisor trap here (cf. the teq in
// divI_Reg_Reg) -- presumably the zero check is done before ModL is
// reached; verify.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Loongson's fused 3-operand remainder.
      __ gsdmod(dst, op1, op2);
    } else {
      // ddiv leaves the remainder in HI.
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
9373 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9374 match(Set dst (MulI src1 src2));
9376 ins_cost(300);
9377 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9378 ins_encode %{
9379 Register src1 = $src1$$Register;
9380 Register src2 = $src2$$Register;
9381 Register dst = $dst$$Register;
9383 __ mul(dst, src1, src2);
9384 %}
9385 ins_pipe( ialu_mult );
9386 %}
// Fused multiply-add: dst = src1 * src2 + src3 (low 32 bits).
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    // Seed LO with the addend, then madd accumulates src1*src2 into
    // the HI:LO pair. Only the low 32 bits are read back (mflo), so
    // the stale HI contents do not affect the result.
    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// Integer division: dst = src1 / src2.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq raises a trap (code 0x7) when src2 == 0, standing in for the
    // hardware divide-by-zero exception other ports rely on.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      // Loongson's fused 3-operand divide writes dst directly.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops before mflo look like a pipeline
      // spacing requirement between div and mflo on the target core --
      // confirm before removing.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
9433 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9434 match(Set dst (DivF src1 src2));
9436 ins_cost(300);
9437 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
9438 ins_encode %{
9439 FloatRegister src1 = $src1$$FloatRegister;
9440 FloatRegister src2 = $src2$$FloatRegister;
9441 FloatRegister dst = $dst$$FloatRegister;
9443 /* Here do we need to trap an exception manually ? */
9444 __ div_s(dst, src1, src2);
9445 %}
9446 ins_pipe( pipe_slow );
9447 %}
9449 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
9450 match(Set dst (DivD src1 src2));
9452 ins_cost(300);
9453 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
9454 ins_encode %{
9455 FloatRegister src1 = $src1$$FloatRegister;
9456 FloatRegister src2 = $src2$$FloatRegister;
9457 FloatRegister dst = $dst$$FloatRegister;
9459 /* Here do we need to trap an exception manually ? */
9460 __ div_d(dst, src1, src2);
9461 %}
9462 ins_pipe( pipe_slow );
9463 %}
// Float divide by a constant: load the constant from this method's
// constant table into the scratch FP register $tmp, then divide.
instruct divF_Reg_immF(regF dst, regF src1, immF src2, regF tmp) %{
  match(Set dst (DivF src1 src2));
  effect(TEMP tmp);

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 [tmp = $tmp] @ divF_Reg_immF" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister tmp = $tmp$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    // Intern the float constant in the constant table and get its address.
    jfloat jf = $src2$$constant;
    address const_addr = __ float_constant(jf);
    assert (const_addr != NULL, "must create float constant in the constant table");

    // The materialized address must be fixed up if the code moves.
    __ relocate(relocInfo::internal_pc_type);
    __ li(AT, const_addr);
    __ lwc1(tmp, AT, 0);

    __ div_s(dst, src1, tmp);
  %}
  ins_pipe( pipe_slow );
%}
9490 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9491 match(Set dst (MulL src1 src2));
9492 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
9493 ins_encode %{
9494 Register dst = as_Register($dst$$reg);
9495 Register op1 = as_Register($src1$$reg);
9496 Register op2 = as_Register($src2$$reg);
9498 if (UseLoongsonISA) {
9499 __ gsdmult(dst, op1, op2);
9500 } else {
9501 __ dmult(op1, op2);
9502 __ mflo(dst);
9503 }
9504 %}
9505 ins_pipe( pipe_slow );
9506 %}
// Long division: dst = src1 / src2.
//
// NOTE(review): unlike divI_Reg_Reg, no teq zero-divisor trap is
// emitted here -- presumably the zero check is done before DivL is
// reached; verify.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Loongson's fused 3-operand divide writes dst directly.
      __ gsddiv(dst, op1, op2);
    } else {
      // ddiv leaves the quotient in LO.
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
9527 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
9528 match(Set dst (AddF src1 src2));
9529 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
9530 ins_encode %{
9531 FloatRegister src1 = as_FloatRegister($src1$$reg);
9532 FloatRegister src2 = as_FloatRegister($src2$$reg);
9533 FloatRegister dst = as_FloatRegister($dst$$reg);
9535 __ add_s(dst, src1, src2);
9536 %}
9537 ins_pipe( fpu_regF_regF );
9538 %}
9540 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
9541 match(Set dst (SubF src1 src2));
9542 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
9543 ins_encode %{
9544 FloatRegister src1 = as_FloatRegister($src1$$reg);
9545 FloatRegister src2 = as_FloatRegister($src2$$reg);
9546 FloatRegister dst = as_FloatRegister($dst$$reg);
9548 __ sub_s(dst, src1, src2);
9549 %}
9550 ins_pipe( fpu_regF_regF );
9551 %}
9552 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
9553 match(Set dst (AddD src1 src2));
9554 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
9555 ins_encode %{
9556 FloatRegister src1 = as_FloatRegister($src1$$reg);
9557 FloatRegister src2 = as_FloatRegister($src2$$reg);
9558 FloatRegister dst = as_FloatRegister($dst$$reg);
9560 __ add_d(dst, src1, src2);
9561 %}
9562 ins_pipe( fpu_regF_regF );
9563 %}
9565 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
9566 match(Set dst (SubD src1 src2));
9567 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
9568 ins_encode %{
9569 FloatRegister src1 = as_FloatRegister($src1$$reg);
9570 FloatRegister src2 = as_FloatRegister($src2$$reg);
9571 FloatRegister dst = as_FloatRegister($dst$$reg);
9573 __ sub_d(dst, src1, src2);
9574 %}
9575 ins_pipe( fpu_regF_regF );
9576 %}
9578 instruct negF_reg(regF dst, regF src) %{
9579 match(Set dst (NegF src));
9580 format %{ "negF $dst, $src @negF_reg" %}
9581 ins_encode %{
9582 FloatRegister src = as_FloatRegister($src$$reg);
9583 FloatRegister dst = as_FloatRegister($dst$$reg);
9585 __ neg_s(dst, src);
9586 %}
9587 ins_pipe( fpu_regF_regF );
9588 %}
9590 instruct negD_reg(regD dst, regD src) %{
9591 match(Set dst (NegD src));
9592 format %{ "negD $dst, $src @negD_reg" %}
9593 ins_encode %{
9594 FloatRegister src = as_FloatRegister($src$$reg);
9595 FloatRegister dst = as_FloatRegister($dst$$reg);
9597 __ neg_d(dst, src);
9598 %}
9599 ins_pipe( fpu_regF_regF );
9600 %}
9603 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
9604 match(Set dst (MulF src1 src2));
9605 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
9606 ins_encode %{
9607 FloatRegister src1 = $src1$$FloatRegister;
9608 FloatRegister src2 = $src2$$FloatRegister;
9609 FloatRegister dst = $dst$$FloatRegister;
9611 __ mul_s(dst, src1, src2);
9612 %}
9613 ins_pipe( fpu_regF_regF );
9614 %}
9616 // Mul two double precision floating piont number
9617 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
9618 match(Set dst (MulD src1 src2));
9619 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
9620 ins_encode %{
9621 FloatRegister src1 = $src1$$FloatRegister;
9622 FloatRegister src2 = $src2$$FloatRegister;
9623 FloatRegister dst = $dst$$FloatRegister;
9625 __ mul_d(dst, src1, src2);
9626 %}
9627 ins_pipe( fpu_regF_regF );
9628 %}
9630 instruct absF_reg(regF dst, regF src) %{
9631 match(Set dst (AbsF src));
9632 ins_cost(100);
9633 format %{ "absF $dst, $src @absF_reg" %}
9634 ins_encode %{
9635 FloatRegister src = as_FloatRegister($src$$reg);
9636 FloatRegister dst = as_FloatRegister($dst$$reg);
9638 __ abs_s(dst, src);
9639 %}
9640 ins_pipe( fpu_regF_regF );
9641 %}
9644 // intrinsics for math_native.
9645 // AbsD SqrtD CosD SinD TanD LogD Log10D
9647 instruct absD_reg(regD dst, regD src) %{
9648 match(Set dst (AbsD src));
9649 ins_cost(100);
9650 format %{ "absD $dst, $src @absD_reg" %}
9651 ins_encode %{
9652 FloatRegister src = as_FloatRegister($src$$reg);
9653 FloatRegister dst = as_FloatRegister($dst$$reg);
9655 __ abs_d(dst, src);
9656 %}
9657 ins_pipe( fpu_regF_regF );
9658 %}
9660 instruct sqrtD_reg(regD dst, regD src) %{
9661 match(Set dst (SqrtD src));
9662 ins_cost(100);
9663 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
9664 ins_encode %{
9665 FloatRegister src = as_FloatRegister($src$$reg);
9666 FloatRegister dst = as_FloatRegister($dst$$reg);
9668 __ sqrt_d(dst, src);
9669 %}
9670 ins_pipe( fpu_regF_regF );
9671 %}
9673 //----------------------------------Logical Instructions----------------------
9674 //__________________________________Integer Logical Instructions-------------
9676 //And Instuctions
9677 // And Register with Immediate
9678 instruct andI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9679 match(Set dst (AndI src1 src2));
9681 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm" %}
9682 ins_encode %{
9683 Register dst = $dst$$Register;
9684 Register src = $src1$$Register;
9685 int val = $src2$$constant;
9687 __ move(AT, val);
9688 __ andr(dst, src, AT);
9689 %}
9690 ins_pipe( ialu_regI_regI );
9691 %}
9693 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
9694 match(Set dst (AndI src1 src2));
9695 ins_cost(60);
9697 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
9698 ins_encode %{
9699 Register dst = $dst$$Register;
9700 Register src = $src1$$Register;
9701 int val = $src2$$constant;
9703 __ andi(dst, src, val);
9704 %}
9705 ins_pipe( ialu_regI_regI );
9706 %}
9708 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9709 match(Set dst (AndI src1 src2));
9711 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
9712 ins_encode %{
9713 Register dst = $dst$$Register;
9714 Register src1 = $src1$$Register;
9715 Register src2 = $src2$$Register;
9716 __ andr(dst, src1, src2);
9717 %}
9718 ins_pipe( ialu_regI_regI );
9719 %}
9721 // And Long Register with Register
9722 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9723 match(Set dst (AndL src1 src2));
9724 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
9725 ins_encode %{
9726 Register dst_reg = as_Register($dst$$reg);
9727 Register src1_reg = as_Register($src1$$reg);
9728 Register src2_reg = as_Register($src2$$reg);
9730 __ andr(dst_reg, src1_reg, src2_reg);
9731 %}
9732 ins_pipe( ialu_regL_regL );
9733 %}
// AND of a long with a sign-extended int (the ConvI2L is folded into the AND;
// the int operand is presumed already sign-extended in its 64-bit register --
// a MIPS64 register convention for ints; NOTE(review): confirm this invariant).
9735 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
9736 match(Set dst (AndL src1 (ConvI2L src2)));
9737 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
9738 ins_encode %{
9739 Register dst_reg = as_Register($dst$$reg);
9740 Register src1_reg = as_Register($src1$$reg);
9741 Register src2_reg = as_Register($src2$$reg);
9743 __ andr(dst_reg, src1_reg, src2_reg);
9744 %}
9745 ins_pipe( ialu_regL_regL );
9746 %}
// AND of a long with a 0..65535 immediate -- fits `andi`'s zero-extended
// 16-bit immediate field.
9748 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
9749 match(Set dst (AndL src1 src2));
9750 ins_cost(60);
9752 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
9753 ins_encode %{
9754 Register dst = $dst$$Register;
9755 Register src = $src1$$Register;
9756 long val = $src2$$constant;
9758 __ andi(dst, src, val);
9759 %}
9760 ins_pipe( ialu_regI_regI );
9761 %}
9763 // Or Long Register with Register
9764 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9765 match(Set dst (OrL src1 src2));
9766 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
9767 ins_encode %{
9768 Register dst_reg = $dst$$Register;
9769 Register src1_reg = $src1$$Register;
9770 Register src2_reg = $src2$$Register;
9772 __ orr(dst_reg, src1_reg, src2_reg);
9773 %}
9774 ins_pipe( ialu_regL_regL );
9775 %}
9777 // Xor Long Register with Register
9778 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9779 match(Set dst (XorL src1 src2));
9780 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
9781 ins_encode %{
9782 Register dst_reg = as_Register($dst$$reg);
9783 Register src1_reg = as_Register($src1$$reg);
9784 Register src2_reg = as_Register($src2$$reg);
9786 __ xorr(dst_reg, src1_reg, src2_reg);
9787 %}
9788 ins_pipe( ialu_regL_regL );
9789 %}
9791 // Shift Left by 8-bit immediate
// Int left shift by constant. For counts outside [0,32) the count is loaded
// into AT and `sllv` is used; `sllv` shifts by the low 5 bits of the count
// register, matching Java's shift-count masking semantics.
9792 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
9793 match(Set dst (LShiftI src shift));
9795 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
9796 ins_encode %{
9797 Register src = $src$$Register;
9798 Register dst = $dst$$Register;
9799 int shamt = $shift$$constant;
9801 /*
9802 094 SHL S0, S0, #-7 #@salI_Reg_imm
9803 static int insn_RRSO(int rt, int rd, int sa, int op) { return (rt<<16) | (rd<<11) | (sa<<6) | op; }
9804 void sll (Register rd, Register rt , int sa) {
9805 emit_long(insn_RRSO((int)rt->encoding(), (int)rd->encoding(), sa, sll_op));
9806 }
9807 */
9809 if(0 <= shamt && shamt < 32) __ sll(dst, src, shamt);
9810 else {
9811 __ move(AT, shamt);
9812 __ sllv(dst, src, AT);
9813 }
9814 %}
9815 ins_pipe( ialu_regI_regI );
9816 %}
// Same as salI_Reg_imm but the source is the low half of a long (L2I folded).
9818 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
9819 match(Set dst (LShiftI (ConvL2I src) shift));
9821 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
9822 ins_encode %{
9823 Register src = $src$$Register;
9824 Register dst = $dst$$Register;
9825 int shamt = $shift$$constant;
9827 if(0 <= shamt && shamt < 32) __ sll(dst, src, shamt);
9828 else {
9829 __ move(AT, shamt);
9830 __ sllv(dst, src, AT);
9831 }
9832 %}
9833 ins_pipe( ialu_regI_regI );
9834 %}
9836 // Shift Left by 8-bit immediate
// Variable-count int left shift; `sllv` masks the count to 5 bits in hardware.
9837 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
9838 match(Set dst (LShiftI src shift));
9840 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
9841 ins_encode %{
9842 Register src = $src$$Register;
9843 Register dst = $dst$$Register;
9844 Register shamt = $shift$$Register;
9845 __ sllv(dst, src, shamt);
9846 %}
9847 ins_pipe( ialu_regI_regI );
9848 %}
9851 // Shift Left Long
// Long left shift by constant. is_simm(shamt, 5) is true only for -16..15,
// so counts 16..31 conservatively take the move/dsllv path even though
// `dsll` itself accepts 0..31; functionally correct either way.
9852 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
9853 //predicate(UseNewLongLShift);
9854 match(Set dst (LShiftL src shift));
9855 ins_cost(100);
9856 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
9857 ins_encode %{
9858 Register src_reg = as_Register($src$$reg);
9859 Register dst_reg = as_Register($dst$$reg);
9860 int shamt = $shift$$constant;
9862 if (__ is_simm(shamt, 5))
9863 __ dsll(dst_reg, src_reg, shamt);
9864 else
9865 {
9866 __ move(AT, shamt);
9867 __ dsllv(dst_reg, src_reg, AT);
9868 }
9869 %}
9870 ins_pipe( ialu_regL_regL );
9871 %}
// NOTE(review): this rule and salL_convI2L_Reg_imm below have identical
// match rules (LShiftL (ConvI2L src) shift) and identical encodes -- one of
// the two is redundant; confirm which one adlc actually selects.
9873 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
9874 //predicate(UseNewLongLShift);
9875 match(Set dst (LShiftL (ConvI2L src) shift));
9876 ins_cost(100);
9877 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
9878 ins_encode %{
9879 Register src_reg = as_Register($src$$reg);
9880 Register dst_reg = as_Register($dst$$reg);
9881 int shamt = $shift$$constant;
9883 if (__ is_simm(shamt, 5))
9884 __ dsll(dst_reg, src_reg, shamt);
9885 else
9886 {
9887 __ move(AT, shamt);
9888 __ dsllv(dst_reg, src_reg, AT);
9889 }
9890 %}
9891 ins_pipe( ialu_regL_regL );
9892 %}
9894 // Shift Left Long
// Variable-count long left shift. The count is masked to 6 bits explicitly
// (Java semantics for long shifts); T9 is used as scratch for the masked count.
9895 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
9896 //predicate(UseNewLongLShift);
9897 match(Set dst (LShiftL src shift));
9898 ins_cost(100);
9899 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
9900 ins_encode %{
9901 Register creg = T9;
9902 Register src_reg = as_Register($src$$reg);
9903 Register dst_reg = as_Register($dst$$reg);
9905 __ move(creg, $shift$$Register);
9906 __ andi(creg, creg, 0x3f);
9907 __ dsllv(dst_reg, src_reg, creg);
9908 %}
9909 ins_pipe( ialu_regL_regL );
9910 %}
// See NOTE above: duplicate of salL_RegI2L_imm.
9912 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
9913 match(Set dst (LShiftL (ConvI2L src) shift));
9914 ins_cost(100);
9915 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
9916 ins_encode %{
9917 Register src_reg = as_Register($src$$reg);
9918 Register dst_reg = as_Register($dst$$reg);
9919 int shamt = $shift$$constant;
9921 if (__ is_simm(shamt, 5)) {
9922 __ dsll(dst_reg, src_reg, shamt);
9923 } else {
9924 __ move(AT, shamt);
9925 __ dsllv(dst_reg, src_reg, AT);
9926 }
9927 %}
9928 ins_pipe( ialu_regL_regL );
9929 %}
9931 // Shift Right Long
// Arithmetic right shift of a long by constant. The count is pre-masked to
// 6 bits (Java long-shift semantics); the immediate `dsra` form is used only
// for 0..15 (is_simm(.,5)), larger counts go through AT and `dsrav`.
9932 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
9933 //predicate(UseNewLongLShift);
9934 match(Set dst (RShiftL src shift));
9935 ins_cost(100);
9936 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
9937 ins_encode %{
9938 Register src_reg = as_Register($src$$reg);
9939 Register dst_reg = as_Register($dst$$reg);
9940 int shamt = ($shift$$constant & 0x3f);
9941 if (__ is_simm(shamt, 5))
9942 __ dsra(dst_reg, src_reg, shamt);
9943 else
9944 {
9945 __ move(AT, shamt);
9946 __ dsrav(dst_reg, src_reg, AT);
9947 }
9948 %}
9949 ins_pipe( ialu_regL_regL );
9950 %}
9952 // Shift Right Long arithmetically
// Variable-count version; count masked to 6 bits in T9 before `dsrav`.
9953 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
9954 //predicate(UseNewLongLShift);
9955 match(Set dst (RShiftL src shift));
9956 ins_cost(100);
9957 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
9958 ins_encode %{
9959 Register creg = T9;
9960 Register src_reg = as_Register($src$$reg);
9961 Register dst_reg = as_Register($dst$$reg);
9963 __ move(creg, $shift$$Register);
9964 __ andi(creg, creg, 0x3f);
9965 __ dsrav(dst_reg, src_reg, creg);
9966 %}
9967 ins_pipe( ialu_regL_regL );
9968 %}
9970 // Shift Right Long logically
// Variable-count logical right shift of a long; count masked to 6 bits.
9971 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
9972 match(Set dst (URShiftL src shift));
9973 ins_cost(100);
9974 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
9975 ins_encode %{
9976 Register creg = T9;
9977 Register src_reg = as_Register($src$$reg);
9978 Register dst_reg = as_Register($dst$$reg);
9980 __ move(creg, $shift$$Register);
9981 __ andi(creg, creg, 0x3f);
9982 __ dsrlv(dst_reg, src_reg, creg);
9983 %}
9984 ins_pipe( ialu_regL_regL );
9985 %}
// Constant-count split: counts 0..31 use `dsrl` directly ...
9987 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
9988 match(Set dst (URShiftL src shift));
9989 ins_cost(80);
9990 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
9991 ins_encode %{
9992 Register src_reg = as_Register($src$$reg);
9993 Register dst_reg = as_Register($dst$$reg);
9994 int shamt = $shift$$constant;
9996 __ dsrl(dst_reg, src_reg, shamt);
9997 %}
9998 ins_pipe( ialu_regL_regL );
9999 %}
// ... and counts 32..63 use `dsrl32`, which shifts by (count - 32) + 32.
10001 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
10002 match(Set dst (URShiftL src shift));
10003 ins_cost(80);
10004 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
10005 ins_encode %{
10006 Register src_reg = as_Register($src$$reg);
10007 Register dst_reg = as_Register($dst$$reg);
10008 int shamt = $shift$$constant;
10010 __ dsrl32(dst_reg, src_reg, shamt - 32);
10011 %}
10012 ins_pipe( ialu_regL_regL );
10013 %}
10015 // Xor Instructions
10016 // Xor Register with Register
// Int XOR; the trailing `sll dst, dst, 0` canonicalizes the result by
// sign-extending the low 32 bits into the 64-bit register (MIPS64 int form).
10017 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10018 match(Set dst (XorI src1 src2));
10020 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
10022 ins_encode %{
10023 Register dst = $dst$$Register;
10024 Register src1 = $src1$$Register;
10025 Register src2 = $src2$$Register;
10026 __ xorr(dst, src1, src2);
10027 __ sll(dst, dst, 0); /* long -> int */
10028 %}
10030 ins_pipe( ialu_regI_regI );
10031 %}
10033 // Or Instructions
10034 // Or Register with Register
10035 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10036 match(Set dst (OrI src1 src2));
10038 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
10039 ins_encode %{
10040 Register dst = $dst$$Register;
10041 Register src1 = $src1$$Register;
10042 Register src2 = $src2$$Register;
10043 __ orr(dst, src1, src2);
10044 %}
10046 ins_pipe( ialu_regI_regI );
10047 %}
// OR of a long with a pointer reinterpreted as an integer (CastP2X folded).
// NOTE(review): despite the OrI in the match rule, the operands are 64-bit
// (mRegL/mRegP) -- confirm this rule is only reached with long-width values.
10049 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
10050 match(Set dst (OrI src1 (CastP2X src2)));
10052 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
10053 ins_encode %{
10054 Register dst = $dst$$Register;
10055 Register src1 = $src1$$Register;
10056 Register src2 = $src2$$Register;
10057 __ orr(dst, src1, src2);
10058 %}
10060 ins_pipe( ialu_regI_regI );
10061 %}
10063 // Logical Shift Right by 8-bit immediate
// Int logical right shift by constant. For shift <= 0 the count is loaded
// into AT and `srlv` used; `srlv` masks the count to 5 bits in hardware,
// so a negative constant still yields Java's masked-shift behavior.
10064 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10065 match(Set dst (URShiftI src shift));
10066 // effect(KILL cr);
10068 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
10069 ins_encode %{
10070 Register src = $src$$Register;
10071 Register dst = $dst$$Register;
10072 int shift = $shift$$constant;
10073 if (shift > 0)
10074 __ srl(dst, src, shift);
10075 else
10076 {
10077 __ move(AT, shift);
10078 __ srlv(dst, src, AT);
10079 }
10080 %}
10081 ins_pipe( ialu_regI_regI );
10082 %}
10084 // Logical Shift Right
// Variable-count int logical right shift; hardware masks the count to 5 bits.
10085 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10086 match(Set dst (URShiftI src shift));
10088 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
10089 ins_encode %{
10090 Register src = $src$$Register;
10091 Register dst = $dst$$Register;
10092 Register shift = $shift$$Register;
10093 __ srlv(dst, src, shift);
10094 %}
10095 ins_pipe( ialu_regI_regI );
10096 %}
// Arithmetic right shift of an int by constant; `sra` takes a 5-bit count.
10099 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10100 match(Set dst (RShiftI src shift));
10101 // effect(KILL cr);
10103 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
10104 ins_encode %{
10105 Register src = $src$$Register;
10106 Register dst = $dst$$Register;
10107 int shift = $shift$$constant;
10108 __ sra(dst, src, shift);
10109 %}
10110 ins_pipe( ialu_regI_regI );
10111 %}
// Variable-count arithmetic right shift of an int.
10113 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10114 match(Set dst (RShiftI src shift));
10115 // effect(KILL cr);
10117 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
10118 ins_encode %{
10119 Register src = $src$$Register;
10120 Register dst = $dst$$Register;
10121 Register shift = $shift$$Register;
10122 __ srav(dst, src, shift);
10123 %}
10124 ins_pipe( ialu_regI_regI );
10125 %}
10127 //----------Convert Int to Boolean---------------------------------------------
// Plain register copy used as the first half of the Conv2B expansion below.
10129 instruct movI_nocopy(mRegI dst, mRegI src) %{
10130 effect( DEF dst, USE src );
10131 format %{ "MOV $dst, $src @ movI_nocopy" %}
10132 ins_encode %{
10133 Register dst = $dst$$Register;
10134 Register src = $src$$Register;
10135 __ move(dst, src);
10136 %}
10137 ins_pipe( ialu_regI_regI );
10138 %}
// Second half of Conv2B: on entry dst == src (set by movI_nocopy). Emulates
// x86 NEG+ADC: AT = src, AT += 1 if dst != 0, then dst = -dst + AT.
// Net effect: dst = 1 if src != 0, else 0.
10140 instruct ci2b(mRegI dst, mRegI src) %{
10141 effect( USE_DEF dst, USE src );
10143 format %{ "NEG $dst @ ci2b\n\t"
10144 "ADC $dst,$src @ ci2b" %}
10145 ins_encode %{
10146 Register dst = $dst$$Register;
10147 Register src = $src$$Register;
10148 Label L;
10149 //If ( dst != 0 ) CF = 1;
10150 guarantee(dst != src, "in ci2b");
10151 __ move(AT, src);
10152 __ beq(dst, R0, L);
10153 __ nop();
10154 __ addiu(AT, AT, 1);
10155 __ bind(L);
10156 __ neg(dst);
10157 __ addu(dst, dst, AT);
10158 %}
10160 ins_pipe( ialu_regL_regL );
10161 %}
// Conv2B for ints: expands into copy + ci2b (dst gets 0/1).
10164 instruct convI2B(mRegI dst, mRegI src) %{
10165 match(Set dst (Conv2B src));
10167 expand %{
10168 movI_nocopy(dst,src);
10169 ci2b(dst,src);
10170 %}
10171 %}
// Int -> long: `sll r, r, 0` sign-extends the low 32 bits on MIPS64, so a
// no-op shift suffices; skipped entirely when dst aliases src.
10173 instruct convI2L_reg( mRegL dst, mRegI src) %{
10174 match(Set dst (ConvI2L src));
10176 ins_cost(50);
10177 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
10178 ins_encode %{
10179 Register dst = as_Register($dst$$reg);
10180 Register src = as_Register($src$$reg);
10182 if(dst != src) __ sll(dst, src, 0);
10183 %}
10184 ins_pipe( ialu_regL_regL );
10185 %}
// Long -> int: shift left 32 then arithmetic-shift right 32 to keep only the
// low 32 bits, sign-extended (canonical MIPS64 int representation).
10188 instruct convL2I_reg( mRegI dst, mRegL src ) %{
10189 match(Set dst (ConvL2I src));
10190 effect( DEF dst, USE src );
10191 format %{ "MOV $dst, $src @ convL2I_reg" %}
10192 ins_encode %{
10193 Register dst = as_Register($dst$$reg);
10194 Register src = as_Register($src$$reg);
10196 __ dsll32(dst, src, 0);
10197 __ dsra32(dst, dst, 0);
10198 %}
10200 ins_pipe( ialu_regI_regI );
10201 %}
// Long -> double: move the 64-bit GPR into an FPR, then convert with cvt.d.l.
10203 instruct convL2D_reg( regD dst, mRegL src ) %{
10204 match(Set dst (ConvL2D src));
10205 effect( DEF dst, USE src );
10206 format %{ "convL2D $dst, $src @ convL2D_reg" %}
10207 ins_encode %{
10208 Register src = as_Register($src$$reg);
10209 FloatRegister dst = as_FloatRegister($dst$$reg);
10211 __ dmtc1(src, dst);
10212 __ cvt_d_l(dst, dst);
10213 %}
10215 ins_pipe( pipe_slow );
10216 %}
// Double -> long with Java semantics: NaN -> 0 (c.un.d check); otherwise
// trunc.l.d, and if the FCSR invalid-operation cause bit (0x10000 after cfc1
// from FCSR/reg 31) is set -- i.e. overflow -- fall back to
// SharedRuntime::d2l for the saturating result.
10218 instruct convD2L_reg( mRegL dst, regD src ) %{
10219 match(Set dst (ConvD2L src));
10220 effect( DEF dst, USE src );
10221 format %{ "convD2L $dst, $src @ convD2L_reg" %}
10222 ins_encode %{
10223 Register dst = as_Register($dst$$reg);
10224 FloatRegister src = as_FloatRegister($src$$reg);
10226 Label L;
10228 __ c_un_d(src, src); //NaN?
10229 __ bc1t(L);
10230 __ delayed();
10231 __ move(dst, R0);
10233 __ trunc_l_d(F30, src);
10234 __ cfc1(AT, 31);
10235 __ li(T9, 0x10000);
10236 __ andr(AT, AT, T9);
10237 __ beq(AT, R0, L);
10238 __ delayed()->dmfc1(dst, F30);
10240 __ mov_d(F12, src);
10241 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
10242 __ move(dst, V0);
10243 __ bind(L);
10244 %}
10246 ins_pipe( pipe_slow );
10247 %}
// Float -> int with Java semantics: NaN -> 0; trunc.w.s for the common case;
// on FCSR invalid-operation (overflow) fall back to SharedRuntime::f2i.
// V0 is explicitly preserved around the leaf call (see the 2014/01/08 note).
10249 instruct convF2I_reg( mRegI dst, regF src ) %{
10250 match(Set dst (ConvF2I src));
10251 effect( DEF dst, USE src );
10252 format %{ "convf2i $dst, $src @ convF2I_reg" %}
10253 ins_encode %{
10254 Register dreg = $dst$$Register;
10255 FloatRegister fval = $src$$FloatRegister;
10256 Label L;
10258 __ c_un_s(fval, fval); //NaN?
10259 __ bc1t(L);
10260 __ delayed();
10261 __ move(dreg, R0);
10263 __ trunc_w_s(F30, fval);
10265 /* Call SharedRuntime:f2i() to do valid convention */
10266 __ cfc1(AT, 31);
10267 __ li(T9, 0x10000);
10268 __ andr(AT, AT, T9);
10269 __ beq(AT, R0, L);
10270 __ delayed()->mfc1(dreg, F30);
10272 __ mov_s(F12, fval);
10274 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
10275 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
10276 *
10277 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
10278 * V0 is corrupted during call_VM_leaf(), and should be preserved.
10279 */
10280 if(dreg != V0) {
10281 __ push(V0);
10282 }
10283 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
10284 if(dreg != V0) {
10285 __ move(dreg, V0);
10286 __ pop(V0);
10287 }
10288 __ bind(L);
10289 %}
10291 ins_pipe( pipe_slow );
10292 %}
// Float -> long with Java semantics: NaN -> 0; trunc.l.s for the common case;
// on FCSR invalid-operation fall back to SharedRuntime::f2l.
// NOTE(review): unlike convF2I_reg, V0 is not preserved around the leaf call
// here -- confirm dst can never alias a live V0 in this rule.
10294 instruct convF2L_reg( mRegL dst, regF src ) %{
10295 match(Set dst (ConvF2L src));
10296 effect( DEF dst, USE src );
10297 format %{ "convf2l $dst, $src @ convF2L_reg" %}
10298 ins_encode %{
10299 Register dst = as_Register($dst$$reg);
10300 FloatRegister fval = $src$$FloatRegister;
10301 Label L;
10303 __ c_un_s(fval, fval); //NaN?
10304 __ bc1t(L);
10305 __ delayed();
10306 __ move(dst, R0);
10308 __ trunc_l_s(F30, fval);
10309 __ cfc1(AT, 31);
10310 __ li(T9, 0x10000);
10311 __ andr(AT, AT, T9);
10312 __ beq(AT, R0, L);
10313 __ delayed()->dmfc1(dst, F30);
10315 __ mov_s(F12, fval);
10316 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
10317 __ move(dst, V0);
10318 __ bind(L);
10319 %}
10321 ins_pipe( pipe_slow );
10322 %}
// Long -> float: move the 64-bit GPR into the FPR with dmtc1, then cvt.s.l.
// Fix: removed the unused local `Label L` -- this encode is branch-free and
// the label was never bound or referenced.
10324 instruct convL2F_reg( regF dst, mRegL src ) %{
10325 match(Set dst (ConvL2F src));
10326 effect( DEF dst, USE src );
10327 format %{ "convl2f $dst, $src @ convL2F_reg" %}
10328 ins_encode %{
10329 FloatRegister dst = $dst$$FloatRegister;
10330 Register src = as_Register($src$$reg);
10333 __ dmtc1(src, dst);
10334 __ cvt_s_l(dst, dst);
10335 %}
10337 ins_pipe( pipe_slow );
10338 %}
// Int -> float: mtc1 the 32-bit value into the FPR, then cvt.s.w.
10340 instruct convI2F_reg( regF dst, mRegI src ) %{
10341 match(Set dst (ConvI2F src));
10342 effect( DEF dst, USE src );
10343 format %{ "convi2f $dst, $src @ convI2F_reg" %}
10344 ins_encode %{
10345 Register src = $src$$Register;
10346 FloatRegister dst = $dst$$FloatRegister;
10348 __ mtc1(src, dst);
10349 __ cvt_s_w(dst, dst);
10350 %}
10352 ins_pipe( fpu_regF_regF );
10353 %}
// CmpLTMask against zero: the sign bit replicated across the word, i.e.
// dst = (p < 0) ? -1 : 0, done with a single `sra` by 31.
10355 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
10356 match(Set dst (CmpLTMask p zero));
10357 ins_cost(100);
10359 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
10360 ins_encode %{
10361 Register src = $p$$Register;
10362 Register dst = $dst$$Register;
10364 __ sra(dst, src, 31);
10365 %}
10366 ins_pipe( pipe_slow );
10367 %}
// General CmpLTMask: slt gives 0/1, then 0 - slt gives 0/-1 (all-ones mask).
10370 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
10371 match(Set dst (CmpLTMask p q));
10372 ins_cost(400);
10374 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
10375 ins_encode %{
10376 Register p = $p$$Register;
10377 Register q = $q$$Register;
10378 Register dst = $dst$$Register;
10380 __ slt(dst, p, q);
10381 __ subu(dst, R0, dst);
10382 %}
10383 ins_pipe( pipe_slow );
10384 %}
// Pointer copy (via addu with R0) used as the first half of the pointer
// Conv2B expansion below.
10386 instruct movP_nocopy(mRegI dst, mRegP src) %{
10387 effect( DEF dst, USE src );
10388 format %{ "MOV $dst,$src @ movP_nocopy" %}
10389 ins_encode %{
10390 Register dst = $dst$$Register;
10391 Register src = $src$$Register;
10392 __ addu(dst, src, R0);
10393 %}
10394 // ins_encode( enc_Copy( dst, src) );
10395 ins_pipe( ialu_regI_regI );
10396 %}
10398 //FIXME
10399 //instruct cp2b( mRegI dst, mRegP src, eFlagsReg cr ) %{
// Pointer flavor of ci2b: on entry dst == src (set by movP_nocopy);
// produces dst = 1 if src != NULL, else 0 (NEG+ADC emulation, as in ci2b).
10400 instruct cp2b( mRegI dst, mRegP src ) %{
10401 effect( USE_DEF dst, USE src );
10402 format %{ "NEG $dst\n\t @cp2b"
10403 "ADC $dst,$src @cp2b" %}
10404 ins_encode %{
10405 Register dst = $dst$$Register;
10406 Register src = $src$$Register;
10407 Label L;
10408 //If ( dst != 0 ) CF = 1;
10409 __ move(AT, src);
10410 __ beq(dst, R0, L);
10411 __ nop();
10412 __ addiu(AT, AT, 1);
10413 __ bind(L);
10414 __ neg(dst);
10415 __ addu(dst, dst, AT);
10416 %}
10418 ins_pipe( ialu_regL_regL );
10419 %}
// Conv2B for pointers: expands into copy + cp2b (dst gets 0/1).
10421 instruct convP2B( mRegI dst, mRegP src ) %{
10422 match(Set dst (Conv2B src));
10424 expand %{
10425 movP_nocopy(dst,src);
10426 cp2b(dst,src);
10427 %}
10428 %}
// Int -> double: mtc1 then cvt.d.w. Always exact, so no fallback needed.
10430 instruct convI2D_reg_reg(regD dst, mRegI src) %{
10431 match(Set dst (ConvI2D src));
10432 format %{ "conI2D $dst, $src @convI2D_reg" %}
10433 ins_encode %{
10434 Register src = $src$$Register;
10435 FloatRegister dst = $dst$$FloatRegister;
10436 __ mtc1(src, dst);
10437 __ cvt_d_w(dst, dst);
10438 %}
10439 ins_pipe( fpu_regF_regF );
10440 %}
// Float -> int (duplicate match rule of convF2I_reg above).
// FIX: the original encode was copy-pasted from convF2D_reg_reg -- it cast
// the *integer* dst operand to a FloatRegister and emitted cvt_d_s
// (float -> double), which is wrong for ConvF2I (the format tag even said
// "@convF2D_reg_reg"). Re-encoded with the proven sequence from convF2I_reg:
// NaN -> 0, trunc.w.s, and on FCSR invalid-operation fall back to
// SharedRuntime::f2i, preserving V0 across the leaf call.
// NOTE(review): consider removing this rule entirely in favor of convF2I_reg.
instruct convF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (ConvF2I src));
  format %{ "convF2I $dst, $src\t# @convF2I_reg_reg" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);
    // Preserve V0 across the runtime call (cf. the 2014/01/08 note in convF2I_reg).
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}
  // May call into the runtime, so this is not a simple FPU-pipe op.
  ins_pipe( pipe_slow );
%}
// Float -> double widening: single cvt.d.s, always exact.
10454 instruct convF2D_reg_reg(regD dst, regF src) %{
10455 match(Set dst (ConvF2D src));
10456 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
10457 ins_encode %{
10458 FloatRegister dst = $dst$$FloatRegister;
10459 FloatRegister src = $src$$FloatRegister;
10461 __ cvt_d_s(dst, src);
10462 %}
10463 ins_pipe( fpu_regF_regF );
10464 %}
// Double -> float narrowing: single cvt.s.d (rounds per current FCSR mode).
10466 instruct convD2F_reg_reg(regF dst, regD src) %{
10467 match(Set dst (ConvD2F src));
10468 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
10469 ins_encode %{
10470 FloatRegister dst = $dst$$FloatRegister;
10471 FloatRegister src = $src$$FloatRegister;
10473 __ cvt_s_d(dst, src);
10474 %}
10475 ins_pipe( fpu_regF_regF );
10476 %}
10478 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// trunc.w.d for the common case; on FCSR invalid-operation (NaN or overflow,
// bit 0x10000 after cfc1 from FCSR) fall back to SharedRuntime::d2i.
// NOTE(review): unlike convF2I_reg there is no explicit NaN pre-check here;
// NaN is caught via the invalid-op flag and handled by d2i instead.
10479 instruct convD2I_reg_reg( mRegI dst, regD src ) %{
10480 match(Set dst (ConvD2I src));
10481 // effect( KILL tmp, KILL cr );//after this instruction, it will release register tmp and cr
10483 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg \n\t" %}
10485 ins_encode %{
10486 FloatRegister src = $src$$FloatRegister;
10487 Register dst = $dst$$Register;
10488 Label L;
10490 __ trunc_w_d(F30, src);
10491 __ cfc1(AT, 31);
10492 __ li(T9, 0x10000);
10493 __ andr(AT, AT, T9);
10494 __ beq(AT, R0, L);
10495 __ delayed()->mfc1(dst, F30);
10497 __ mov_d(F12, src);
10498 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
10499 __ move(dst, V0);
10500 __ bind(L);
10502 %}
10503 ins_pipe( pipe_slow );
10504 %}
10506 // Convert oop pointer into compressed form
// May-be-null variant: copy to dst first (encode_heap_oop works in place).
10507 instruct encodeHeapOop(mRegN dst, mRegP src) %{
10508 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
10509 match(Set dst (EncodeP src));
10510 format %{ "encode_heap_oop $dst,$src" %}
10511 ins_encode %{
10512 Register src = $src$$Register;
10513 Register dst = $dst$$Register;
10514 if (src != dst) {
10515 __ move(dst, src);
10516 }
10517 __ encode_heap_oop(dst);
10518 %}
10519 ins_pipe( ialu_regL_regL );
10520 %}
// Known-not-null variant: the macroassembler helper can skip the null check.
10522 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
10523 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
10524 match(Set dst (EncodeP src));
10525 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
10526 ins_encode %{
10527 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
10528 %}
10529 ins_pipe( ialu_regL_regL );
10530 %}
// Decompress a narrow oop that may be null (and is not a constant).
10532 instruct decodeHeapOop(mRegP dst, mRegN src) %{
10533 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
10534 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
10535 match(Set dst (DecodeN src));
10536 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
10537 ins_encode %{
10538 Register s = $src$$Register;
10539 Register d = $dst$$Register;
10540 if (s != d) {
10541 __ move(d, s);
10542 }
10543 __ decode_heap_oop(d);
10544 %}
10545 ins_pipe( ialu_regL_regL );
10546 %}
// Known-not-null (or constant) decompress; uses the two-register helper when
// src and dst differ so the copy is fused into the decode.
10548 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
10549 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
10550 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
10551 match(Set dst (DecodeN src));
10552 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
10553 ins_encode %{
10554 Register s = $src$$Register;
10555 Register d = $dst$$Register;
10556 if (s != d) {
10557 __ decode_heap_oop_not_null(d, s);
10558 } else {
10559 __ decode_heap_oop_not_null(d);
10560 }
10561 %}
10562 ins_pipe( ialu_regL_regL );
10563 %}
// Compress a klass pointer (klass pointers are never null, so only the
// not-null variant exists).
10565 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
10566 match(Set dst (EncodePKlass src));
10567 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
10568 ins_encode %{
10569 __ encode_klass_not_null($dst$$Register, $src$$Register);
10570 %}
10571 ins_pipe( ialu_regL_regL );
10572 %}
// Decompress a narrow klass pointer; two-register helper fuses the copy.
10574 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
10575 match(Set dst (DecodeNKlass src));
10576 format %{ "decode_heap_klass_not_null $dst,$src" %}
10577 ins_encode %{
10578 Register s = $src$$Register;
10579 Register d = $dst$$Register;
10580 if (s != d) {
10581 __ decode_klass_not_null(d, s);
10582 } else {
10583 __ decode_klass_not_null(d);
10584 }
10585 %}
10586 ins_pipe( ialu_regL_regL );
10587 %}
10589 //FIXME
// Materialize the current JavaThread pointer: a register move from TREG when
// OPT_THREAD dedicates a register to it, else the get_thread() helper.
10590 instruct tlsLoadP(mRegP dst) %{
10591 match(Set dst (ThreadLocal));
10593 ins_cost(0);
10594 format %{ " get_thread in $dst #@tlsLoadP" %}
10595 ins_encode %{
10596 Register dst = $dst$$Register;
10597 #ifdef OPT_THREAD
10598 __ move(dst, TREG);
10599 #else
10600 __ get_thread(dst);
10601 #endif
10602 %}
10604 ins_pipe( ialu_loadI );
10605 %}
// CheckCastPP is a no-op at the machine level (type-system only).
10608 instruct checkCastPP( mRegP dst ) %{
10609 match(Set dst (CheckCastPP dst));
10611 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
10612 ins_encode( /*empty encoding*/ );
10613 ins_pipe( empty );
10614 %}
// CastPP likewise emits no code.
10616 instruct castPP(mRegP dst)
10617 %{
10618 match(Set dst (CastPP dst));
10620 size(0);
10621 format %{ "# castPP of $dst" %}
10622 ins_encode(/* empty encoding */);
10623 ins_pipe(empty);
10624 %}
// CastII likewise emits no code.
10626 instruct castII( mRegI dst ) %{
10627 match(Set dst (CastII dst));
10628 format %{ "#castII of $dst empty encoding" %}
10629 ins_encode( /*empty encoding*/ );
10630 ins_cost(0);
10631 ins_pipe( empty );
10632 %}
10634 // Return Instruction
10635 // Remove the return address & jump to it.
// `jr RA` plus the mandatory branch-delay-slot nop.
10636 instruct Ret() %{
10637 match(Return);
10638 format %{ "RET #@Ret" %}
10640 ins_encode %{
10641 __ jr(RA);
10642 __ nop();
10643 %}
10645 ins_pipe( pipe_jump );
10646 %}
10649 // Jump Direct - Label defines a relative address from JMP
// `&L` equals the raw $labl$$label pointer (L is a reference to *pointer),
// so the test distinguishes a bound label from a null/placeholder one, in
// which case a zero-displacement branch is emitted to be patched later.
10650 instruct jmpDir(label labl) %{
10651 match(Goto);
10652 effect(USE labl);
10654 ins_cost(300);
10655 format %{ "JMP $labl #@jmpDir" %}
10657 ins_encode %{
10658 Label &L = *($labl$$label);
10659 if(&L)
10660 __ b(L);
10661 else
10662 __ b(int(0));
10663 __ nop();
10664 %}
10666 ins_pipe( pipe_jump );
10667 ins_pc_relative(1);
10668 %}
10672 // Tail Jump; remove the return address; jump to target.
10673 // TailCall above leaves the return address around.
10674 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
10675 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
10676 // "restore" before this instruction (in Epilogue), we need to materialize it
10677 // in %i0.
10678 //FIXME
// Places the exception oop in V0 and the exception pc (current RA) in V1 --
// the registers the exception stubs expect -- then jumps to the target.
10679 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
10680 match( TailJump jump_target ex_oop );
10681 ins_cost(200);
10682 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
10683 ins_encode %{
10684 Register target = $jump_target$$Register;
10686 /* 2012/9/14 Jin: V0, V1 are indicated in:
10687 * [stubGenerator_mips.cpp] generate_forward_exception()
10688 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
10689 */
10690 Register oop = $ex_oop$$Register;
10691 Register exception_oop = V0;
10692 Register exception_pc = V1;
10694 __ move(exception_pc, RA);
10695 __ move(exception_oop, oop);
10697 __ jr(target);
10698 __ nop();
10699 %}
10700 ins_pipe( pipe_jump );
10701 %}
10703 // ============================================================================
10704 // Procedure Call/Return Instructions
10705 // Call Java Static Instruction
10706 // Note: If this code changes, the corresponding ret_addr_offset() and
10707 // compute_padding() functions will have to be adjusted.
10708 instruct CallStaticJavaDirect(method meth) %{
10709 match(CallStaticJava);
10710 effect(USE meth);
10712 ins_cost(300);
10713 format %{ "CALL,static #@CallStaticJavaDirect " %}
10714 ins_encode( Java_Static_Call( meth ) );
10715 ins_pipe( pipe_slow );
10716 ins_pc_relative(1);
10717 ins_alignment(16);
10718 %}
10720 // Call Java Dynamic Instruction
10721 // Note: If this code changes, the corresponding ret_addr_offset() and
10722 // compute_padding() functions will have to be adjusted.
// Loads the IC holder (initially (oop)-1) before the call, per the
// inline-cache calling convention.
10723 instruct CallDynamicJavaDirect(method meth) %{
10724 match(CallDynamicJava);
10725 effect(USE meth);
10727 ins_cost(300);
10728 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
10729 "CallDynamic @ CallDynamicJavaDirect" %}
10730 ins_encode( Java_Dynamic_Call( meth ) );
10731 ins_pipe( pipe_slow );
10732 ins_pc_relative(1);
10733 ins_alignment(16);
10734 %}
// Leaf runtime call that is known not to touch FP state; no safepoint.
10736 instruct CallLeafNoFPDirect(method meth) %{
10737 match(CallLeafNoFP);
10738 effect(USE meth);
10740 ins_cost(300);
10741 format %{ "CALL_LEAF_NOFP,runtime " %}
10742 ins_encode(Java_To_Runtime(meth));
10743 ins_pipe( pipe_slow );
10744 ins_pc_relative(1);
10745 ins_alignment(16);
10746 %}
// PrefetchWrite is implemented as a full memory barrier rather than an
// actual prefetch. NOTE(review): given this change-set's goal of removing
// redundant sync operations, confirm whether this sync() is still required
// or could be a no-op/real prefetch on Loongson.
10749 instruct prefetchw0( memory mem ) %{
10750 // predicate(UseSSE==0 && !VM_Version::supports_3dnow());
10751 match(PrefetchWrite mem);
10752 format %{ "Prefetch (sync) #@prefetchw0" %}
10753 ins_encode %{
10754 __ sync();
10755 %}
10756 ins_pipe(pipe_slow);
10757 %}
10760 // Call runtime without safepoint
10761 instruct CallLeafDirect(method meth) %{
10762 match(CallLeaf);
10763 effect(USE meth);
10765 ins_cost(300);
10766 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
10767 ins_encode(Java_To_Runtime(meth));
10768 ins_pipe( pipe_slow );
10769 ins_pc_relative(1);
10770 ins_alignment(16);
10771 %}
10773 // Load Char (16bit unsigned)
10774 instruct loadUS(mRegI dst, memory mem) %{
10775 match(Set dst (LoadUS mem));
10777 ins_cost(125);
10778 format %{ "loadUS $dst,$mem @ loadC" %}
10779 ins_encode(load_C_enc(dst, mem));
10780 ins_pipe( ialu_loadI );
10781 %}
// Unsigned-short load with the I2L widening folded in (zero-extended value
// is already a valid long, so the same encoding serves both).
10783 instruct loadUS_convI2L(mRegL dst, memory mem) %{
10784 match(Set dst (ConvI2L (LoadUS mem)));
10786 ins_cost(125);
10787 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
10788 ins_encode(load_C_enc(dst, mem));
10789 ins_pipe( ialu_loadI );
10790 %}
10792 // Store Char (16bit unsigned)
10793 instruct storeC(memory mem, mRegI src) %{
10794 match(Set mem (StoreC mem src));
10796 ins_cost(125);
10797 format %{ "storeC $src,$mem @ storeC" %}
10798 ins_encode(store_C_reg_enc(mem, src));
10799 ins_pipe( ialu_loadI );
10800 %}
// Float constant 0.0f: just move the zero register into the FPR.
10803 instruct loadConF0(regF dst, immF0 zero) %{
10804 match(Set dst zero);
10805 ins_cost(100);
10807 format %{ "mov $dst, zero @ loadConF0\n"%}
10808 ins_encode %{
10809 FloatRegister dst = $dst$$FloatRegister;
10811 __ mtc1(R0, dst);
10812 %}
10813 ins_pipe( fpu_loadF );
10814 %}
// General float constant: materialize the constant-table address in AT
// (with an internal-pc relocation so it survives code relocation), then lwc1.
10817 instruct loadConF(regF dst, immF src) %{
10818 match(Set dst src);
10819 ins_cost(125);
10821 format %{ "mov $dst, $src @ loadConF"%}
10822 ins_encode %{
10823 FloatRegister dst = $dst$$FloatRegister;
10824 jfloat jf = $src$$constant;
10825 address const_addr = __ float_constant(jf);
10826 assert (const_addr != NULL, "must create float constant in the constant table");
10828 __ relocate(relocInfo::internal_pc_type);
10829 __ li(AT, const_addr);
10830 __ lwc1(dst, AT, 0);
10831 %}
10832 ins_pipe( fpu_loadF );
10833 %}
// Double constant 0.0: dmtc1 of the zero register.
10836 instruct loadConD0(regD dst, immD0 zero) %{
10837 match(Set dst zero);
10838 ins_cost(100);
10840 format %{ "mov $dst, zero @ loadConD0\n"%}
10841 ins_encode %{
10842 FloatRegister dst = as_FloatRegister($dst$$reg);
10844 __ dmtc1(R0, dst);
10845 %}
10846 ins_pipe( fpu_loadF );
10847 %}
// General double constant, analogous to loadConF but with ldc1.
10849 instruct loadConD(regD dst, immD src) %{
10850 match(Set dst src);
10851 ins_cost(125);
10853 format %{ "mov $dst, $src @ loadConD\n"%}
10854 ins_encode %{
10855 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
10857 jdouble jd = $src$$constant;
10858 address const_addr = __ double_constant(jd);
10859 assert (const_addr != NULL, "must create double constant in the constant table");
10861 __ relocate(relocInfo::internal_pc_type);
10862 __ li(AT, const_addr);
10863 __ ldc1(dst_reg, AT, 0);
10864 %}
10865 ins_pipe( fpu_loadF );
10866 %}
10868 // Store register Float value (it is faster than store from FPU register)
10869 instruct storeF_reg( memory mem, regF src) %{
10870 match(Set mem (StoreF mem src));
10872 ins_cost(50);
10873 format %{ "store $mem, $src\t# store float @ storeF_reg" %}
10874 ins_encode(store_F_reg_enc(mem, src));
10875 ins_pipe( fpu_storeF );
10876 %}
10879 // Store immediate Float value (it is faster than store from FPU register)
10880 // The instruction usage is guarded by predicate in operand immF().
// Loads the constant from the constant table into scratch FPR F30, then
// stores it using the full base+index*scale+disp addressing decomposition
// (AT and T9 are scratch for address arithmetic).
10881 instruct storeF_imm( memory mem, immF src) %{
10882 match(Set mem (StoreF mem src));
10884 ins_cost(50);
10885 format %{ "store $mem, $src\t# store float @ storeF_imm" %}
10886 ins_encode %{
10887 jfloat jf = $src$$constant;
10888 int base = $mem$$base;
10889 int index = $mem$$index;
10890 int scale = $mem$$scale;
10891 int disp = $mem$$disp;
10892 address const_addr = __ float_constant(jf);
10893 assert (const_addr != NULL, "must create float constant in the constant table");
10895 __ relocate(relocInfo::internal_pc_type);
10896 __ li(AT, const_addr);
10897 __ lwc1(F30, AT, 0);
10899 if( index != 0 ) {
10900 if( Assembler::is_simm16(disp) ) {
10901 if (scale == 0) {
10902 __ addu(AT, as_Register(base), as_Register(index));
10903 } else {
10904 __ dsll(AT, as_Register(index), scale);
10905 __ addu(AT, as_Register(base), AT);
10906 }
10907 __ swc1(F30, AT, disp);
10908 } else {
10909 if (scale == 0) {
10910 __ addu(AT, as_Register(base), as_Register(index));
10911 } else {
10912 __ dsll(AT, as_Register(index), scale);
10913 __ addu(AT, as_Register(base), AT);
10914 }
10915 __ move(T9, disp);
10916 __ addu(AT, AT, T9);
10917 __ swc1(F30, AT, 0);
10918 }
10920 } else {
10921 if( Assembler::is_simm16(disp) ) {
10922 __ swc1(F30, as_Register(base), disp);
10923 } else {
10924 __ move(T9, disp);
10925 __ addu(AT, as_Register(base), T9);
10926 __ swc1(F30, AT, 0);
10927 }
10928 }
10929 %}
10930 ins_pipe( ialu_storeI );
10931 %}
// Store float 0.0 to memory. The zero comes straight from R0 as an integer
// store (IEEE-754 +0.0f is the all-zero bit pattern), so no FPR is needed.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Effective address base + (index << scale) + disp, then sw R0.
    if( index != 0 ) {
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // Displacement does not fit in simm16: add it via T9.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }

    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
10972 // Load Double
// Load a double from memory into an FPU register.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  // Encoding is shared with loadD_unaligned; see load_D_enc.
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
10982 // Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  // Currently emits the same code as the aligned loadD (load_D_enc).
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
10992 instruct storeD_reg( memory mem, regD src) %{
10993 match(Set mem (StoreD mem src));
10995 ins_cost(50);
10996 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
10997 ins_encode(store_D_reg_enc(mem, src));
10998 ins_pipe( fpu_storeF );
10999 %}
11001 instruct storeD_imm0( memory mem, immD0 zero) %{
11002 match(Set mem (StoreD mem zero));
11004 ins_cost(40);
11005 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
11006 ins_encode %{
11007 int base = $mem$$base;
11008 int index = $mem$$index;
11009 int scale = $mem$$scale;
11010 int disp = $mem$$disp;
11012 __ mtc1(R0, F30);
11013 __ cvt_d_w(F30, F30);
11015 if( index != 0 ) {
11016 if(scale != 0) {
11017 __ dsll(T9, as_Register(index), scale);
11018 __ addu(AT, as_Register(base), T9);
11019 } else {
11020 __ daddu(AT, as_Register(base), as_Register(index));
11021 }
11022 if( Assembler::is_simm16(disp) ) {
11023 __ sdc1(F30, AT, disp);
11024 } else {
11025 __ move(T9, disp);
11026 __ addu(AT, AT, T9);
11027 __ sdc1(F30, AT, 0);
11028 }
11030 } else {
11031 if( Assembler::is_simm16(disp) ) {
11032 __ sdc1(F30, as_Register(base), disp);
11033 } else {
11034 __ move(T9, disp);
11035 __ addu(AT, as_Register(base), T9);
11036 __ sdc1(F30, AT, 0);
11037 }
11038 }
11039 %}
11040 ins_pipe( ialu_storeI );
11041 %}
// Fast-path monitor enter. Produces the flags register consumed by the
// following branch; the actual locking logic lives in
// MacroAssembler::fast_lock.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit; delegates to MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
11069 // Store CMS card-mark Immediate
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  // NOTE(review): the _sync variant of the byte-store encoding is used here,
  // presumably to order the card-mark with respect to the preceding oop
  // store - confirm against store_B_immI_enc_sync.
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
11080 // Die now
11081 instruct ShouldNotReachHere( )
11082 %{
11083 match(Halt);
11084 ins_cost(300);
11086 // Use the following format syntax
11087 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
11088 ins_encode %{
11089 // Here we should emit illtrap !
11091 __ stop("in ShoudNotReachHere");
11093 %}
11094 ins_pipe( pipe_jump );
11095 %}
11098 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // NOTE(review): $labl$$label may be a null pointer for an unbound
    // target; "if (&L)" below tests that pointer. Dereferencing null and
    // taking the address back is formally UB, but it is the established
    // idiom throughout this file.
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // Map the ideal comparison code onto MIPS compare+branch sequences.
    // NOTE(review): the "above"/"below" cases use signed slt, not sltu -
    // presumably these cmpcodes denote signed comparisons despite the
    // x86-style names; confirm against the cmpOp operand definition.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Loop-end branch against a small immediate: fold the comparison into a
// subtract (AT = src1 - src2) and branch on AT's relation to zero.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    // NOTE(review): as elsewhere, "if (&L)" tests whether the underlying
    // label pointer is null (unbound target).
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2. Presumably the immI16_sub operand predicate
    // guarantees -op2 fits in a signed 16-bit immediate - confirm.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
11225 /*
11226 // Jump Direct Conditional - Label defines a relative address from Jcc+1
11227 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
11228 match(CountedLoopEnd cop cmp);
11229 effect(USE labl);
11231 ins_cost(300);
11232 format %{ "J$cop,u $labl\t# Loop end" %}
11233 size(6);
11234 opcode(0x0F, 0x80);
11235 ins_encode( Jcc( cop, labl) );
11236 ins_pipe( pipe_jump );
11237 ins_pc_relative(1);
11238 %}
11240 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
11241 match(CountedLoopEnd cop cmp);
11242 effect(USE labl);
11244 ins_cost(200);
11245 format %{ "J$cop,u $labl\t# Loop end" %}
11246 opcode(0x0F, 0x80);
11247 ins_encode( Jcc( cop, labl) );
11248 ins_pipe( pipe_jump );
11249 ins_pc_relative(1);
11250 %}
11251 */
11253 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // AT carries the flag value produced by the flag-setting instruction
    // (e.g. storeIConditional leaves AT != 0 on success, 0 on failure).
    // Note the branch senses: cmpcode "equal" branches when AT != 0 and
    // "not equal" when AT == 0 - the cmpcode tests the flag itself, not a
    // pair of operands.
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
11288 // ============================================================================
11289 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
11290 // array for an instance of the superklass. Set a hidden internal cache on a
11291 // hit (cache is checked with exposed code in gen_subtype_check()). Return
11292 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100); // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  // The scan of the secondary-supers array and the result convention are
  // implemented in the enc_PartialSubtypeCheck encoding.
  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
11304 // Conditional-store of an int value.
11305 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // LL/SC can only encode a base + simm16 address.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Indexed addressing is not supported; trap if it ever shows up.
      __ stop("in storeIConditional: index != 0");
    } else {
      // Classic LL/SC retry loop, fenced with sync on both sides. On exit
      // the flag is left in AT (consumed via FlagsReg by jmpCon_flags):
      //   AT == 0    -> the memory word did not match oldval (failure)
      //   AT == 0xFF -> the conditional store succeeded
      // Both branch delay slots are filled explicitly via delayed().
      __ bind(again);
      __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);      // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                     // AT = 1 on success, 0 if SC failed
      __ beq(AT, R0, again);               // SC failed: retry
      __ delayed()->addiu(AT, R0, 0xFF);   // delay slot: AT = 0xFF (success flag)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
11345 // Conditional-store of a long value.
11346 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
11347 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
11348 %{
11349 match(Set cr (StoreLConditional mem (Binary oldval newval)));
11350 effect(KILL oldval);
11352 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
11353 ins_encode%{
11354 Register oldval = $oldval$$Register;
11355 Register newval = $newval$$Register;
11356 Address addr((Register)$mem$$base, $mem$$disp);
11358 int index = $mem$$index;
11359 int scale = $mem$$scale;
11360 int disp = $mem$$disp;
11362 guarantee(Assembler::is_simm16(disp), "");
11364 if( index != 0 ) {
11365 __ stop("in storeIConditional: index != 0");
11366 } else {
11367 __ cmpxchg(newval, addr, oldval);
11368 }
11369 %}
11370 ins_pipe( long_memory_op );
11371 %}
11374 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
11375 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
11376 effect(KILL oldval);
11377 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
11378 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
11379 "MOV $res, 1 @ compareAndSwapI\n\t"
11380 "BNE AT, R0 @ compareAndSwapI\n\t"
11381 "MOV $res, 0 @ compareAndSwapI\n"
11382 "L:" %}
11383 ins_encode %{
11384 Register newval = $newval$$Register;
11385 Register oldval = $oldval$$Register;
11386 Register res = $res$$Register;
11387 Address addr($mem_ptr$$Register, 0);
11388 Label L;
11390 __ cmpxchg32(newval, addr, oldval);
11391 __ move(res, AT);
11392 %}
11393 ins_pipe( long_memory_op );
11394 %}
11396 //FIXME:
11397 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
11398 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
11399 effect(KILL oldval);
11400 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
11401 "MOV $res, AT @ compareAndSwapP\n\t"
11402 "L:" %}
11403 ins_encode %{
11404 Register newval = $newval$$Register;
11405 Register oldval = $oldval$$Register;
11406 Register res = $res$$Register;
11407 Address addr($mem_ptr$$Register, 0);
11408 Label L;
11410 __ cmpxchg(newval, addr, oldval);
11411 __ move(res, AT);
11412 %}
11413 ins_pipe( long_memory_op );
11414 %}
11416 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
11417 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
11418 effect(KILL oldval);
11419 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
11420 "MOV $res, AT @ compareAndSwapN\n\t"
11421 "L:" %}
11422 ins_encode %{
11423 Register newval = $newval$$Register;
11424 Register oldval = $oldval$$Register;
11425 Register res = $res$$Register;
11426 Address addr($mem_ptr$$Register, 0);
11427 Label L;
11429 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
11430 * Thus, we should extend oldval's sign for correct comparision.
11431 */
11432 __ sll(oldval, oldval, 0);
11434 __ cmpxchg32(newval, addr, oldval);
11435 __ move(res, AT);
11436 %}
11437 ins_pipe( long_memory_op );
11438 %}
11440 //----------Max and Min--------------------------------------------------------
11441 // Min Instructions
11442 ////
11443 // *** Min and Max using the conditional move are slower than the
11444 // *** branch version on a Pentium III.
11445 // // Conditional move for min
11446 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
11447 // effect( USE_DEF op2, USE op1, USE cr );
11448 // format %{ "CMOVlt $op2,$op1\t! min" %}
11449 // opcode(0x4C,0x0F);
11450 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
11451 // ins_pipe( pipe_cmov_reg );
11452 //%}
11453 //
11454 //// Min Register with Register (P6 version)
11455 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
11456 // predicate(VM_Version::supports_cmov() );
11457 // match(Set op2 (MinI op1 op2));
11458 // ins_cost(200);
11459 // expand %{
11460 // eFlagsReg cr;
11461 // compI_eReg(cr,op1,op2);
11462 // cmovI_reg_lt(op2,op1,cr);
11463 // %}
11464 //%}
11466 // Min Register with Register (generic version)
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // Branchless signed min: AT = (src < dst); conditionally move src into
    // dst when AT != 0 (movn = move-if-not-zero).
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
11485 // Max Register with Register
11486 // *** Min and Max using the conditional move are slower than the
11487 // *** branch version on a Pentium III.
11488 // // Conditional move for max
11489 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
11490 // effect( USE_DEF op2, USE op1, USE cr );
11491 // format %{ "CMOVgt $op2,$op1\t! max" %}
11492 // opcode(0x4F,0x0F);
11493 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
11494 // ins_pipe( pipe_cmov_reg );
11495 //%}
11496 //
11497 // // Max Register with Register (P6 version)
11498 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
11499 // predicate(VM_Version::supports_cmov() );
11500 // match(Set op2 (MaxI op1 op2));
11501 // ins_cost(200);
11502 // expand %{
11503 // eFlagsReg cr;
11504 // compI_eReg(cr,op1,op2);
11505 // cmovI_reg_gt(op2,op1,cr);
11506 // %}
11507 //%}
11509 // Max Register with Register (generic version)
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // Branchless signed max: AT = (dst < src); conditionally move src into
    // dst when AT != 0 (movn = move-if-not-zero).
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
11529 // ============================================================================
11530 // Safepoint Instruction
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Load from the polling page (standard HotSpot safepoint mechanism:
    // the load faults when the page is armed). The poll_type relocation
    // lets the VM recognize this instruction as a safepoint poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
11549 //----------PEEPHOLE RULES-----------------------------------------------------
11550 // These must follow all instruction definitions as they use the names
11551 // defined in the instructions definitions.
11552 //
// peepmatch ( root_instr_name [preceding_instruction]* );
11554 //
11555 // peepconstraint %{
11556 // (instruction_number.operand_name relational_op instruction_number.operand_name
11557 // [, ...] );
11558 // // instruction numbers are zero-based using left to right order in peepmatch
11559 //
11560 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
11561 // // provide an instruction_number.operand_name for each operand that appears
11562 // // in the replacement instruction's match rule
11563 //
11564 // ---------VM FLAGS---------------------------------------------------------
11565 //
11566 // All peephole optimizations can be turned off using -XX:-OptoPeephole
11567 //
11568 // Each peephole rule is given an identifying number starting with zero and
11569 // increasing by one in the order seen by the parser. An individual peephole
11570 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
11571 // on the command-line.
11572 //
11573 // ---------CURRENT LIMITATIONS----------------------------------------------
11574 //
11575 // Only match adjacent instructions in same basic block
11576 // Only equality constraints
11577 // Only constraints between operands, not (0.dest_reg == EAX_enc)
11578 // Only one replacement instruction
11579 //
11580 // ---------EXAMPLE----------------------------------------------------------
11581 //
11582 // // pertinent parts of existing instructions in architecture description
11583 // instruct movI(eRegI dst, eRegI src) %{
11584 // match(Set dst (CopyI src));
11585 // %}
11586 //
11587 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
11588 // match(Set dst (AddI dst src));
11589 // effect(KILL cr);
11590 // %}
11591 //
11592 // // Change (inc mov) to lea
11593 // peephole %{
// // increment preceded by register-register move
11595 // peepmatch ( incI_eReg movI );
11596 // // require that the destination register of the increment
11597 // // match the destination register of the move
11598 // peepconstraint ( 0.dst == 1.dst );
11599 // // construct a replacement instruction that sets
11600 // // the destination to ( move's source register + one )
11601 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
11602 // %}
11603 //
11604 // Implementation no longer uses movX instructions since
11605 // machine-independent system no longer uses CopyX nodes.
11606 //
11607 // peephole %{
11608 // peepmatch ( incI_eReg movI );
11609 // peepconstraint ( 0.dst == 1.dst );
11610 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
11611 // %}
11612 //
11613 // peephole %{
11614 // peepmatch ( decI_eReg movI );
11615 // peepconstraint ( 0.dst == 1.dst );
11616 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
11617 // %}
11618 //
11619 // peephole %{
11620 // peepmatch ( addI_eReg_imm movI );
11621 // peepconstraint ( 0.dst == 1.dst );
11622 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
11623 // %}
11624 //
11625 // peephole %{
11626 // peepmatch ( addP_eReg_imm movP );
11627 // peepconstraint ( 0.dst == 1.dst );
11628 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
11629 // %}
11631 // // Change load of spilled value to only a spill
11632 // instruct storeI(memory mem, eRegI src) %{
11633 // match(Set mem (StoreI mem src));
11634 // %}
11635 //
11636 // instruct loadI(eRegI dst, memory mem) %{
11637 // match(Set dst (LoadI mem));
11638 // %}
11639 //
11640 //peephole %{
11641 // peepmatch ( loadI storeI );
11642 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
11643 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
11644 //%}
11646 //----------SMARTSPILL RULES---------------------------------------------------
11647 // These must follow all instruction definitions as they use the names
11648 // defined in the instructions definitions.