Thu, 05 Sep 2019 13:07:31 +0800
#9372 Refactor VM_Version, removed UseLoongsonISA and Use3A3000, added UseLEXT1, UseLEXT2, UseLEXT3.
Summary: used cpucfg to detect cpu features
Reviewed-by: wanghaomin
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2019, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
// S6 is used for get_thread(S6)
// S5 is used for the heapbase of compressed oops
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H, // fix: separator comma was missing between GP_H and RA
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
    );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // F31 are not used as temporary registers in D2I
384 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
385 reg_class dbl_reg( F0, F0_H,
386 F1, F1_H,
387 F2, F2_H,
388 F3, F3_H,
389 F4, F4_H,
390 F5, F5_H,
391 F6, F6_H,
392 F7, F7_H,
393 F8, F8_H,
394 F9, F9_H,
395 F10, F10_H,
396 F11, F11_H,
397 F12, F12_H,
398 F13, F13_H,
399 F14, F14_H,
400 F15, F15_H,
401 F16, F16_H,
402 F17, F17_H,
403 F18, F18_H,
404 F19, F19_H,
405 F20, F20_H,
406 F21, F21_H,
407 F22, F22_H,
408 F23, F23_H,
409 F24, F24_H,
410 F25, F25_H,
411 F26, F26_H,
412 F27, F27_H,
413 F28, F28_H,
414 F29, F29_H,
415 F31, F31_H);
417 reg_class flt_arg0( F12 );
418 reg_class dbl_arg0( F12, F12_H );
419 reg_class dbl_arg1( F14, F14_H );
421 %}
423 //----------DEFINITION BLOCK---------------------------------------------------
424 // Define name --> value mappings to inform the ADLC of an integer valued name
425 // Current support includes integer values in the range [0, 0x7FFFFFFF]
426 // Format:
427 // int_def <name> ( <int_value>, <expression>);
428 // Generated Code in ad_<arch>.hpp
429 // #define <name> (<expression>)
430 // // value == <int_value>
431 // Generated code in ad_<arch>.cpp adlc_verification()
432 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
433 //
// Cost model constants consumed by the matcher; see the DEFINITION BLOCK
// header above for the int_def format.
definitions %{
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  // we use jr instruction to construct call, so more expensive
  int_def CALL_COST         (    500, DEFAULT_COST * 5);
/*
  int_def EQUAL             (      1,       1  );
  int_def NOT_EQUAL         (      2,       2  );
  int_def GREATER           (      3,       3  );
  int_def GREATER_EQUAL     (      4,       4  );
  int_def LESS              (      5,       5  );
  int_def LESS_EQUAL        (      6,       6  );
*/
%}
457 //----------SOURCE BLOCK-------------------------------------------------------
458 // This is a block of C++ code which provides values, functions, and
459 // definitions necessary in the rest of the architecture description
461 source_hpp %{
462 // Header information of the source block.
463 // Method declarations/definitions which are used outside
464 // the ad-scope can conveniently be defined here.
465 //
466 // To keep related declarations/definitions/uses close together,
467 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Trampoline-stub bookkeeping queried by Compile::shorten_branches.
// This port emits no call trampolines, so both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Sizes and emitters for the exception and deopt handler stubs appended to
// each compiled method (emitters are defined in the source %{ %} block below).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // NOTE(review): "pushl(); jmp;" looks inherited from the x86 port --
    // confirm this 32-bit size is correct for MIPS.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
521 %} // end source_hpp
523 source %{
525 #define NO_INDEX 0
526 #define RELOC_IMM64 Assembler::imm_operand
527 #define RELOC_DISP32 Assembler::disp32_operand
530 #define __ _masm.
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  // Patchable jump into the shared exception blob; align to 16 so the
  // emitted size stays within size_exception_handler().
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  // Patchable call into the deopt blob's unpack entry; align to 16 so the
  // emitted size stays within size_deopt_handler().
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_call(SharedRuntime::deopt_blob()->unpack());
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
583 const bool Matcher::match_rule_supported(int opcode) {
584 if (!has_match_rule(opcode))
585 return false;
587 switch (opcode) {
588 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
589 case Op_CountLeadingZerosI:
590 case Op_CountLeadingZerosL:
591 if (!UseCountLeadingZerosInstructionMIPS64)
592 return false;
593 break;
594 case Op_CountTrailingZerosI:
595 case Op_CountTrailingZerosL:
596 if (!UseCountTrailingZerosInstructionMIPS64)
597 return false;
598 break;
599 }
601 return true; // Per default match rules are supported.
602 }
604 //FIXME
605 // emit call stub, compiled java to interpreter
606 void emit_java_to_interp(CodeBuffer &cbuf ) {
607 // Stub is fixed up when the corresponding call is converted from calling
608 // compiled code to calling interpreted code.
609 // mov S3,0
610 // jmp -1
612 address mark = cbuf.insts_mark(); // get mark within main instrs section
614 // Note that the code buffer's insts_mark is always relative to insts.
615 // That's why we must use the macroassembler to generate a stub.
616 MacroAssembler _masm(&cbuf);
618 address base = __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) { // CodeBuffer::expand failed
620 ciEnv::current()->record_failure("CodeCache is full");
621 }
623 // static stub relocation stores the instruction address of the call
625 __ relocate(static_stub_Relocation::spec(mark), 0);
627 // static stub relocation also tags the methodOop in the code-stream.
628 __ patchable_set48(S3, (long)0);
629 // This is recognized as unresolved by relocs/nativeInst/ic code
631 __ relocate(relocInfo::runtime_call_type);
633 cbuf.set_insts_mark();
634 address call_pc = (address)-1;
635 __ patchable_jump(call_pc);
636 __ align(16);
637 __ end_a_stub();
638 // Update current stubs pointer and restore code_end.
639 }
641 // size of call stub, compiled java to interpretor
642 uint size_java_to_interp() {
643 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
644 return round_to(size, 16);
645 }
647 // relocation entries for call stub, compiled java to interpreter
648 uint reloc_java_to_interp() {
649 return 16; // in emit_java_to_interp + in Java_Static_Call
650 }
652 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
653 int offs = offset - br_size + 4;
654 // To be conservative on MIPS
655 // branch node should be end with:
656 // branch inst
657 // delay slot
658 const int safety_zone = 3 * BytesPerInstWord;
659 return Assembler::is_simm16((offs<0 ? offs-safety_zone : offs+safety_zone) >> 2);
660 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// NOTE(review): "SSE2" comment inherited from the x86 port. Returning
// ConditionalMoveLimit here disables float/double conditional moves.
// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// Whether decode-and-use of a narrow oop can be folded into an addressing
// mode; this port always materializes the decoded oop first.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same question for narrow klass pointers; also not folded on this port.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}
// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Max vector size in bytes. 0 if not supported.
// This port supports at most 8-byte (64-bit) vectors.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  if (MaxVectorSize == 0)
    return 0;
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: only 8-byte vectors (Op_VecD) exist on this port.
const uint Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// NOTE(review): "xmm reg" comment inherited from x86; vector shifts are
// unimplemented here and this always fails fatally if reached.
// Only lowest bits of xmm reg are used for vector shift count.
const uint Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// Register for DIVI projection of divmodI.
// Unreached on this port -- presumably no fused divmod nodes are matched;
// confirm against the rules below.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI (unreached, see above).
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL (unreached, see above).
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention()
  // Integer argument registers: T0 plus A0-A7 (and their high halves).
  if ( reg == T0_num || reg == T0_H_num
    || reg == A0_num || reg == A0_H_num
    || reg == A1_num || reg == A1_H_num
    || reg == A2_num || reg == A2_H_num
    || reg == A3_num || reg == A3_H_num
    || reg == A4_num || reg == A4_H_num
    || reg == A5_num || reg == A5_H_num
    || reg == A6_num || reg == A6_H_num
    || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point argument registers: F12-F19 (and their high halves).
  if ( reg == F12_num || reg == F12_H_num
    || reg == F13_num || reg == F13_H_num
    || reg == F14_num || reg == F14_H_num
    || reg == F15_num || reg == F15_H_num
    || reg == F16_num || reg == F16_H_num
    || reg == F17_num || reg == F17_H_num
    || reg == F18_num || reg == F18_H_num
    || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}
// An argument register may also be used as a spill slot candidate.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use inline-assembly magic-constant division for long divide-by-constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL.
// Unreached on this port -- presumably no fused divmod nodes are matched.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in FP across a method-handle invoke on this port.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// Padding (in bytes) to insert before this call so that the call sequence
// below starts on an alignment_required() boundary.
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for leaf direct calls.
int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for runtime direct calls.
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// false => size gets scaled to BytesPerLong, ok.
const bool Matcher::init_array_count_is_in_bytes = false;

// Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the original comment claimed the input is needed because
// MIPS lacks absolute addressing, yet the method returns false -- confirm
// against the safepoint polling implementation for this port.
bool SafePointNode::needs_polling_address_input() {
  return false;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  //lui
  //ori
  //nop
  //nop
  //jalr
  //nop
  return 24; // 6 instructions * 4 bytes each (sequence listed above)
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  // 4-instruction inline-cache klass load:
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass

  // 6-instruction call sequence:
  //lui T9
  //ori T9
  //nop
  //nop
  //jalr T9
  //nop
  return 4 * 4 + 4 * 6; // = 40 bytes: IC load + call sequence above
}
913 //=============================================================================
915 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
916 enum RC { rc_bad, rc_int, rc_float, rc_stack };
917 static enum RC rc_class( OptoReg::Name reg ) {
918 if( !OptoReg::is_valid(reg) ) return rc_bad;
919 if (OptoReg::is_stack(reg)) return rc_stack;
920 VMReg r = OptoReg::as_VMReg(reg);
921 if (r->is_Register()) return rc_int;
922 assert(r->is_FloatRegister(), "must be");
923 return rc_float;
924 }
// Shared worker behind MachSpillCopyNode::format/emit/size.
// Moves a value between any pair of locations among {gpr, fpr, stack slot}.
// Exactly one mode is active per call:
//   cbuf != NULL           : emit code through a MacroAssembler
//   cbuf == NULL, !do_size : print an assembly listing to st
//   cbuf == NULL, do_size  : only accumulate the size
// Returns the number of code bytes the move occupies.
//
// A source/destination is treated as one 64-bit value when its register pair
// is aligned and adjacent: (first & 1) == 0 && first + 1 == second;
// otherwise the 32-bit single-register form is used.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      // Stack-to-stack copies bounce through the scratch register AT.
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;  // two instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Sign-extend for int values, zero-extend otherwise.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // All valid class combinations return above; reaching here is a bug.
  assert(0," foo ");
  Unimplemented();
  return size;

}
1342 #ifndef PRODUCT
1343 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1344 implementation( NULL, ra_, false, st );
1345 }
1346 #endif
1348 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1349 implementation( &cbuf, ra_, false, NULL );
1350 }
1352 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1353 return implementation( NULL, ra_, true, NULL );
1354 }
1356 //=============================================================================
1357 #
#ifndef PRODUCT
// Debug listing for a breakpoint node (label kept as the x86-style "INT3").
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit the trap instruction used as a debugger breakpoint.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
1370 uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
1371 return MachNode::size(ra_);
1372 }
1375 //=============================================================================
1376 #ifndef PRODUCT
1377 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1378 Compile *C = ra_->C;
1379 int framesize = C->frame_size_in_bytes();
1381 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1383 st->print_cr("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode", framesize);
1384 st->print("\t");
1385 if (UseLEXT1) {
1386 st->print_cr("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1387 } else {
1388 st->print_cr("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1389 st->print("\t");
1390 st->print_cr("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1391 }
1393 if( do_polling() && C->is_method_compilation() ) {
1394 st->print("\t");
1395 st->print_cr("Poll Safepoint # MachEpilogNode");
1396 }
1397 }
1398 #endif
// Pop the frame, restore RA/FP, and — for method compiles — read the
// polling page so a pending safepoint can stop this thread at return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; saved RA/FP then sit just above the new SP.
  __ daddiu(SP, SP, framesize);

  if (UseLEXT1) {
    // Loongson EXT1: load the RA/FP pair with a single gslq.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Poll-return: the VM arms safepoints by protecting the polling page,
    // turning this lw into a trap. The relocation marks the poll site.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
1423 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1424 return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
1425 }
1427 int MachEpilogNode::reloc() const {
1428 return 0; // a large enough number
1429 }
1431 const Pipeline * MachEpilogNode::pipeline() const {
1432 return MachNode::pipeline_class();
1433 }
1435 int MachEpilogNode::safepoint_offset() const { return 0; }
1437 //=============================================================================
#ifndef PRODUCT
// Debug listing: BoxLock materializes the stack address of its monitor slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  // NOTE(review): emit() uses get_encode() while this uses get_reg_first();
  // regName is indexed by allocator name, so these should agree — confirm.
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
1448 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1449 return 4;
1450 }
// Materialize the address of the on-stack monitor box: reg <- SP + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi takes a 16-bit signed immediate and size() assumes a
  // single instruction — presumably monitor offsets always fit; confirm.
  __ addi(as_Register(reg), SP, offset);
}
1461 //static int sizeof_FFree_Float_Stack_All = -1;
1463 int MachCallRuntimeNode::ret_addr_offset() {
1464 //lui
1465 //ori
1466 //dsll
1467 //ori
1468 //jalr
1469 //nop
1470 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1471 return NativeCall::instruction_size;
1472 }
1475 //=============================================================================
#ifndef PRODUCT
// Debug listing: one line covering all _count 4-byte padding nops.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1482 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1483 MacroAssembler _masm(&cbuf);
1484 int i = 0;
1485 for(i = 0; i < _count; i++)
1486 __ nop();
1487 }
1489 uint MachNopNode::size(PhaseRegAlloc *) const {
1490 return 4 * _count;
1491 }
1492 const Pipeline* MachNopNode::pipeline() const {
1493 return MachNode::pipeline_class();
1494 }
1496 //=============================================================================
1498 //=============================================================================
#ifndef PRODUCT
// Debug listing of the unverified entry point (see MachUEPNode::emit).
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified Entry Point: compare the receiver's klass (loaded into T9)
// with the inline-cache klass; on mismatch, jump to the IC miss stub.
// On match, fall through to the verified entry point.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;  // receiver arrives in T0 per calling convention
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ delayed()->nop();  // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  // WARNING these NOPs are critical so that verified entry point is properly
  // 8 bytes aligned for patching by NativeJump::patch_verified_entry()
  __ align(CodeEntryAlignment);
  __ bind(L);
}
1534 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1535 return MachNode::size(ra_);
1536 }
1540 //=============================================================================
// The constant-table base is materialized into a pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
1544 int Compile::ConstantTable::calculate_table_base_offset() const {
1545 return 0; // absolute addressing, no offset
1546 }
1548 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1549 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1550 ShouldNotReachHere();
1551 }
// Materialize the absolute address of the constant-table base into this
// node's output register (Rtoc), with an internal_word relocation so the
// address survives code relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  // Nothing to do when the method has an empty constant table.
  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_word_type);
    // patchable_set48 always emits 4 instructions; size() relies on that.
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
1572 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1573 // patchable_set48 (4 insts)
1574 return 4 * 4;
1575 }
#ifndef PRODUCT
// Debug listing for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1585 //=============================================================================
#ifndef PRODUCT
// Debug listing of the prolog; mirrors MachPrologNode::emit.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLEXT1) {
    // Loongson EXT1: one gssq stores the RA/FP pair.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prolog: optional stack-overflow bang, save RA/FP just
// below the caller's SP, establish FP, then allocate the frame.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (UseLEXT1) {
    // Loongson EXT1: store the RA/FP pair with a single gssq.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); // Make enough room for patch_verified_entry()
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
1649 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
1650 return MachNode::size(ra_); // too many variables; just compute it the hard way
1651 }
1653 int MachPrologNode::reloc() const {
1654 return 0; // a large enough number
1655 }
1657 %}
1659 //----------ENCODING BLOCK-----------------------------------------------------
1660 // This block specifies the encoding classes used by the compiler to output
1661 // byte streams. Encoding classes generate functions which are called by
1662 // Machine Instruction Nodes in order to generate the bit encoding of the
1663 // instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently four supported interfaces:
1665 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1666 // operand to generate a function which returns its register number when
1667 // queried. CONST_INTER causes an operand to generate a function which
1668 // returns the value of the constant when queried. MEMORY_INTER causes an
1669 // operand to generate four functions which return the Base Register, the
1670 // Index Register, the Scale Value, and the Offset Value of the operand when
1671 // queried. COND_INTER causes an operand to generate six functions which
1672 // return the encoding code (ie - encoding bits for the instruction)
1673 // associated with each basic boolean condition for a conditional instruction.
1674 // Instructions specify two basic values for encoding. They use the
1675 // ins_encode keyword to specify their encoding class (which must be one of
1676 // the class names specified in the encoding block), and they use the
1677 // opcode keyword to specify, in order, their primary, secondary, and
1678 // tertiary opcode. Only the opcode sections which a particular instruction
1679 // needs for encoding need to be specified.
1680 encode %{
  // Load byte signed: dst <- sign-extended int8 at [base + (index << scale) + disp].
  // With UseLEXT1 the Loongson gslbx (base + index-reg + imm) form is used;
  // otherwise the effective address is composed in AT (and T9 when disp does
  // not fit in a signed 16-bit immediate).
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if (UseLEXT1) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            // Pre-scale the index; gslbx has no scale operand.
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Large displacement: materialize disp in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte unsigned: dst <- zero-extended uint8 at [base + (index << scale) + disp].
  // NOTE(review): unlike load_B_enc this encoding never uses the LEXT1
  // gslbx form — presumably intentional; confirm.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Compose base + (index << scale) in AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Large displacement: add it in via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register: [base + (index << scale) + disp] <- low 8 bits of src.
  // Uses the Loongson gssbx (base + index-reg + imm8) form when UseLEXT1 is
  // on and disp fits in 8 bits; otherwise composes the address in AT/T9.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          // gssbx immediates are only 8 bits wide.
          if (UseLEXT1) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLEXT1) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Pre-scale the index into AT.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLEXT1) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLEXT1) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store immediate byte: [base + (index << scale) + disp] <- (int8)src.
  // A zero immediate is stored directly from R0, saving a register move;
  // non-zero values are materialized in T9 (or AT). With UseLEXT1 the
  // Loongson gssbx form is preferred when the displacement fits.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLEXT1) {
        // Plain MIPS path: compose base + (index << scale) in AT first.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {

        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            // gssbx immediates are only 8 bits wide.
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // base+disp in AT, value in T9, index supplied to gssbx.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp, then store via gssbx(base + AT).
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLEXT1) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLEXT1) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store an 8-bit immediate to memory, followed by a full sync() barrier
  // (ordered/volatile byte store).
  // Effective address = base + (index << scale) + disp.  AT and T9 are
  // scratch registers; R0 supplies the value when it is zero.  With
  // UseLEXT1 the Loongson gssbx (store-byte-indexed) instruction folds
  // the final register add into the store when disp fits in 8 bits.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (UseLEXT1) {
        if ( Assembler::is_simm(disp,8) ) {
          // disp fits gssbx's 8-bit offset: one indexed store.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sb's 16-bit offset: add base+index, then sb.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // disp too large for any immediate form: materialize it.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if (UseLEXT1){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Full memory barrier — gives the store its ordering semantics.
    __ sync();
  %}
  // Load Short (16bit signed)
  // Sign-extending halfword load:
  //   dst = *(int16_t*)(base + (index << scale) + disp).
  // AT and T9 are scratch registers.  With UseLEXT1 the Loongson gslhx
  // (load-halfword-indexed) folds the index add into the load.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (UseLEXT1) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gslhx's 8-bit offset.
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits lh's 16-bit offset.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large disp: materialize and fold into the index register.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2187 // Load Char (16bit unsigned)
2188 enc_class load_C_enc (mRegI dst, memory mem) %{
2189 MacroAssembler _masm(&cbuf);
2190 int dst = $dst$$reg;
2191 int base = $mem$$base;
2192 int index = $mem$$index;
2193 int scale = $mem$$scale;
2194 int disp = $mem$$disp;
2196 if( index != 0 ) {
2197 if (scale == 0) {
2198 __ daddu(AT, as_Register(base), as_Register(index));
2199 } else {
2200 __ dsll(AT, as_Register(index), scale);
2201 __ daddu(AT, as_Register(base), AT);
2202 }
2203 if( Assembler::is_simm16(disp) ) {
2204 __ lhu(as_Register(dst), AT, disp);
2205 } else {
2206 __ move(T9, disp);
2207 __ addu(AT, AT, T9);
2208 __ lhu(as_Register(dst), AT, 0);
2209 }
2210 } else {
2211 if( Assembler::is_simm16(disp) ) {
2212 __ lhu(as_Register(dst), as_Register(base), disp);
2213 } else {
2214 __ move(T9, disp);
2215 __ daddu(AT, as_Register(base), T9);
2216 __ lhu(as_Register(dst), AT, 0);
2217 }
2218 }
2219 %}
2221 // Store Char (16bit unsigned)
2222 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2223 MacroAssembler _masm(&cbuf);
2224 int src = $src$$reg;
2225 int base = $mem$$base;
2226 int index = $mem$$index;
2227 int scale = $mem$$scale;
2228 int disp = $mem$$disp;
2230 if( index != 0 ) {
2231 if( Assembler::is_simm16(disp) ) {
2232 if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
2233 if (scale == 0) {
2234 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2235 } else {
2236 __ dsll(AT, as_Register(index), scale);
2237 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2238 }
2239 } else {
2240 if (scale == 0) {
2241 __ addu(AT, as_Register(base), as_Register(index));
2242 } else {
2243 __ dsll(AT, as_Register(index), scale);
2244 __ addu(AT, as_Register(base), AT);
2245 }
2246 __ sh(as_Register(src), AT, disp);
2247 }
2248 } else {
2249 if (scale == 0) {
2250 __ addu(AT, as_Register(base), as_Register(index));
2251 } else {
2252 __ dsll(AT, as_Register(index), scale);
2253 __ addu(AT, as_Register(base), AT);
2254 }
2255 __ move(T9, disp);
2256 if (UseLEXT1) {
2257 __ gsshx(as_Register(src), AT, T9, 0);
2258 } else {
2259 __ addu(AT, AT, T9);
2260 __ sh(as_Register(src), AT, 0);
2261 }
2262 }
2263 } else {
2264 if( Assembler::is_simm16(disp) ) {
2265 __ sh(as_Register(src), as_Register(base), disp);
2266 } else {
2267 __ move(T9, disp);
2268 if (UseLEXT1) {
2269 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2270 } else {
2271 __ addu(AT, as_Register(base), T9);
2272 __ sh(as_Register(src), AT, 0);
2273 }
2274 }
2275 }
2276 %}
2278 enc_class store_C0_enc (memory mem) %{
2279 MacroAssembler _masm(&cbuf);
2280 int base = $mem$$base;
2281 int index = $mem$$index;
2282 int scale = $mem$$scale;
2283 int disp = $mem$$disp;
2285 if( index != 0 ) {
2286 if ( Assembler::is_simm16(disp) ) {
2287 if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
2288 if (scale == 0) {
2289 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2290 } else {
2291 __ dsll(AT, as_Register(index), scale);
2292 __ gsshx(R0, as_Register(base), AT, disp);
2293 }
2294 } else {
2295 if (scale == 0) {
2296 __ addu(AT, as_Register(base), as_Register(index));
2297 } else {
2298 __ dsll(AT, as_Register(index), scale);
2299 __ addu(AT, as_Register(base), AT);
2300 }
2301 __ sh(R0, AT, disp);
2302 }
2303 } else {
2304 if (scale == 0) {
2305 __ addu(AT, as_Register(base), as_Register(index));
2306 } else {
2307 __ dsll(AT, as_Register(index), scale);
2308 __ addu(AT, as_Register(base), AT);
2309 }
2310 __ move(T9, disp);
2311 if (UseLEXT1) {
2312 __ gsshx(R0, AT, T9, 0);
2313 } else {
2314 __ addu(AT, AT, T9);
2315 __ sh(R0, AT, 0);
2316 }
2317 }
2318 } else {
2319 if( Assembler::is_simm16(disp) ) {
2320 __ sh(R0, as_Register(base), disp);
2321 } else {
2322 __ move(T9, disp);
2323 if (UseLEXT1) {
2324 __ gsshx(R0, as_Register(base), T9, 0);
2325 } else {
2326 __ addu(AT, as_Register(base), T9);
2327 __ sh(R0, AT, 0);
2328 }
2329 }
2330 }
2331 %}
2333 enc_class load_I_enc (mRegI dst, memory mem) %{
2334 MacroAssembler _masm(&cbuf);
2335 int dst = $dst$$reg;
2336 int base = $mem$$base;
2337 int index = $mem$$index;
2338 int scale = $mem$$scale;
2339 int disp = $mem$$disp;
2341 if( index != 0 ) {
2342 if( Assembler::is_simm16(disp) ) {
2343 if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
2344 if (scale == 0) {
2345 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2346 } else {
2347 __ dsll(AT, as_Register(index), scale);
2348 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2349 }
2350 } else {
2351 if (scale == 0) {
2352 __ addu(AT, as_Register(base), as_Register(index));
2353 } else {
2354 __ dsll(AT, as_Register(index), scale);
2355 __ addu(AT, as_Register(base), AT);
2356 }
2357 __ lw(as_Register(dst), AT, disp);
2358 }
2359 } else {
2360 if (scale == 0) {
2361 __ addu(AT, as_Register(base), as_Register(index));
2362 } else {
2363 __ dsll(AT, as_Register(index), scale);
2364 __ addu(AT, as_Register(base), AT);
2365 }
2366 __ move(T9, disp);
2367 if (UseLEXT1) {
2368 __ gslwx(as_Register(dst), AT, T9, 0);
2369 } else {
2370 __ addu(AT, AT, T9);
2371 __ lw(as_Register(dst), AT, 0);
2372 }
2373 }
2374 } else {
2375 if( Assembler::is_simm16(disp) ) {
2376 __ lw(as_Register(dst), as_Register(base), disp);
2377 } else {
2378 __ move(T9, disp);
2379 if (UseLEXT1) {
2380 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2381 } else {
2382 __ addu(AT, as_Register(base), T9);
2383 __ lw(as_Register(dst), AT, 0);
2384 }
2385 }
2386 }
2387 %}
2389 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2390 MacroAssembler _masm(&cbuf);
2391 int src = $src$$reg;
2392 int base = $mem$$base;
2393 int index = $mem$$index;
2394 int scale = $mem$$scale;
2395 int disp = $mem$$disp;
2397 if( index != 0 ) {
2398 if( Assembler::is_simm16(disp) ) {
2399 if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
2400 if (scale == 0) {
2401 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2402 } else {
2403 __ dsll(AT, as_Register(index), scale);
2404 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2405 }
2406 } else {
2407 if (scale == 0) {
2408 __ addu(AT, as_Register(base), as_Register(index));
2409 } else {
2410 __ dsll(AT, as_Register(index), scale);
2411 __ addu(AT, as_Register(base), AT);
2412 }
2413 __ sw(as_Register(src), AT, disp);
2414 }
2415 } else {
2416 if (scale == 0) {
2417 __ addu(AT, as_Register(base), as_Register(index));
2418 } else {
2419 __ dsll(AT, as_Register(index), scale);
2420 __ addu(AT, as_Register(base), AT);
2421 }
2422 __ move(T9, disp);
2423 if (UseLEXT1) {
2424 __ gsswx(as_Register(src), AT, T9, 0);
2425 } else {
2426 __ addu(AT, AT, T9);
2427 __ sw(as_Register(src), AT, 0);
2428 }
2429 }
2430 } else {
2431 if( Assembler::is_simm16(disp) ) {
2432 __ sw(as_Register(src), as_Register(base), disp);
2433 } else {
2434 __ move(T9, disp);
2435 if (UseLEXT1) {
2436 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2437 } else {
2438 __ addu(AT, as_Register(base), T9);
2439 __ sw(as_Register(src), AT, 0);
2440 }
2441 }
2442 }
2443 %}
  // Store a 32-bit immediate to memory:
  //   *(int32_t*)(base + (index << scale) + disp) = value.
  // R0 is stored directly when value == 0; otherwise the immediate is
  // materialized into AT or T9 first.  With UseLEXT1 the Loongson gsswx
  // (store-word-indexed) folds the index add into the store.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (UseLEXT1) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit offset.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sw's 16-bit offset.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large disp: materialize it and fold into the index register.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load a narrow (compressed) oop/klass: zero-extending 32-bit load.
  //   dst = *(uint32_t*)(base + (index << scale) + disp).
  // set64 is used for large displacements (full 64-bit materialization).
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Load a 64-bit pointer:
  //   dst = *(intptr_t*)(base + (index << scale) + disp).
  // AT and T9 are scratch registers.  With UseLEXT1 the Loongson gsldx
  // (load-doubleword-indexed) folds the index add into the load.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (UseLEXT1) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsldx's 8-bit offset.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          // disp fits ld's 16-bit offset.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large disp: fold into the index register.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load acquire.
  // load_P_enc + sync
  // Identical address generation to load_P_enc, with a trailing sync()
  // barrier to give the pointer load acquire semantics.
  enc_class load_P_enc_ac (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (UseLEXT1) {
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // Acquire barrier: order this load before subsequent memory accesses.
    __ sync();
  %}
  // Store a 64-bit pointer:
  //   *(intptr_t*)(base + (index << scale) + disp) = src.
  // AT and T9 are scratch registers.  With UseLEXT1 the Loongson gssdx
  // (store-doubleword-indexed) folds the index add into the store.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (UseLEXT1){
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gssdx's 8-bit offset.
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sd's 16-bit offset.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large disp: fold into the index register.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a narrow (compressed) oop/klass register: 32-bit word store.
  //   *(uint32_t*)(base + (index << scale) + disp) = src.
  // Mirrors store_P_reg_enc but emits sw/gsswx instead of sd/gssdx.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (UseLEXT1){
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit offset.
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large disp: fold into the index register.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a NULL pointer (64-bit zero):
  //   *(intptr_t*)(base + (index << scale) + disp) = 0.
  // R0 supplies the zero; AT and T9 are scratch registers.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if ( Assembler::is_simm16(disp) ) {
          if (UseLEXT1 && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          // Large disp: materialize it in T9.
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLEXT1) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLEXT1 && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLEXT1) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
2970 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2971 MacroAssembler _masm(&cbuf);
2972 int base = $mem$$base;
2973 int index = $mem$$index;
2974 int scale = $mem$$scale;
2975 int disp = $mem$$disp;
2977 if(index!=0){
2978 if (scale == 0) {
2979 __ daddu(AT, as_Register(base), as_Register(index));
2980 } else {
2981 __ dsll(AT, as_Register(index), scale);
2982 __ daddu(AT, as_Register(base), AT);
2983 }
2985 if( Assembler::is_simm16(disp) ) {
2986 __ sw(R0, AT, disp);
2987 } else {
2988 __ move(T9, disp);
2989 __ daddu(AT, AT, T9);
2990 __ sw(R0, AT, 0);
2991 }
2992 } else {
2993 if( Assembler::is_simm16(disp) ) {
2994 __ sw(R0, as_Register(base), disp);
2995 } else {
2996 __ move(T9, disp);
2997 __ daddu(AT, as_Register(base), T9);
2998 __ sw(R0, AT, 0);
2999 }
3000 }
3001 %}
3003 enc_class load_L_enc (mRegL dst, memory mem) %{
3004 MacroAssembler _masm(&cbuf);
3005 int base = $mem$$base;
3006 int index = $mem$$index;
3007 int scale = $mem$$scale;
3008 int disp = $mem$$disp;
3009 Register dst_reg = as_Register($dst$$reg);
3011 if( index != 0 ) {
3012 if (scale == 0) {
3013 __ daddu(AT, as_Register(base), as_Register(index));
3014 } else {
3015 __ dsll(AT, as_Register(index), scale);
3016 __ daddu(AT, as_Register(base), AT);
3017 }
3018 if( Assembler::is_simm16(disp) ) {
3019 __ ld(dst_reg, AT, disp);
3020 } else {
3021 __ move(T9, disp);
3022 __ daddu(AT, AT, T9);
3023 __ ld(dst_reg, AT, 0);
3024 }
3025 } else {
3026 if( Assembler::is_simm16(disp) ) {
3027 __ ld(dst_reg, as_Register(base), disp);
3028 } else {
3029 __ move(T9, disp);
3030 __ daddu(AT, as_Register(base), T9);
3031 __ ld(dst_reg, AT, 0);
3032 }
3033 }
3034 %}
3036 enc_class store_L_reg_enc (memory mem, mRegL src) %{
3037 MacroAssembler _masm(&cbuf);
3038 int base = $mem$$base;
3039 int index = $mem$$index;
3040 int scale = $mem$$scale;
3041 int disp = $mem$$disp;
3042 Register src_reg = as_Register($src$$reg);
3044 if( index != 0 ) {
3045 if (scale == 0) {
3046 __ daddu(AT, as_Register(base), as_Register(index));
3047 } else {
3048 __ dsll(AT, as_Register(index), scale);
3049 __ daddu(AT, as_Register(base), AT);
3050 }
3051 if( Assembler::is_simm16(disp) ) {
3052 __ sd(src_reg, AT, disp);
3053 } else {
3054 __ move(T9, disp);
3055 __ daddu(AT, AT, T9);
3056 __ sd(src_reg, AT, 0);
3057 }
3058 } else {
3059 if( Assembler::is_simm16(disp) ) {
3060 __ sd(src_reg, as_Register(base), disp);
3061 } else {
3062 __ move(T9, disp);
3063 __ daddu(AT, as_Register(base), T9);
3064 __ sd(src_reg, AT, 0);
3065 }
3066 }
3067 %}
3069 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3070 MacroAssembler _masm(&cbuf);
3071 int base = $mem$$base;
3072 int index = $mem$$index;
3073 int scale = $mem$$scale;
3074 int disp = $mem$$disp;
3076 if( index != 0 ) {
3077 if (scale == 0) {
3078 __ daddu(AT, as_Register(base), as_Register(index));
3079 } else {
3080 __ dsll(AT, as_Register(index), scale);
3081 __ daddu(AT, as_Register(base), AT);
3082 }
3083 if( Assembler::is_simm16(disp) ) {
3084 __ sd(R0, AT, disp);
3085 } else {
3086 __ move(T9, disp);
3087 __ addu(AT, AT, T9);
3088 __ sd(R0, AT, 0);
3089 }
3090 } else {
3091 if( Assembler::is_simm16(disp) ) {
3092 __ sd(R0, as_Register(base), disp);
3093 } else {
3094 __ move(T9, disp);
3095 __ addu(AT, as_Register(base), T9);
3096 __ sd(R0, AT, 0);
3097 }
3098 }
3099 %}
3101 enc_class store_L_immL_enc (memory mem, immL src) %{
3102 MacroAssembler _masm(&cbuf);
3103 int base = $mem$$base;
3104 int index = $mem$$index;
3105 int scale = $mem$$scale;
3106 int disp = $mem$$disp;
3107 long imm = $src$$constant;
3109 if( index != 0 ) {
3110 if (scale == 0) {
3111 __ daddu(AT, as_Register(base), as_Register(index));
3112 } else {
3113 __ dsll(AT, as_Register(index), scale);
3114 __ daddu(AT, as_Register(base), AT);
3115 }
3116 if( Assembler::is_simm16(disp) ) {
3117 __ set64(T9, imm);
3118 __ sd(T9, AT, disp);
3119 } else {
3120 __ move(T9, disp);
3121 __ addu(AT, AT, T9);
3122 __ set64(T9, imm);
3123 __ sd(T9, AT, 0);
3124 }
3125 } else {
3126 if( Assembler::is_simm16(disp) ) {
3127 __ move(AT, as_Register(base));
3128 __ set64(T9, imm);
3129 __ sd(T9, AT, disp);
3130 } else {
3131 __ move(T9, disp);
3132 __ addu(AT, as_Register(base), T9);
3133 __ set64(T9, imm);
3134 __ sd(T9, AT, 0);
3135 }
3136 }
3137 %}
  // Load a 32-bit float into an FPU register:
  //   dst = *(float*)(base + (index << scale) + disp).
  // AT and T9 are integer scratch registers.  With UseLEXT1 the Loongson
  // gslwxc1 (load-word-indexed to coprocessor 1) folds the index add
  // into the load.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Large disp: materialize it in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
  // Store the 32-bit float in src to [base + (index << scale) + disp].
  // Mirror image of load_F_enc: uses the Loongson EXT1 indexed store
  // (gsswxc1) when UseLEXT1 is set, otherwise forms the address in AT
  // and uses plain MIPS swc1. Clobbers AT and, when the displacement
  // does not fit in 16 bits, T9.
  enc_class store_F_reg_enc (memory mem, regF src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister src = $src$$FloatRegister;

    if( index != 0 ) {
      if ( Assembler::is_simm16(disp) ) {
        // gsswxc1 only accepts an 8-bit immediate offset.
        if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswxc1(src, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswxc1(src, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ swc1(src, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gsswxc1(src, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ swc1(src, AT, 0);
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ swc1(src, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gsswxc1(src, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ swc1(src, AT, 0);
        }
      }
    }
  %}
3251 enc_class load_D_enc (regD dst, memory mem) %{
3252 MacroAssembler _masm(&cbuf);
3253 int base = $mem$$base;
3254 int index = $mem$$index;
3255 int scale = $mem$$scale;
3256 int disp = $mem$$disp;
3257 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3259 if ( index != 0 ) {
3260 if ( Assembler::is_simm16(disp) ) {
3261 if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
3262 if (scale == 0) {
3263 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3264 } else {
3265 __ dsll(AT, as_Register(index), scale);
3266 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3267 }
3268 } else {
3269 if (scale == 0) {
3270 __ daddu(AT, as_Register(base), as_Register(index));
3271 } else {
3272 __ dsll(AT, as_Register(index), scale);
3273 __ daddu(AT, as_Register(base), AT);
3274 }
3275 __ ldc1(dst_reg, AT, disp);
3276 }
3277 } else {
3278 if (scale == 0) {
3279 __ daddu(AT, as_Register(base), as_Register(index));
3280 } else {
3281 __ dsll(AT, as_Register(index), scale);
3282 __ daddu(AT, as_Register(base), AT);
3283 }
3284 __ move(T9, disp);
3285 if (UseLEXT1) {
3286 __ gsldxc1(dst_reg, AT, T9, 0);
3287 } else {
3288 __ addu(AT, AT, T9);
3289 __ ldc1(dst_reg, AT, 0);
3290 }
3291 }
3292 } else {
3293 if( Assembler::is_simm16(disp) ) {
3294 __ ldc1(dst_reg, as_Register(base), disp);
3295 } else {
3296 __ move(T9, disp);
3297 if (UseLEXT1) {
3298 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3299 } else {
3300 __ addu(AT, as_Register(base), T9);
3301 __ ldc1(dst_reg, AT, 0);
3302 }
3303 }
3304 }
3305 %}
3307 enc_class store_D_reg_enc (memory mem, regD src) %{
3308 MacroAssembler _masm(&cbuf);
3309 int base = $mem$$base;
3310 int index = $mem$$index;
3311 int scale = $mem$$scale;
3312 int disp = $mem$$disp;
3313 FloatRegister src_reg = as_FloatRegister($src$$reg);
3315 if ( index != 0 ) {
3316 if ( Assembler::is_simm16(disp) ) {
3317 if ( UseLEXT1 && Assembler::is_simm(disp, 8) ) {
3318 if (scale == 0) {
3319 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3320 } else {
3321 __ dsll(AT, as_Register(index), scale);
3322 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3323 }
3324 } else {
3325 if (scale == 0) {
3326 __ daddu(AT, as_Register(base), as_Register(index));
3327 } else {
3328 __ dsll(AT, as_Register(index), scale);
3329 __ daddu(AT, as_Register(base), AT);
3330 }
3331 __ sdc1(src_reg, AT, disp);
3332 }
3333 } else {
3334 if (scale == 0) {
3335 __ daddu(AT, as_Register(base), as_Register(index));
3336 } else {
3337 __ dsll(AT, as_Register(index), scale);
3338 __ daddu(AT, as_Register(base), AT);
3339 }
3340 __ move(T9, disp);
3341 if (UseLEXT1) {
3342 __ gssdxc1(src_reg, AT, T9, 0);
3343 } else {
3344 __ addu(AT, AT, T9);
3345 __ sdc1(src_reg, AT, 0);
3346 }
3347 }
3348 } else {
3349 if ( Assembler::is_simm16(disp) ) {
3350 __ sdc1(src_reg, as_Register(base), disp);
3351 } else {
3352 __ move(T9, disp);
3353 if (UseLEXT1) {
3354 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3355 } else {
3356 __ addu(AT, as_Register(base), T9);
3357 __ sdc1(src_reg, AT, 0);
3358 }
3359 }
3360 }
3361 %}
  // Emit a call from compiled Java code into the runtime (also used for
  // leaf runtime calls). Marks the instruction start so the relocation
  // record attaches to the call site, then emits a patchable call.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ patchable_call((address)$meth$$method);
  %}
  // Emit a Java static (or statically-bound) call. The relocation type is
  // chosen from the call-site context: no resolved _method means a runtime
  // stub, _optimized_virtual is a statically-bound virtual, otherwise a
  // plain static call. A resolved _method also needs a java-to-interpreter
  // stub so the callee can be entered before it is compiled.
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
3394 //
3395 // [Ref: LIR_Assembler::ic_call() ]
3396 //
  // Emit a Java dynamic (virtual/interface) call through the inline cache.
  // ic_call handles the inline-cache register setup and relocation.
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a boolean condition value in the flags register:
  // flags = (AT != 0) ? 0xFFFFFFFF : 0.
  // NOTE(review): AT is presumably set by the preceding fast_lock/fast_unlock
  // code this encoding follows — confirm against the matching instruct rules.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);      // flags = 0
    __ beq(AT, R0, L);           // AT == 0: leave flags at 0
    __ delayed()->nop();
    __ move(flags, 0xFFFFFFFF);  // AT != 0: flags = -1
    __ bind(L);
  %}
  // Slow-path subtype check: result = 0 if sub is a subtype of super,
  // 1 otherwise (mirrors the X86_64 RDI convention noted below).
  // T9 is used as a scratch register; `tmp` receives the scan length.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    // result may be the same as sub
    // 47c B40: # B21 B41 <- B20  Freq: 0.155379
    // 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
    // 4bc mov S2, NULL #@loadConP
    // 4c0 beq S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
    //
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    // Refer to X86_64's RDI: 0 means the check succeeded.
    __ move(result, 0);
    __ b(done);
    __ delayed()->nop();

    __ bind(miss);
    __ move(result, 1);
    __ bind(done);
  %}
3446 %}
3449 //---------MIPS FRAME--------------------------------------------------------------
3450 // Definition of frame structure and management information.
3451 //
3452 // S T A C K L A Y O U T Allocators stack-slot number
3453 // | (to get allocators register number
3454 // G Owned by | | v add SharedInfo::stack0)
3455 // r CALLER | |
3456 // o | +--------+ pad to even-align allocators stack-slot
3457 // w V | pad0 | numbers; owned by CALLER
3458 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3459 // h ^ | in | 5
3460 // | | args | 4 Holes in incoming args owned by SELF
3461 // | | old | | 3
3462 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3463 // v | | ret | 3 return address
3464 // Owned by +--------+
3465 // Self | pad2 | 2 pad to align old SP
3466 // | +--------+ 1
3467 // | | locks | 0
3468 // | +--------+----> SharedInfo::stack0, even aligned
3469 // | | pad1 | 11 pad to align new SP
3470 // | +--------+
3471 // | | | 10
3472 // | | spills | 9 spills
3473 // V | | 8 (pad0 slot for callee)
3474 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3475 // ^ | out | 7
3476 // | | args | 6 Holes in outgoing args owned by CALLEE
3477 // Owned by new | |
3478 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3479 // | |
3480 //
3481 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3482 // known from SELF's arguments and the Java calling convention.
3483 // Region 6-7 is determined per call site.
3484 // Note 2: If the calling convention leaves holes in the incoming argument
3485 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE.  Holes should not be necessary in the
3487 // incoming area, as the Java calling convention is completely under
3488 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes.  Holes in the outgoing arguments may be necessary for
3490 // varargs C calling conventions.
3491 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3492 // even aligned with pad0 as needed.
3493 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3494 // region 6-11 is even aligned; it may be padded out more so that
3495 // the region from SP to FP meets the minimum stack alignment.
3496 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3497 // alignment. Region 11, pad1, may be dynamically extended so that
3498 // SP meets the minimum alignment.
// Frame definition: stack growth direction, calling-convention registers,
// stack alignment, and the location of return values for this port.
frame %{
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information.

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots.
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this.
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information.
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
                     /* --      , --      , Op_RegN     , Op_RegI     , Op_RegP , Op_RegF     , Op_RegD , Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
                     /* --      , --      , Op_RegN     , Op_RegI     , Op_RegP , Op_RegF     , Op_RegD , Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3609 //----------ATTRIBUTES---------------------------------------------------------
3610 //----------Operand Attributes-------------------------------------------------
// Default operand/instruction attributes used by the ADLC-generated matcher.
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);      // Required cost attribute
ins_attrib ins_size(32);       // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0);// Required flag: is this instruction a
                               // non-matching short branch variant of some
                               // long branch?
ins_attrib ins_alignment(4);   // Required alignment attribute (must be a power of 2)
                               // specifies the alignment that some part of the instruction (not
                               // necessarily the start) requires. If > 1, a compute_padding()
                               // function must be provided for the instruction
3625 //----------OPERANDS-----------------------------------------------------------
3626 // Operand definitions must precede instruction definitions for correct parsing
3627 // in the ADLC because operands constitute user defined types which are used in
3628 // instruction definitions.
3630 // Vectors
// 64-bit vector operand, allocated in the double FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "AT" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate: any 32-bit constant
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit: [-128, 128)
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest 32-bit signed int (Integer.MAX_VALUE)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes: 0..3
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed integer immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed integer immediate (fits MIPS simm16 fields)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid int shift amounts: 0..31
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask (contiguous low-bit mask)
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid long shift amounts handled by the 32..63 forms
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range usable when subtraction is implemented as addition of -imm
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit immediate (fits MIPS zero-extended imm16, e.g. andi/ori)
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}
// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, cheap enough to materialize with set64
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit; oops, or constants needing > 3 instructions,
// are loaded from the constant table instead of synthesized inline
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop constant cheap to synthesize (<= 3 insts)
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) oop Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) klass Pointer Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask (contiguous low-bit mask)
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit long immediate
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate (fits MIPS simm16 fields)
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range usable when subtraction is implemented as addition of -imm
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero (matched by bit pattern, so -0.0f
// with the same bits as +0.0f would also match — predicate uses jint_cast)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero (bit-pattern comparison via jlong_cast)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register: any allocatable int register. The single-register
// operands below pin an operand to one specific machine register; they
// all also match mRegI so the matcher can narrow when a rule requires it.
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the argument/value (Ax) registers
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow (compressed) oop register: any allocatable int register.
// The single-register variants below pin a narrow oop to one machine
// register; each also matches mRegN so rules can narrow when needed.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
4595 operand s2_RegN() %{
4596 constraint(ALLOC_IN_RC(s2_reg));
4597 match(RegN);
4598 match(mRegN);
4600 format %{ %}
4601 interface(REG_INTER);
4602 %}
4604 operand s3_RegN() %{
4605 constraint(ALLOC_IN_RC(s3_reg));
4606 match(RegN);
4607 match(mRegN);
4609 format %{ %}
4610 interface(REG_INTER);
4611 %}
4613 operand s4_RegN() %{
4614 constraint(ALLOC_IN_RC(s4_reg));
4615 match(RegN);
4616 match(mRegN);
4618 format %{ %}
4619 interface(REG_INTER);
4620 %}
4622 operand s5_RegN() %{
4623 constraint(ALLOC_IN_RC(s5_reg));
4624 match(RegN);
4625 match(mRegN);
4627 format %{ %}
4628 interface(REG_INTER);
4629 %}
4631 operand s6_RegN() %{
4632 constraint(ALLOC_IN_RC(s6_reg));
4633 match(RegN);
4634 match(mRegN);
4636 format %{ %}
4637 interface(REG_INTER);
4638 %}
4640 operand s7_RegN() %{
4641 constraint(ALLOC_IN_RC(s7_reg));
4642 match(RegN);
4643 match(mRegN);
4645 format %{ %}
4646 interface(REG_INTER);
4647 %}
4649 operand v0_RegN() %{
4650 constraint(ALLOC_IN_RC(v0_reg));
4651 match(RegN);
4652 match(mRegN);
4654 format %{ %}
4655 interface(REG_INTER);
4656 %}
4658 operand v1_RegN() %{
4659 constraint(ALLOC_IN_RC(v1_reg));
4660 match(RegN);
4661 match(mRegN);
4663 format %{ %}
4664 interface(REG_INTER);
4665 %}
4667 // Pointer Register
// General pointer register operand: any register in the p_reg class.
// Also matches the more specific a0_RegP so values constrained to A0
// can still feed instructions that accept any pointer register.
4668 operand mRegP() %{
4669 constraint(ALLOC_IN_RC(p_reg));
4670 match(RegP);
4671 match(a0_RegP);
4673 format %{ %}
4674 interface(REG_INTER);
4675 %}
// Pointer register operand drawn from the p_reg class with T8 excluded
// (no_T8_p_reg); used by instructions whose encodings need T8 free.
4677 operand no_T8_mRegP() %{
4678 constraint(ALLOC_IN_RC(no_T8_p_reg));
4679 match(RegP);
4680 match(mRegP);
4682 format %{ %}
4683 interface(REG_INTER);
4684 %}
4686 operand s0_RegP()
4687 %{
4688 constraint(ALLOC_IN_RC(s0_long_reg));
4689 match(RegP);
4690 match(mRegP);
4691 match(no_T8_mRegP);
4693 format %{ %}
4694 interface(REG_INTER);
4695 %}
4697 operand s1_RegP()
4698 %{
4699 constraint(ALLOC_IN_RC(s1_long_reg));
4700 match(RegP);
4701 match(mRegP);
4702 match(no_T8_mRegP);
4704 format %{ %}
4705 interface(REG_INTER);
4706 %}
4708 operand s2_RegP()
4709 %{
4710 constraint(ALLOC_IN_RC(s2_long_reg));
4711 match(RegP);
4712 match(mRegP);
4713 match(no_T8_mRegP);
4715 format %{ %}
4716 interface(REG_INTER);
4717 %}
4719 operand s3_RegP()
4720 %{
4721 constraint(ALLOC_IN_RC(s3_long_reg));
4722 match(RegP);
4723 match(mRegP);
4724 match(no_T8_mRegP);
4726 format %{ %}
4727 interface(REG_INTER);
4728 %}
4730 operand s4_RegP()
4731 %{
4732 constraint(ALLOC_IN_RC(s4_long_reg));
4733 match(RegP);
4734 match(mRegP);
4735 match(no_T8_mRegP);
4737 format %{ %}
4738 interface(REG_INTER);
4739 %}
4741 operand s5_RegP()
4742 %{
4743 constraint(ALLOC_IN_RC(s5_long_reg));
4744 match(RegP);
4745 match(mRegP);
4746 match(no_T8_mRegP);
4748 format %{ %}
4749 interface(REG_INTER);
4750 %}
4752 operand s6_RegP()
4753 %{
4754 constraint(ALLOC_IN_RC(s6_long_reg));
4755 match(RegP);
4756 match(mRegP);
4757 match(no_T8_mRegP);
4759 format %{ %}
4760 interface(REG_INTER);
4761 %}
4763 operand s7_RegP()
4764 %{
4765 constraint(ALLOC_IN_RC(s7_long_reg));
4766 match(RegP);
4767 match(mRegP);
4768 match(no_T8_mRegP);
4770 format %{ %}
4771 interface(REG_INTER);
4772 %}
4774 operand t0_RegP()
4775 %{
4776 constraint(ALLOC_IN_RC(t0_long_reg));
4777 match(RegP);
4778 match(mRegP);
4779 match(no_T8_mRegP);
4781 format %{ %}
4782 interface(REG_INTER);
4783 %}
4785 operand t1_RegP()
4786 %{
4787 constraint(ALLOC_IN_RC(t1_long_reg));
4788 match(RegP);
4789 match(mRegP);
4790 match(no_T8_mRegP);
4792 format %{ %}
4793 interface(REG_INTER);
4794 %}
4796 operand t2_RegP()
4797 %{
4798 constraint(ALLOC_IN_RC(t2_long_reg));
4799 match(RegP);
4800 match(mRegP);
4801 match(no_T8_mRegP);
4803 format %{ %}
4804 interface(REG_INTER);
4805 %}
4807 operand t3_RegP()
4808 %{
4809 constraint(ALLOC_IN_RC(t3_long_reg));
4810 match(RegP);
4811 match(mRegP);
4812 match(no_T8_mRegP);
4814 format %{ %}
4815 interface(REG_INTER);
4816 %}
// T8 pointer operand.  Unlike the sibling *_RegP operands it does NOT
// match(no_T8_mRegP): T8 is by definition excluded from that class, so
// adding the match would be inconsistent.
4818 operand t8_RegP()
4819 %{
4820 constraint(ALLOC_IN_RC(t8_long_reg));
4821 match(RegP);
4822 match(mRegP);
4824 format %{ %}
4825 interface(REG_INTER);
4826 %}
4828 operand t9_RegP()
4829 %{
4830 constraint(ALLOC_IN_RC(t9_long_reg));
4831 match(RegP);
4832 match(mRegP);
4833 match(no_T8_mRegP);
4835 format %{ %}
4836 interface(REG_INTER);
4837 %}
4839 operand a0_RegP()
4840 %{
4841 constraint(ALLOC_IN_RC(a0_long_reg));
4842 match(RegP);
4843 match(mRegP);
4844 match(no_T8_mRegP);
4846 format %{ %}
4847 interface(REG_INTER);
4848 %}
4850 operand a1_RegP()
4851 %{
4852 constraint(ALLOC_IN_RC(a1_long_reg));
4853 match(RegP);
4854 match(mRegP);
4855 match(no_T8_mRegP);
4857 format %{ %}
4858 interface(REG_INTER);
4859 %}
4861 operand a2_RegP()
4862 %{
4863 constraint(ALLOC_IN_RC(a2_long_reg));
4864 match(RegP);
4865 match(mRegP);
4866 match(no_T8_mRegP);
4868 format %{ %}
4869 interface(REG_INTER);
4870 %}
4872 operand a3_RegP()
4873 %{
4874 constraint(ALLOC_IN_RC(a3_long_reg));
4875 match(RegP);
4876 match(mRegP);
4877 match(no_T8_mRegP);
4879 format %{ %}
4880 interface(REG_INTER);
4881 %}
4883 operand a4_RegP()
4884 %{
4885 constraint(ALLOC_IN_RC(a4_long_reg));
4886 match(RegP);
4887 match(mRegP);
4888 match(no_T8_mRegP);
4890 format %{ %}
4891 interface(REG_INTER);
4892 %}
4895 operand a5_RegP()
4896 %{
4897 constraint(ALLOC_IN_RC(a5_long_reg));
4898 match(RegP);
4899 match(mRegP);
4900 match(no_T8_mRegP);
4902 format %{ %}
4903 interface(REG_INTER);
4904 %}
4906 operand a6_RegP()
4907 %{
4908 constraint(ALLOC_IN_RC(a6_long_reg));
4909 match(RegP);
4910 match(mRegP);
4911 match(no_T8_mRegP);
4913 format %{ %}
4914 interface(REG_INTER);
4915 %}
4917 operand a7_RegP()
4918 %{
4919 constraint(ALLOC_IN_RC(a7_long_reg));
4920 match(RegP);
4921 match(mRegP);
4922 match(no_T8_mRegP);
4924 format %{ %}
4925 interface(REG_INTER);
4926 %}
4928 operand v0_RegP()
4929 %{
4930 constraint(ALLOC_IN_RC(v0_long_reg));
4931 match(RegP);
4932 match(mRegP);
4933 match(no_T8_mRegP);
4935 format %{ %}
4936 interface(REG_INTER);
4937 %}
4939 operand v1_RegP()
4940 %{
4941 constraint(ALLOC_IN_RC(v1_long_reg));
4942 match(RegP);
4943 match(mRegP);
4944 match(no_T8_mRegP);
4946 format %{ %}
4947 interface(REG_INTER);
4948 %}
4950 /*
4951 operand mSPRegP(mRegP reg) %{
4952 constraint(ALLOC_IN_RC(sp_reg));
4953 match(reg);
4955 format %{ "SP" %}
4956 interface(REG_INTER);
4957 %}
4959 operand mFPRegP(mRegP reg) %{
4960 constraint(ALLOC_IN_RC(fp_reg));
4961 match(reg);
4963 format %{ "FP" %}
4964 interface(REG_INTER);
4965 %}
4966 */
// General 64-bit long register operand (long_reg class).  The
// per-register *RegL operands below all also match this operand.
4968 operand mRegL() %{
4969 constraint(ALLOC_IN_RC(long_reg));
4970 match(RegL);
4972 format %{ %}
4973 interface(REG_INTER);
4974 %}
4976 operand v0RegL() %{
4977 constraint(ALLOC_IN_RC(v0_long_reg));
4978 match(RegL);
4979 match(mRegL);
4981 format %{ %}
4982 interface(REG_INTER);
4983 %}
4985 operand v1RegL() %{
4986 constraint(ALLOC_IN_RC(v1_long_reg));
4987 match(RegL);
4988 match(mRegL);
4990 format %{ %}
4991 interface(REG_INTER);
4992 %}
4994 operand a0RegL() %{
4995 constraint(ALLOC_IN_RC(a0_long_reg));
4996 match(RegL);
4997 match(mRegL);
4999 format %{ "A0" %}
5000 interface(REG_INTER);
5001 %}
5003 operand a1RegL() %{
5004 constraint(ALLOC_IN_RC(a1_long_reg));
5005 match(RegL);
5006 match(mRegL);
5008 format %{ %}
5009 interface(REG_INTER);
5010 %}
5012 operand a2RegL() %{
5013 constraint(ALLOC_IN_RC(a2_long_reg));
5014 match(RegL);
5015 match(mRegL);
5017 format %{ %}
5018 interface(REG_INTER);
5019 %}
5021 operand a3RegL() %{
5022 constraint(ALLOC_IN_RC(a3_long_reg));
5023 match(RegL);
5024 match(mRegL);
5026 format %{ %}
5027 interface(REG_INTER);
5028 %}
5030 operand t0RegL() %{
5031 constraint(ALLOC_IN_RC(t0_long_reg));
5032 match(RegL);
5033 match(mRegL);
5035 format %{ %}
5036 interface(REG_INTER);
5037 %}
5039 operand t1RegL() %{
5040 constraint(ALLOC_IN_RC(t1_long_reg));
5041 match(RegL);
5042 match(mRegL);
5044 format %{ %}
5045 interface(REG_INTER);
5046 %}
5048 operand t2RegL() %{
5049 constraint(ALLOC_IN_RC(t2_long_reg));
5050 match(RegL);
5051 match(mRegL);
5053 format %{ %}
5054 interface(REG_INTER);
5055 %}
5057 operand t3RegL() %{
5058 constraint(ALLOC_IN_RC(t3_long_reg));
5059 match(RegL);
5060 match(mRegL);
5062 format %{ %}
5063 interface(REG_INTER);
5064 %}
5066 operand t8RegL() %{
5067 constraint(ALLOC_IN_RC(t8_long_reg));
5068 match(RegL);
5069 match(mRegL);
5071 format %{ %}
5072 interface(REG_INTER);
5073 %}
5075 operand a4RegL() %{
5076 constraint(ALLOC_IN_RC(a4_long_reg));
5077 match(RegL);
5078 match(mRegL);
5080 format %{ %}
5081 interface(REG_INTER);
5082 %}
5084 operand a5RegL() %{
5085 constraint(ALLOC_IN_RC(a5_long_reg));
5086 match(RegL);
5087 match(mRegL);
5089 format %{ %}
5090 interface(REG_INTER);
5091 %}
5093 operand a6RegL() %{
5094 constraint(ALLOC_IN_RC(a6_long_reg));
5095 match(RegL);
5096 match(mRegL);
5098 format %{ %}
5099 interface(REG_INTER);
5100 %}
5102 operand a7RegL() %{
5103 constraint(ALLOC_IN_RC(a7_long_reg));
5104 match(RegL);
5105 match(mRegL);
5107 format %{ %}
5108 interface(REG_INTER);
5109 %}
5111 operand s0RegL() %{
5112 constraint(ALLOC_IN_RC(s0_long_reg));
5113 match(RegL);
5114 match(mRegL);
5116 format %{ %}
5117 interface(REG_INTER);
5118 %}
5120 operand s1RegL() %{
5121 constraint(ALLOC_IN_RC(s1_long_reg));
5122 match(RegL);
5123 match(mRegL);
5125 format %{ %}
5126 interface(REG_INTER);
5127 %}
5129 operand s2RegL() %{
5130 constraint(ALLOC_IN_RC(s2_long_reg));
5131 match(RegL);
5132 match(mRegL);
5134 format %{ %}
5135 interface(REG_INTER);
5136 %}
5138 operand s3RegL() %{
5139 constraint(ALLOC_IN_RC(s3_long_reg));
5140 match(RegL);
5141 match(mRegL);
5143 format %{ %}
5144 interface(REG_INTER);
5145 %}
5147 operand s4RegL() %{
5148 constraint(ALLOC_IN_RC(s4_long_reg));
5149 match(RegL);
5150 match(mRegL);
5152 format %{ %}
5153 interface(REG_INTER);
5154 %}
5156 operand s7RegL() %{
5157 constraint(ALLOC_IN_RC(s7_long_reg));
5158 match(RegL);
5159 match(mRegL);
5161 format %{ %}
5162 interface(REG_INTER);
5163 %}
5165 // Floating register operands
// Single-precision floating-point register operand (flt_reg class).
5166 operand regF() %{
5167 constraint(ALLOC_IN_RC(flt_reg));
5168 match(RegF);
5170 format %{ %}
5171 interface(REG_INTER);
5172 %}
5174 //Double Precision Floating register operands
// Double-precision floating-point register operand (dbl_reg class).
5175 operand regD() %{
5176 constraint(ALLOC_IN_RC(dbl_reg));
5177 match(RegD);
5179 format %{ %}
5180 interface(REG_INTER);
5181 %}
5183 //----------Memory Operands----------------------------------------------------
5184 // Indirect Memory Operand
// Plain register-indirect addressing: [reg].  The MEMORY_INTER fields
// (base/index/scale/disp) are what the encoders read; index 0x0 with
// scale 0x0 and disp 0x0 means "base register only, no offset".
5185 operand indirect(mRegP reg) %{
5186 constraint(ALLOC_IN_RC(p_reg));
5187 match(reg);
5189 format %{ "[$reg] @ indirect" %}
5190 interface(MEMORY_INTER) %{
5191 base($reg);
5192 index(0x0); /* NO_INDEX */
5193 scale(0x0);
5194 disp(0x0);
5195 %}
5196 %}
5198 // Indirect Memory Plus Short Offset Operand
5199 operand indOffset8(mRegP reg, immL8 off)
5200 %{
5201 constraint(ALLOC_IN_RC(p_reg));
5202 match(AddP reg off);
5204 op_cost(10);
5205 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5206 interface(MEMORY_INTER) %{
5207 base($reg);
5208 index(0x0); /* NO_INDEX */
5209 scale(0x0);
5210 disp($off);
5211 %}
5212 %}
5214 // Indirect Memory Times Scale Plus Index Register
5215 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5216 %{
5217 constraint(ALLOC_IN_RC(p_reg));
5218 match(AddP reg (LShiftL lreg scale));
5220 op_cost(10);
5221 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5222 interface(MEMORY_INTER) %{
5223 base($reg);
5224 index($lreg);
5225 scale($scale);
5226 disp(0x0);
5227 %}
5228 %}
5231 // [base + index + offset]
5232 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5233 %{
5234 constraint(ALLOC_IN_RC(p_reg));
5235 op_cost(5);
5236 match(AddP (AddP base index) off);
5238 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5239 interface(MEMORY_INTER) %{
5240 base($base);
5241 index($index);
5242 scale(0x0);
5243 disp($off);
5244 %}
5245 %}
5247 // [base + index + offset]
5248 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5249 %{
5250 constraint(ALLOC_IN_RC(p_reg));
5251 op_cost(5);
5252 match(AddP (AddP base (ConvI2L index)) off);
5254 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5255 interface(MEMORY_INTER) %{
5256 base($base);
5257 index($index);
5258 scale(0x0);
5259 disp($off);
5260 %}
5261 %}
5263 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5264 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5265 %{
5266 constraint(ALLOC_IN_RC(p_reg));
5267 match(AddP (AddP reg (LShiftL lreg scale)) off);
5269 op_cost(10);
5270 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5271 interface(MEMORY_INTER) %{
5272 base($reg);
5273 index($lreg);
5274 scale($scale);
5275 disp($off);
5276 %}
5277 %}
5279 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5280 %{
5281 constraint(ALLOC_IN_RC(p_reg));
5282 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5284 op_cost(10);
5285 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5286 interface(MEMORY_INTER) %{
5287 base($reg);
5288 index($ireg);
5289 scale($scale);
5290 disp($off);
5291 %}
5292 %}
5294 // [base + index<<scale + offset]
// [base + (long)index << scale + off8] with an int index widened via
// ConvI2L.  NOTE(review): the predicate asserting the index is
// non-negative is commented out, so despite the "Pos" in the name this
// operand currently matches signed indices as well — confirm intended.
5295 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5296 %{
5297 constraint(ALLOC_IN_RC(p_reg));
5298 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5299 op_cost(10);
5300 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5302 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5303 interface(MEMORY_INTER) %{
5304 base($base);
5305 index($index);
5306 scale($scale);
5307 disp($off);
5308 %}
5309 %}
5311 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5312 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5313 %{
5314 predicate(Universe::narrow_oop_shift() == 0);
5315 constraint(ALLOC_IN_RC(p_reg));
5316 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5318 op_cost(10);
5319 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5320 interface(MEMORY_INTER) %{
5321 base($reg);
5322 index($lreg);
5323 scale($scale);
5324 disp($off);
5325 %}
5326 %}
5328 // [base + index<<scale + offset] for compressed oops
// Compressed-oop base variant of basePosIndexScaleOffset8: only legal
// when DecodeN is a no-op (narrow_oop_shift() == 0), so the narrow base
// register can be used directly as the address base.
// NOTE(review): as above, the positivity check on the index is disabled
// (kept only in the commented-out predicate) — confirm intended.
5329 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5330 %{
5331 constraint(ALLOC_IN_RC(p_reg));
5332 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5333 predicate(Universe::narrow_oop_shift() == 0);
5334 op_cost(10);
5335 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5337 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5338 interface(MEMORY_INTER) %{
5339 base($base);
5340 index($index);
5341 scale($scale);
5342 disp($off);
5343 %}
5344 %}
5346 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5347 // Indirect Memory Plus Long Offset Operand
5348 operand indOffset32(mRegP reg, immL32 off) %{
5349 constraint(ALLOC_IN_RC(p_reg));
5350 op_cost(20);
5351 match(AddP reg off);
5353 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5354 interface(MEMORY_INTER) %{
5355 base($reg);
5356 index(0x0); /* NO_INDEX */
5357 scale(0x0);
5358 disp($off);
5359 %}
5360 %}
5362 // Indirect Memory Plus Index Register
5363 operand indIndex(mRegP addr, mRegL index) %{
5364 constraint(ALLOC_IN_RC(p_reg));
5365 match(AddP addr index);
5367 op_cost(20);
5368 format %{"[$addr + $index] @ indIndex" %}
5369 interface(MEMORY_INTER) %{
5370 base($addr);
5371 index($index);
5372 scale(0x0);
5373 disp(0x0);
5374 %}
5375 %}
5377 operand indirectNarrowKlass(mRegN reg)
5378 %{
5379 predicate(Universe::narrow_klass_shift() == 0);
5380 constraint(ALLOC_IN_RC(p_reg));
5381 op_cost(10);
5382 match(DecodeNKlass reg);
5384 format %{ "[$reg] @ indirectNarrowKlass" %}
5385 interface(MEMORY_INTER) %{
5386 base($reg);
5387 index(0x0);
5388 scale(0x0);
5389 disp(0x0);
5390 %}
5391 %}
5393 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5394 %{
5395 predicate(Universe::narrow_klass_shift() == 0);
5396 constraint(ALLOC_IN_RC(p_reg));
5397 op_cost(10);
5398 match(AddP (DecodeNKlass reg) off);
5400 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5401 interface(MEMORY_INTER) %{
5402 base($reg);
5403 index(0x0);
5404 scale(0x0);
5405 disp($off);
5406 %}
5407 %}
5409 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5410 %{
5411 predicate(Universe::narrow_klass_shift() == 0);
5412 constraint(ALLOC_IN_RC(p_reg));
5413 op_cost(10);
5414 match(AddP (DecodeNKlass reg) off);
5416 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5417 interface(MEMORY_INTER) %{
5418 base($reg);
5419 index(0x0);
5420 scale(0x0);
5421 disp($off);
5422 %}
5423 %}
5425 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5426 %{
5427 predicate(Universe::narrow_klass_shift() == 0);
5428 constraint(ALLOC_IN_RC(p_reg));
5429 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5431 op_cost(10);
5432 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5433 interface(MEMORY_INTER) %{
5434 base($reg);
5435 index($lreg);
5436 scale(0x0);
5437 disp($off);
5438 %}
5439 %}
5441 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5442 %{
5443 predicate(Universe::narrow_klass_shift() == 0);
5444 constraint(ALLOC_IN_RC(p_reg));
5445 match(AddP (DecodeNKlass reg) lreg);
5447 op_cost(10);
5448 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5449 interface(MEMORY_INTER) %{
5450 base($reg);
5451 index($lreg);
5452 scale(0x0);
5453 disp(0x0);
5454 %}
5455 %}
5457 // Indirect Memory Operand
5458 operand indirectNarrow(mRegN reg)
5459 %{
5460 predicate(Universe::narrow_oop_shift() == 0);
5461 constraint(ALLOC_IN_RC(p_reg));
5462 op_cost(10);
5463 match(DecodeN reg);
5465 format %{ "[$reg] @ indirectNarrow" %}
5466 interface(MEMORY_INTER) %{
5467 base($reg);
5468 index(0x0);
5469 scale(0x0);
5470 disp(0x0);
5471 %}
5472 %}
5474 // Indirect Memory Plus Short Offset Operand
5475 operand indOffset8Narrow(mRegN reg, immL8 off)
5476 %{
5477 predicate(Universe::narrow_oop_shift() == 0);
5478 constraint(ALLOC_IN_RC(p_reg));
5479 op_cost(10);
5480 match(AddP (DecodeN reg) off);
5482 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5483 interface(MEMORY_INTER) %{
5484 base($reg);
5485 index(0x0);
5486 scale(0x0);
5487 disp($off);
5488 %}
5489 %}
5491 // Indirect Memory Plus Index Register Plus Offset Operand
5492 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5493 %{
5494 predicate(Universe::narrow_oop_shift() == 0);
5495 constraint(ALLOC_IN_RC(p_reg));
5496 match(AddP (AddP (DecodeN reg) lreg) off);
5498 op_cost(10);
5499 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5500 interface(MEMORY_INTER) %{
5501 base($reg);
5502 index($lreg);
5503 scale(0x0);
5504 disp($off);
5505 %}
5506 %}
5508 //----------Load Long Memory Operands------------------------------------------
5509 // The load-long idiom will use its address expression again after loading
5510 // the first word of the long. If the load-long destination overlaps with
5511 // registers used in the addressing expression, the 2nd half will be loaded
5512 // from a clobbered address. Fix this by requiring that load-long use
5513 // address registers that do not overlap with the load-long target.
5515 // load-long support
// Address register for the load-long idiom.  The high op_cost(100)
// discourages the allocator from picking it except where required,
// keeping the address register from overlapping the load-long target
// (see the comment block above).
5516 operand load_long_RegP() %{
5517 constraint(ALLOC_IN_RC(p_reg));
5518 match(RegP);
5519 match(mRegP);
5520 op_cost(100);
5521 format %{ %}
5522 interface(REG_INTER);
5523 %}
5525 // Indirect Memory Operand Long
5526 operand load_long_indirect(load_long_RegP reg) %{
5527 constraint(ALLOC_IN_RC(p_reg));
5528 match(reg);
5530 format %{ "[$reg]" %}
5531 interface(MEMORY_INTER) %{
5532 base($reg);
5533 index(0x0);
5534 scale(0x0);
5535 disp(0x0);
5536 %}
5537 %}
5539 // Indirect Memory Plus Long Offset Operand
// [reg + off32] addressing for load-long.
// NOTE(review): unlike load_long_indirect, this operand carries no
// constraint(ALLOC_IN_RC(...)) — presumably the load_long_RegP
// parameter already constrains the base; confirm this is deliberate.
5540 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5541 match(AddP reg off);
5543 format %{ "[$reg + $off]" %}
5544 interface(MEMORY_INTER) %{
5545 base($reg);
5546 index(0x0);
5547 scale(0x0);
5548 disp($off);
5549 %}
5550 %}
5552 //----------Conditional Branch Operands----------------------------------------
5553 // Comparison Op - This is the operation of the comparison, and is limited to
5554 // the following set of codes:
5555 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5556 //
5557 // Other attributes of the comparison, such as unsignedness, are specified
5558 // by the comparison instruction that sets a condition code flags register.
5559 // That result is represented by a flags operand whose subtype is appropriate
5560 // to the unsignedness (etc.) of the comparison.
5561 //
5562 // Later, the instruction which matches both the Comparison Op (a Bool) and
5563 // the flags (produced by the Cmp) specifies the coding of the comparison op
5564 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5566 // Comparison Code
// Signed comparison condition operand.  The COND_INTER entries map each
// condition to the port-private encoding value consumed by the
// instruction encodings (0x01..0x06 for EQ/NE/GT/GE/LT/LE, 0x7/0x8 for
// overflow/no-overflow).
5567 operand cmpOp() %{
5568 match(Bool);
5570 format %{ "" %}
5571 interface(COND_INTER) %{
5572 equal(0x01);
5573 not_equal(0x02);
5574 greater(0x03);
5575 greater_equal(0x04);
5576 less(0x05);
5577 less_equal(0x06);
5578 overflow(0x7);
5579 no_overflow(0x8);
5580 %}
5581 %}
5584 // Comparison Code
5585 // Comparison Code, unsigned compare. Used by FP also, with
5586 // C2 (unordered) turned into GT or LT already. The other bits
5587 // C0 and C3 are turned into Carry & Zero flags.
// Unsigned comparison condition operand.  The encoding table is
// identical to cmpOp above; signed vs. unsigned behavior is selected by
// the instruction that matches this operand, not by the encodings here.
5588 operand cmpOpU() %{
5589 match(Bool);
5591 format %{ "" %}
5592 interface(COND_INTER) %{
5593 equal(0x01);
5594 not_equal(0x02);
5595 greater(0x03);
5596 greater_equal(0x04);
5597 less(0x05);
5598 less_equal(0x06);
5599 overflow(0x7);
5600 no_overflow(0x8);
5601 %}
5602 %}
5605 //----------Special Memory Operands--------------------------------------------
5606 // Stack Slot Operand - This operand is used for loading and storing temporary
5607 // values on the stack where a match requires a value to
5608 // flow through memory.
// Pointer spill-slot operand: addresses a stack slot as [SP + offset].
// base(0x1d) hard-codes the SP register's encoding; disp($reg) is the
// slot's frame offset supplied by the register allocator.
5609 operand stackSlotP(sRegP reg) %{
5610 constraint(ALLOC_IN_RC(stack_slots));
5611 // No match rule because this operand is only generated in matching
5612 op_cost(50);
5613 format %{ "[$reg]" %}
5614 interface(MEMORY_INTER) %{
5615 base(0x1d); // SP
5616 index(0x0); // No Index
5617 scale(0x0); // No Scale
5618 disp($reg); // Stack Offset
5619 %}
5620 %}
5622 operand stackSlotI(sRegI reg) %{
5623 constraint(ALLOC_IN_RC(stack_slots));
5624 // No match rule because this operand is only generated in matching
5625 op_cost(50);
5626 format %{ "[$reg]" %}
5627 interface(MEMORY_INTER) %{
5628 base(0x1d); // SP
5629 index(0x0); // No Index
5630 scale(0x0); // No Scale
5631 disp($reg); // Stack Offset
5632 %}
5633 %}
5635 operand stackSlotF(sRegF reg) %{
5636 constraint(ALLOC_IN_RC(stack_slots));
5637 // No match rule because this operand is only generated in matching
5638 op_cost(50);
5639 format %{ "[$reg]" %}
5640 interface(MEMORY_INTER) %{
5641 base(0x1d); // SP
5642 index(0x0); // No Index
5643 scale(0x0); // No Scale
5644 disp($reg); // Stack Offset
5645 %}
5646 %}
5648 operand stackSlotD(sRegD reg) %{
5649 constraint(ALLOC_IN_RC(stack_slots));
5650 // No match rule because this operand is only generated in matching
5651 op_cost(50);
5652 format %{ "[$reg]" %}
5653 interface(MEMORY_INTER) %{
5654 base(0x1d); // SP
5655 index(0x0); // No Index
5656 scale(0x0); // No Scale
5657 disp($reg); // Stack Offset
5658 %}
5659 %}
5661 operand stackSlotL(sRegL reg) %{
5662 constraint(ALLOC_IN_RC(stack_slots));
5663 // No match rule because this operand is only generated in matching
5664 op_cost(50);
5665 format %{ "[$reg]" %}
5666 interface(MEMORY_INTER) %{
5667 base(0x1d); // SP
5668 index(0x0); // No Index
5669 scale(0x0); // No Scale
5670 disp($reg); // Stack Offset
5671 %}
5672 %}
5675 //------------------------OPERAND CLASSES--------------------------------------
5676 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// The full set of addressing-mode operands an instruction's "memory"
// operand may match; adlc tries these in turn during matching.
5677 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5680 //----------PIPELINE-----------------------------------------------------------
5681 // Rules which define the behavior of the target architectures pipeline.
5683 pipeline %{
5685 //----------ATTRIBUTES---------------------------------------------------------
5686 attributes %{
5687 fixed_size_instructions; // Fixed size instructions
5688 branch_has_delay_slot; // branch have delay slot in gs2
5689 max_instructions_per_bundle = 1; // 1 instruction per bundle
5690 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5691 bundle_unit_size=4;
5692 instruction_unit_size = 4; // An instruction is 4 bytes long
5693 instruction_fetch_unit_size = 16; // The processor fetches one line
5694 instruction_fetch_units = 1; // of 16 bytes
5696 // List of nop instructions
5697 nops( MachNop );
5698 %}
5700 //----------RESOURCES----------------------------------------------------------
5701 // Resources are the functional units available to the machine
5703 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5705 //----------PIPELINE DESCRIPTION-----------------------------------------------
5706 // Pipeline Description specifies the stages in the machine's pipeline
5708 // IF: fetch
5709 // ID: decode
5710 // RD: read
5711 // CA: calculate
5712 // WB: write back
5713 // CM: commit
5715 pipe_desc(IF, ID, RD, CA, WB, CM);
5718 //----------PIPELINE CLASSES---------------------------------------------------
5719 // Pipeline Classes describe the stages in which input and output are
5720 // referenced by the hardware pipeline.
5722 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
// Integer ALU reg-reg pipeline class: sources read in RD, result
// available one cycle after WB; decodes on any decode port, executes on
// either ALU.
5723 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5724 single_instruction;
5725 src1 : RD(read);
5726 src2 : RD(read);
5727 dst : WB(write)+1;
5728 DECODE : ID;
5729 ALU : CA;
5730 %}
5732 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5733 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5734 src1 : RD(read);
5735 src2 : RD(read);
5736 dst : WB(write)+5;
5737 DECODE : ID;
5738 ALU2 : CA;
5739 %}
5741 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5742 src1 : RD(read);
5743 src2 : RD(read);
5744 dst : WB(write)+10;
5745 DECODE : ID;
5746 ALU2 : CA;
5747 %}
5749 //No.19 Integer div operation : dst <-- reg1 div reg2
5750 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5751 src1 : RD(read);
5752 src2 : RD(read);
5753 dst : WB(write)+10;
5754 DECODE : ID;
5755 ALU2 : CA;
5756 %}
5758 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5759 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5760 instruction_count(2);
5761 src1 : RD(read);
5762 src2 : RD(read);
5763 dst : WB(write)+10;
5764 DECODE : ID;
5765 ALU2 : CA;
5766 %}
5768 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5769 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5770 instruction_count(2);
5771 src1 : RD(read);
5772 src2 : RD(read);
5773 dst : WB(write);
5774 DECODE : ID;
5775 ALU : CA;
5776 %}
5778 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5779 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5780 instruction_count(2);
5781 src : RD(read);
5782 dst : WB(write);
5783 DECODE : ID;
5784 ALU : CA;
5785 %}
5787 //no.16 load Long from memory :
5788 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5789 instruction_count(2);
5790 mem : RD(read);
5791 dst : WB(write)+5;
5792 DECODE : ID;
5793 MEM : RD;
5794 %}
5796 //No.17 Store Long to Memory :
5797 pipe_class ialu_storeL(mRegL src, memory mem) %{
5798 instruction_count(2);
5799 mem : RD(read);
5800 src : RD(read);
5801 DECODE : ID;
5802 MEM : RD;
5803 %}
5805 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5806 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5807 single_instruction;
5808 src : RD(read);
5809 dst : WB(write);
5810 DECODE : ID;
5811 ALU : CA;
5812 %}
5814 //No.3 Integer move operation : dst <-- reg
5815 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5816 src : RD(read);
5817 dst : WB(write);
5818 DECODE : ID;
5819 ALU : CA;
5820 %}
5822 //No.4 No instructions : do nothing
5823 pipe_class empty( ) %{
5824 instruction_count(0);
5825 %}
5827 //No.5 UnConditional branch :
5828 pipe_class pipe_jump( label labl ) %{
5829 multiple_bundles;
5830 DECODE : ID;
5831 BR : RD;
5832 %}
5834 //No.6 ALU Conditional branch :
5835 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5836 multiple_bundles;
5837 src1 : RD(read);
5838 src2 : RD(read);
5839 DECODE : ID;
5840 BR : RD;
5841 %}
5843 //no.7 load integer from memory :
5844 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5845 mem : RD(read);
5846 dst : WB(write)+3;
5847 DECODE : ID;
5848 MEM : RD;
5849 %}
5851 //No.8 Store Integer to Memory :
5852 pipe_class ialu_storeI(mRegI src, memory mem) %{
5853 mem : RD(read);
5854 src : RD(read);
5855 DECODE : ID;
5856 MEM : RD;
5857 %}
//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;  // long-latency FPU resource (divide)
%}

// Integer -> floating-point conversion.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// Floating-point -> integer conversion.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// Move FPR -> GPR (mfc1-style transfer); modeled on the MEM resource.
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD;
%}

// Move GPR -> FPR (mtc1-style transfer); occupies MEM for 5 cycles.
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- sqrt(reg)
// NOTE(review): sqrt is unary; src2 only mirrors the binary FPU classes and
// is never a real input — confirm before relying on it.
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}
//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  dst    : WB(write)+3;  // load-use latency, same as ialu_loadI
  DECODE : ID;
  MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

// Multi-instruction memory sequence (e.g. atomics); serializes the pipeline.
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

// Call through the branch unit; large fixed latency to discourage scheduling
// anything around it.
pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}
5976 %}
5980 //----------INSTRUCTIONS-------------------------------------------------------
5981 //
5982 // match -- States which machine-independent subtree may be replaced
5983 // by this instruction.
5984 // ins_cost -- The estimated cost of this instruction is used by instruction
5985 // selection to identify a minimum cost tree of machine
5986 // instructions that matches a tree of machine-independent
5987 // instructions.
5988 // format -- A string providing the disassembly for this instruction.
5989 // The value of an instruction's operand may be inserted
5990 // by referring to it with a '$' prefix.
5991 // opcode -- Three instruction opcodes may be provided. These are referred
5992 // to within an encode class as $primary, $secondary, and $tertiary
5993 // respectively. The primary opcode is commonly used to
5994 // indicate the type of machine instruction, while secondary
5995 // and tertiary are often used for prefix options or addressing
5996 // modes.
5997 // ins_encode -- A list of encode classes with parameters. The encode class
5998 // name must have been defined in an 'enc_class' specification
5999 // in the encode section of the architecture description.
// Load Integer
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// lw sign-extends the 32-bit value to 64 bits on MIPS64, so the plain
// integer load also implements ConvI2L(LoadI) with no extra instruction.
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches the (x << 24) >> 24 narrowing idiom and folds it into a signed
// byte load.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// (x & 0xFF) folded into an unsigned byte load.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Short (16 bit signed)
// (x << 16) >> 16 folded into a signed halfword load.
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// (x & 0xFFFF) folded into an unsigned halfword load.
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
instruct loadL(mRegL dst, memory mem) %{
  // predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Load Long - UNaligned
// Currently emitted as a plain ld; the higher cost discourages selection.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Store Long
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}

// Store long zero: cheaper (cost 180) because the zero register is used
// directly and no constant needs to be materialized.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd zero, $mem #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}

// Store long immediate (non-zero): constant is materialized by the encoding.
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $src, $mem #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}

// Load + decode of a compressed oop in one instruction. Only legal when the
// heap uses zero base and zero shift, so decoding is the identity and a
// zero-extending lwu suffices.
instruct loadN2P(mRegP dst, memory mem)
%{
  match(Set dst (DecodeN (LoadN mem)));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# @ loadN2P" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}

// Load Pointer
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Klass Pointer
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load narrow Klass Pointer
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}

// Load + decode of a narrow klass; same zero-base/zero-shift restriction as
// loadN2P above, but for the klass compression scheme.
instruct loadN2PKlass(mRegP dst, memory mem)
%{
  match(Set dst (DecodeNKlass (LoadNKlass mem)));
  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    // move() materializes an arbitrary 32-bit constant.
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Materialize an arbitrary 64-bit constant via set64 (up to a multi-
// instruction sequence; cheaper special cases below handle imm16 and zero).
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6200 /*
6201 // Load long value from constant table (predicated by immL_expensive).
6202 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6203 match(Set dst src);
6204 ins_cost(150);
6205 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6206 ins_encode %{
6207 int con_offset = $constantoffset($src);
6209 if (Assembler::is_simm16(con_offset)) {
6210 __ ld($dst$$Register, $constanttablebase, con_offset);
6211 } else {
6212 __ set64(AT, con_offset);
6213 if (UseLEXT1) {
6214 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6215 } else {
6216 __ daddu(AT, $constanttablebase, AT);
6217 __ ld($dst$$Register, AT, 0);
6218 }
6219 }
6220 %}
6221 ins_pipe(ialu_loadI);
6222 %}
6223 */
// 16-bit signed long constant: a single daddiu from the zero register.
// Cost 105 < loadConL_set64's 120 so the matcher prefers this form.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long zero: cheapest of the long-constant forms (cost 100), copies R0.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Load Range (array length); same encoding as a plain integer load.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}

// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Compressed Pointer
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Encode + store of an oop in one instruction; legal only when encoding is
// the identity (zero heap base and shift), so the low 32 bits can be stored
// directly.
instruct storeP2N(memory mem, mRegP src)
%{
  match(Set mem (StoreN mem (EncodeP src)));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# @ storeP2N" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store narrow klass pointer.
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Encode + store of a klass pointer; same identity-encoding restriction as
// storeP2N, but for the klass compression scheme.
instruct storeP2NKlass(memory mem, mRegP src)
%{
  match(Set mem (StoreNKlass mem (EncodePKlass src)));
  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store compressed NULL.
instruct storeImmN0(memory mem, immN0 zero)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 zero, $mem\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Byte store of a narrowed long: sb only writes the low 8 bits, so the
// explicit ConvL2I can be folded away.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// lb sign-extends to the full register width, so the ConvI2L is free.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// lbu zero-extends, which for an unsigned byte is also the correct I2L
// widening.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// (x << 24) >> 24 on a short load folds into a signed byte load.
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// lh sign-extends to the full register width, so the ConvI2L is free.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Word store of a narrowed long: sw writes only the low 32 bits, so the
// explicit ConvL2I can be folded away.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Materialize a pointer constant. Oop and metadata constants must be emitted
// with relocation info (patchable_set48 keeps a fixed-length sequence so the
// relocation can later be patched); plain addresses use the cheaper set64.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      // Klass constant: record it and emit a patchable 48-bit load.
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      // Oop constant: same, but with an oop relocation.
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      // Raw address: no relocation needed.
      __ set64(dst, (long)value);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6497 /*
6498 instruct loadConP_load(mRegP dst, immP_load src) %{
6499 match(Set dst src);
6501 ins_cost(100);
6502 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6504 ins_encode %{
6506 int con_offset = $constantoffset($src);
6508 if (Assembler::is_simm16(con_offset)) {
6509 __ ld($dst$$Register, $constanttablebase, con_offset);
6510 } else {
6511 __ set64(AT, con_offset);
6512 if (UseLEXT1) {
6513 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6514 } else {
6515 __ daddu(AT, $constanttablebase, AT);
6516 __ ld($dst$$Register, AT, 0);
6517 }
6518 }
6519 %}
6521 ins_pipe(ialu_loadI);
6522 %}
6523 */
// Cheap non-oop pointer constant: no relocation, plain set64. Lowest cost of
// the general pointer-constant forms (80) so it wins when applicable.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}

// Safepoint polling page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}

// NULL pointer constant: copy the zero register.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Compressed NULL: also just the zero register.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Narrow-oop constant: set_narrow_oop emits the encoded value with oop
// relocation info.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}

// Narrow-klass constant, analogous to loadConN.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    // RA will be used in generate_forward_exception()
    __ push(RA);

    // Pass the method oop in S3, then jump; the branch-delay slot is filled
    // with a nop.
    __ move(S3, oop);
    __ jr(target);
    __ delayed()->nop();
  %}

  ins_pipe( pipe_jump );
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    // X86 leaves this function empty
    __ block_comment("CreateException is empty in MIPS");
  %}
  ins_pipe( empty );
  // ins_pipe( pipe_jump );
%}
6644 /* The mechanism of exception handling is clear now.
6646 - Common try/catch:
6647 [stubGenerator_mips.cpp] generate_forward_exception()
6648 |- V0, V1 are created
6649 |- T9 <= SharedRuntime::exception_handler_for_return_address
6650 `- jr T9
6651 `- the caller's exception_handler
6652 `- jr OptoRuntime::exception_blob
6653 `- here
6654 - Rethrow(e.g. 'unwind'):
6655 * The callee:
6656 |- an exception is triggered during execution
6657 `- exits the callee method through RethrowException node
6658 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6659 `- The callee jumps to OptoRuntime::rethrow_stub()
6660 * In OptoRuntime::rethrow_stub:
6661 |- The VM calls _rethrow_Java to determine the return address in the caller method
6662 `- exits the stub with tailjmpInd
6663 |- pops exception_oop(V0) and exception_pc(V1)
6664 `- jumps to the return address(usually an exception_handler)
6665 * The caller:
6666 `- continues processing the exception_blob with V0/V1
6667 */
6669 /*
6670 Disassembling OptoRuntime::rethrow_stub()
6672 ; locals
6673 0x2d3bf320: addiu sp, sp, 0xfffffff8
6674 0x2d3bf324: sw ra, 0x4(sp)
6675 0x2d3bf328: sw fp, 0x0(sp)
6676 0x2d3bf32c: addu fp, sp, zero
6677 0x2d3bf330: addiu sp, sp, 0xfffffff0
6678 0x2d3bf334: sw ra, 0x8(sp)
6679 0x2d3bf338: sw t0, 0x4(sp)
6680 0x2d3bf33c: sw sp, 0x0(sp)
6682 ; get_thread(S2)
6683 0x2d3bf340: addu s2, sp, zero
6684 0x2d3bf344: srl s2, s2, 12
6685 0x2d3bf348: sll s2, s2, 2
6686 0x2d3bf34c: lui at, 0x2c85
6687 0x2d3bf350: addu at, at, s2
6688 0x2d3bf354: lw s2, 0xffffcc80(at)
6690 0x2d3bf358: lw s0, 0x0(sp)
0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6692 0x2d3bf360: sw s2, 0xc(sp)
6694 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6695 0x2d3bf364: lw a0, 0x4(sp)
6696 0x2d3bf368: lw a1, 0xc(sp)
6697 0x2d3bf36c: lw a2, 0x8(sp)
6698 ;; Java_To_Runtime
6699 0x2d3bf370: lui t9, 0x2c34
6700 0x2d3bf374: addiu t9, t9, 0xffff8a48
6701 0x2d3bf378: jalr t9
6702 0x2d3bf37c: nop
6704 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6706 0x2d3bf384: lw s0, 0xc(sp)
6707 0x2d3bf388: sw zero, 0x118(s0)
6708 0x2d3bf38c: sw zero, 0x11c(s0)
6709 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6710 0x2d3bf394: addu s2, s0, zero
6711 0x2d3bf398: sw zero, 0x144(s2)
6712 0x2d3bf39c: lw s0, 0x4(s2)
6713 0x2d3bf3a0: addiu s4, zero, 0x0
6714 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6715 0x2d3bf3a8: nop
6716 0x2d3bf3ac: addiu sp, sp, 0x10
6717 0x2d3bf3b0: addiu sp, sp, 0x8
6718 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6719 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6720 0x2d3bf3bc: lui at, 0x2b48
6721 0x2d3bf3c0: lw at, 0x100(at)
6723 ; tailjmpInd: Restores exception_oop & exception_pc
6724 0x2d3bf3c4: addu v1, ra, zero
6725 0x2d3bf3c8: addu v0, s1, zero
6726 0x2d3bf3cc: jr s3
6727 0x2d3bf3d0: nop
6728 ; Exception:
6729 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6730 0x2d3bf3d8: addiu s1, s1, 0x40
6731 0x2d3bf3dc: addiu s2, zero, 0x0
6732 0x2d3bf3e0: addiu sp, sp, 0x10
6733 0x2d3bf3e4: addiu sp, sp, 0x8
6734 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6735 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6736 0x2d3bf3f0: lui at, 0x2b48
6737 0x2d3bf3f4: lw at, 0x100(at)
6738 ; TailCalljmpInd
6739 __ push(RA); ; to be used in generate_forward_exception()
6740 0x2d3bf3f8: addu t7, s2, zero
6741 0x2d3bf3fc: jr s1
6742 0x2d3bf400: nop
6743 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark the jump site and attach a runtime-call relocation so the target
    // can be found/patched later.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ patchable_jump((address)OptoRuntime::rethrow_stub());
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Branch Instructions --- long offset versions
// These use jmp_far/beq_long/bne_long, which can reach targets beyond the
// range of a single short branch.

// Jump Direct
instruct jmpDir_long(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir_long" %}

  ins_encode %{
    Label* L = $labl$$label;
    __ jmp_far(*L);
  %}

  ins_pipe( pipe_jump );
  //ins_pc_relative(1);
%}

// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back branch on a signed int compare; cmpcode selects the
// condition. Unsigned-looking cases (above/below) are still emitted with the
// signed slt here because CmpI is a signed compare.
instruct jmpLoopEnd_long(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_long" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label* L = $labl$$label;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);          // AT = (op2 < op1)
        __ bne_long(AT, R0, *L);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);          // branch when !(op1 < op2)
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        __ bne_long(AT, R0, *L);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);          // branch when !(op2 < op1)
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back branch against an int immediate. The immediate is first
// materialized into AT; note AT is then also reused as the slt scratch in the
// comparison cases, which is safe because slt reads its sources before
// writing AT.
instruct jmpLoopEnd_reg_immI_long(cmpOp cop, mRegI src1, immI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_immI_long" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = AT;
    Label* L = $labl$$label;
    int flag = $cop$$cmpcode;

    __ move(op2, $src2$$constant);   // materialize the immediate into AT

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        __ bne_long(AT, R0, *L);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        __ bne_long(AT, R0, *L);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// The "flag" lives in AT: a prior instruction (e.g. the conditional store)
// sets AT non-zero on success, so "equal" branches on AT != 0 and
// "not equal" on AT == 0 — the inversion is intentional.
instruct jmpCon_flags_long(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags_long" %}

  ins_encode %{
    Label* L = $labl$$label;
    switch($cop$$cmpcode) {
      case 0x01: //equal
        __ bne_long(AT, R0, *L);
        break;
      case 0x02: //not equal
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Conditional jumps

// Pointer compare against NULL; only eq/ne are meaningful here, so the other
// cmpcodes fall through to Unimplemented().
instruct branchConP_zero_long(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero_long" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}

// Narrow-oop NULL check without decoding: when the heap base and shift are
// zero, the narrow value is zero iff the decoded pointer is NULL, so the
// DecodeN can be elided.
instruct branchConN2P_zero_long(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero_long" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Pointer compare between two registers; pointers are compared unsigned,
// hence sltu in the ordered cases.
instruct branchConP_long(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
  // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP_long" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);         // AT = (op2 <u op1)
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);         // branch when !(op1 <u op2)
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);         // branch when !(op2 <u op1)
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Narrow-oop compare against compressed NULL; only eq/ne apply.
instruct cmpN_null_branch_long(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch_long" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      default:
        Unimplemented();
    }
  %}
  //TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}

// Narrow-oop compare between two registers; compressed oops compare
// unsigned (sltu).
instruct cmpN_reg_branch_long(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_reg_branch_long" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1_reg, op2_reg, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1_reg, op2_reg, *L);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form unsigned integer compare-and-branch, register/register.
// Uses sltu to materialize the unsigned relation in AT, then a far branch.
instruct branchConIU_reg_reg_long(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);           // AT = (op2 < op1) unsigned
        __ bne_long(AT, R0, *L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ beq_long(AT, R0, *L);         // branch when NOT (op1 < op2)
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ bne_long(AT, R0, *L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form unsigned integer compare-and-branch, register/immediate.
// The immediate is first loaded into AT; the sltu cases then read AT as a
// source and overwrite it with the comparison result (read-before-write is
// safe within a single sltu).
instruct branchConIU_reg_imm_long(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ move(AT, val);                    // AT := immediate operand
    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, AT, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, AT, *L);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);            // AT = (imm < op1) unsigned
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form SIGNED integer compare-and-branch, register/register (slt).
// NOTE: the cmpcode labels were previously annotated with the unsigned
// names (above/below); the signed names are used here since slt is signed.
instruct branchConI_reg_reg_long(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, op2, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, op2, *L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);            // AT = (op2 < op1) signed
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ beq_long(AT, R0, *L);         // branch when NOT (op1 < op2)
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form signed integer compare against zero (immI0), cheaper than the
// generic immediate form: no constant load, compares directly against R0.
instruct branchConI_reg_imm0_long(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, R0, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, R0, *L);
        break;
      case 0x03: //greater
        __ slt(AT, R0, op1);             // AT = (0 < op1) signed
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, R0);
        __ beq_long(AT, R0, *L);         // branch when NOT (op1 < 0)
        break;
      case 0x05: //less
        __ slt(AT, op1, R0);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //less_equal
        __ slt(AT, R0, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form signed integer compare-and-branch, register/immediate.
// Loads the constant into AT first; the slt cases read AT (the constant)
// and overwrite it with the comparison result.
instruct branchConI_reg_imm_long(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ move(AT, val);                    // AT := immediate operand
    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, AT, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, AT, *L);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);             // AT = (imm < op1) signed
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form UNSIGNED compare against zero. Several cases degenerate:
//   above       (x >u 0)  == (x != 0)
//   above_equal (x >=u 0) is always true  -> unconditional branch
//   below       (x <u 0)  is never true   -> emit nothing (bare return)
//   below_equal (x <=u 0) == (x == 0)
instruct branchConIU_reg_imm0_long(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(op1, R0, *L);
        break;
      case 0x02: //not_equal
        __ bne_long(op1, R0, *L);
        break;
      case 0x03: //above
        __ bne_long(R0, op1, *L);
        break;
      case 0x04: //above_equal
        __ beq_long(R0, R0, *L);         // always taken
        break;
      case 0x05: //below
        return;                          // never taken: emit no code
        break;                           // (unreachable)
      case 0x06: //below_equal
        __ beq_long(op1, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form unsigned compare against a 16-bit immediate (immI16).
// above_equal/below can use sltiu (immediate form) directly; the other
// cases must first materialize the constant in AT via move.
instruct branchConIU_reg_immI16_long(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16_long" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ move(AT, val);
        __ beq_long(op1, AT, *L);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        __ bne_long(op1, AT, *L);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);            // AT = (imm < op1) unsigned
        __ bne_long(R0, AT, *L);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);          // immediate compare, no move needed
        __ beq_long(AT, R0, *L);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        __ bne_long(R0, AT, *L);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        __ beq_long(AT, R0, *L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form signed LONG compare-and-branch, register/register.
// Same slt-based pattern as the int form; operands are 64-bit registers.
instruct branchConL_regL_regL_long(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL_long" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(opr1_reg, opr2_reg, *target);
        break;

      case 0x02: //not_equal
        __ bne_long(opr1_reg, opr2_reg, *target);
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);  // AT = (op2 < op1) signed
        __ bne_long(AT, R0, *target);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        __ beq_long(AT, R0, *target);    // branch when NOT (op1 < op2)
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        __ bne_long(AT, R0, *target);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        __ beq_long(AT, R0, *target);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form signed LONG compare against zero (immL0): compares directly
// against R0, avoiding a constant load (hence the lower ins_cost).
instruct branchConL_regL_immL0_long(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0_long" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = R0;              // zero operand

    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq_long(opr1_reg, opr2_reg, *target);
        break;

      case 0x02: //not_equal
        __ bne_long(opr1_reg, opr2_reg, *target);
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        __ bne_long(AT, R0, *target);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        __ beq_long(AT, R0, *target);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        __ bne_long(AT, R0, *target);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        __ beq_long(AT, R0, *target);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Far-form signed LONG compare against an arbitrary 64-bit immediate.
// set64 loads the constant into AT (aliased as opr2_reg); each slt reads
// AT as a source before overwriting it with the comparison result.
instruct branchConL_regL_immL_long(cmpOp cmp, mRegL src1, immL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL_long" %}
  ins_cost(180);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = AT;              // immediate will live in AT

    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ set64(opr2_reg, $src2$$constant); // materialize 64-bit constant

    switch(flag) {
      case 0x01: //equal
        __ beq_long(opr1_reg, opr2_reg, *target);
        break;

      case 0x02: //not_equal
        __ bne_long(opr1_reg, opr2_reg, *target);
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);  // reads then clobbers AT
        __ bne_long(AT, R0, *target);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        __ beq_long(AT, R0, *target);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        __ bne_long(AT, R0, *target);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        __ beq_long(AT, R0, *target);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Far-form single-precision float compare-and-branch.
// Uses the FPU compare (c.cond.s) to set the FP condition bit, then a far
// bc1t/bc1f. The unordered variants (c_ule/c_ult) make greater/less tests
// fall through when an operand is NaN, matching Java float semantics.
// not_equal uses c_eq + bc1f (not c_ueq): with NaN operands c_eq is false,
// so the != branch is taken — same rationale as documented in
// branchConD_reg_reg_long below.
instruct branchConF_reg_reg_long(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg_long" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t_long(*L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f_long(*L);                // NaN compares unequal: branch taken
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);    // unordered-or-<=: false only if op1 > op2
        __ bc1f_long(*L);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f_long(*L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1t_long(*L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1t_long(*L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Far-form double-precision float compare-and-branch. Mirrors the single-
// precision form above; see there for the NaN/unordered discussion.
instruct branchConD_reg_reg_long(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg_long" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t_long(*L);
        break;
      case 0x02: //not_equal
        // c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f_long(*L);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);    // false only if op1 > op2 (ordered)
        __ bc1f_long(*L);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f_long(*L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1t_long(*L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1t_long(*L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7641 // ============================================================================
7642 // Branch Instructions -- short offset versions
7644 // Jump Direct
// Short-offset unconditional jump (Goto). The branch sits in a delay-slot
// architecture, so a nop always follows in the delay slot.
// $labl$$label may be NULL (label not supplied): &L recovers the pointer,
// and a zero-offset branch is emitted in that case.
instruct jmpDir_short(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir_short" %}

  ins_encode %{
    Label &L = *($labl$$label);
    if(&L)
      __ b(L);
    else
      __ b(int(0));                      // no label: zero-offset placeholder
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
  ins_short_branch(1);
%}
7666 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short-offset counted-loop back-branch, signed int register/register.
// NOTE: the slt cases were previously annotated with unsigned names
// (above/below); corrected to the signed names since slt is signed.
// Each case handles an unbound label (&L == NULL) by emitting offset 0.
instruct jmpLoopEnd_short(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_short" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// Short-offset counted-loop back-branch, signed int register/immediate.
// The constant is moved into AT, which op2 aliases; the slt cases read AT
// (the constant) and then overwrite it with the comparison result.
// NOTE: slt case labels corrected from unsigned (above/below) to signed names.
instruct jmpLoopEnd_reg_immI_short(cmpOp cop, mRegI src1, immI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_immI_short" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = AT;                   // immediate lives in AT
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    __ move(op2, $src2$$constant);

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);            // reads then clobbers AT
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
  ins_short_branch(1);
%}
// This match pattern exists for StoreIConditional, since an IfNode cannot
// be matched without a RegFlags operand on this port. fujie 2012/07/17
// The "flags" value is materialized in AT; note the inverted branch:
// "equal" (success) branches when AT != 0, "not equal" when AT == 0.
instruct jmpCon_flags_short(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags_short" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode) {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
  ins_short_branch(1);
%}
7830 // Conditional jumps
// Short-offset pointer null check: compare $op1 against R0 (NULL).
// Only eq/ne apply to a null comparison.
instruct branchConP_zero_short(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero_short" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;                   // NULL pointer is zero
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);      // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset null check on a DecodeN'd narrow oop. The predicate
// restricts this to zero-base/zero-shift compressed oops, where the narrow
// value is null iff the decoded pointer is null — so the narrow register
// can be compared against R0 directly, skipping the decode.
instruct branchConN2P_zero_short(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero_short" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset pointer compare-and-branch, register/register.
// Pointers compare as unsigned values (sltu).
instruct branchConP_short(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
  // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP_short" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);      // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);           // AT = (op2 < op1) unsigned
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset compressed-oop null check (near-branch twin of
// cmpN_null_branch_long). Only eq/ne apply.
instruct cmpN_null_branch_short(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch_short" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;                   // null narrow oop is zero
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);      // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}
  //TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset compare of two compressed oops. Narrow oops compare as
// unsigned values (sltu), hence the above/below labels.
instruct cmpN_reg_branch_short(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_reg_branch_short" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1_reg, op2_reg, L);
        else
          __ beq(op1_reg, op2_reg, (int)0);  // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1_reg, op2_reg, L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);   // AT = (op2 < op1) unsigned
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset unsigned integer compare-and-branch, register/register
// (near-branch twin of branchConIU_reg_reg_long).
instruct branchConIU_reg_reg_short(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);      // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);           // AT = (op2 < op1) unsigned
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset unsigned integer compare-and-branch, register/immediate.
// The constant is loaded into AT up front; the sltu cases read AT as a
// source before overwriting it with the comparison result.
instruct branchConIU_reg_imm_short(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);                    // AT := immediate operand
    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);       // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);            // AT = (imm < op1) unsigned
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset SIGNED integer compare-and-branch, register/register (slt).
// NOTE: slt case labels corrected from the unsigned names (above/below)
// to the signed names, matching the signed slt instruction used.
instruct branchConI_reg_reg_short(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);      // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset signed compare against zero. Uses the dedicated MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez), so no slt is needed.
instruct branchConI_reg_imm0_short(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);       // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset signed integer compare-and-branch, register/immediate.
// The constant is loaded into AT up front; slt cases read AT as a source
// before overwriting it with the comparison result.
instruct branchConI_reg_imm_short(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);                    // AT := immediate operand
    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);       // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);             // AT = (imm < op1) signed
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset UNSIGNED compare against zero. Degenerate cases:
//   above       (x >u 0)  == (x != 0)
//   above_equal (x >=u 0) is always true  -> unconditional branch
//   below       (x <u 0)  is never true   -> emit nothing (bare return)
//   below_equal (x <=u 0) == (x == 0)
instruct branchConIU_reg_imm0_short(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);       // unbound label: zero offset
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ beq(R0, R0, L);             // always taken
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        return;                          // never taken: emit no code (skips delay-slot nop too)
        break;                           // (unreachable)
      case 0x06: //below_equal
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-offset unsigned compare against a 16-bit immediate (immI16).
// above_equal/below use sltiu (immediate form) directly; the other cases
// materialize the constant in AT via move first.
instruct branchConIU_reg_immI16_short(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16_short" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);       // unbound label: zero offset
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);            // AT = (imm < op1) unsigned
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);          // immediate compare, no move needed
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();                 // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-form branch on signed 64-bit register/register compare. Ordered
// conditions go through slt into AT, then branch on AT. Unlike the sibling
// encodings, the delay-slot nop here is emitted inside each case rather than
// once after the switch (same emitted sequence either way).
// NOTE(review): `if (&target)` is always true for a bound reference; the
// offset-0 else arms appear dead -- confirm against the ADL label contract.
8516 instruct branchConL_regL_regL_short(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
8517 match( If cmp (CmpL src1 src2) );
8518 effect(USE labl);
8519 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL_short" %}
8520 ins_cost(250);
8522 ins_encode %{
8523 Register opr1_reg = as_Register($src1$$reg);
8524 Register opr2_reg = as_Register($src2$$reg);
8526 Label &target = *($labl$$label);
8527 int flag = $cmp$$cmpcode;
8529 switch(flag) {
8530 case 0x01: //equal
8531 if (&target)
8532 __ beq(opr1_reg, opr2_reg, target);
8533 else
8534 __ beq(opr1_reg, opr2_reg, (int)0);
8535 __ delayed()->nop();
8536 break;
8538 case 0x02: //not_equal
8539 if(&target)
8540 __ bne(opr1_reg, opr2_reg, target);
8541 else
8542 __ bne(opr1_reg, opr2_reg, (int)0);
8543 __ delayed()->nop();
8544 break;
8546 case 0x03: //greater
8547 __ slt(AT, opr2_reg, opr1_reg);
8548 if(&target)
8549 __ bne(AT, R0, target);
8550 else
8551 __ bne(AT, R0, (int)0);
8552 __ delayed()->nop();
8553 break;
8555 case 0x04: //greater_equal
8556 __ slt(AT, opr1_reg, opr2_reg);
8557 if(&target)
8558 __ beq(AT, R0, target);
8559 else
8560 __ beq(AT, R0, (int)0);
8561 __ delayed()->nop();
8563 break;
8565 case 0x05: //less
8566 __ slt(AT, opr1_reg, opr2_reg);
8567 if(&target)
8568 __ bne(AT, R0, target);
8569 else
8570 __ bne(AT, R0, (int)0);
8571 __ delayed()->nop();
8573 break;
8575 case 0x06: //less_equal
8576 __ slt(AT, opr2_reg, opr1_reg);
8578 if(&target)
8579 __ beq(AT, R0, target);
8580 else
8581 __ beq(AT, R0, (int)0);
8582 __ delayed()->nop();
8584 break;
8586 default:
8587 Unimplemented();
8588 }
8589 %}
8592 ins_pc_relative(1);
8593 ins_pipe( pipe_alu_branch );
8594 ins_short_branch(1);
8595 %}
// Short-form branch on signed 64-bit compare against zero. Uses the MIPS
// compare-with-zero branches (bgtz/bgez/blez) directly where one exists;
// only 'less' needs an slt into AT first.
// NOTE(review): `if (&target)` is always true for a bound reference; the
// offset-0 else arms appear dead -- confirm against the ADL label contract.
8598 instruct branchConL_regL_immL0_short(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
8599 match( If cmp (CmpL src1 zero) );
8600 effect(USE labl);
8601 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0_short" %}
8602 ins_cost(150);
8604 ins_encode %{
8605 Register opr1_reg = as_Register($src1$$reg);
8606 Label &target = *($labl$$label);
8607 int flag = $cmp$$cmpcode;
8609 switch(flag) {
8610 case 0x01: //equal
8611 if (&target)
8612 __ beq(opr1_reg, R0, target);
8613 else
8614 __ beq(opr1_reg, R0, int(0));
8615 break;
8617 case 0x02: //not_equal
8618 if(&target)
8619 __ bne(opr1_reg, R0, target);
8620 else
8621 __ bne(opr1_reg, R0, (int)0);
8622 break;
8624 case 0x03: //greater
8625 if(&target)
8626 __ bgtz(opr1_reg, target);
8627 else
8628 __ bgtz(opr1_reg, (int)0);
8629 break;
8631 case 0x04: //greater_equal
8632 if(&target)
8633 __ bgez(opr1_reg, target);
8634 else
8635 __ bgez(opr1_reg, (int)0);
8636 break;
8638 case 0x05: //less
8639 __ slt(AT, opr1_reg, R0);
8640 if(&target)
8641 __ bne(AT, R0, target);
8642 else
8643 __ bne(AT, R0, (int)0);
8644 break;
8646 case 0x06: //less_equal
8647 if (&target)
8648 __ blez(opr1_reg, target);
8649 else
8650 __ blez(opr1_reg, int(0));
8651 break;
8653 default:
8654 Unimplemented();
8655 }
8656 __ delayed()->nop();  // fill the MIPS branch delay slot
8657 %}
8660 ins_pc_relative(1);
8661 ins_pipe( pipe_alu_branch );
8662 ins_short_branch(1);
8663 %}
// Short-form branch on signed 64-bit compare against an arbitrary long
// immediate. set64 materializes the constant into AT (aliased as opr2_reg);
// the slt cases then overwrite AT with the comparison result after reading
// it, which is safe because slt reads its sources before writing its dest.
// NOTE(review): `if (&target)` is always true for a bound reference; the
// offset-0 else arms appear dead -- confirm against the ADL label contract.
8665 instruct branchConL_regL_immL_short(cmpOp cmp, mRegL src1, immL src2, label labl) %{
8666 match( If cmp (CmpL src1 src2) );
8667 effect(USE labl);
8668 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL_short" %}
8669 ins_cost(180);
8671 ins_encode %{
8672 Register opr1_reg = as_Register($src1$$reg);
8673 Register opr2_reg = AT;
8675 Label &target = *($labl$$label);
8676 int flag = $cmp$$cmpcode;
8678 __ set64(opr2_reg, $src2$$constant);
8680 switch(flag) {
8681 case 0x01: //equal
8682 if (&target)
8683 __ beq(opr1_reg, opr2_reg, target);
8684 else
8685 __ beq(opr1_reg, opr2_reg, (int)0);
8686 break;
8688 case 0x02: //not_equal
8689 if(&target)
8690 __ bne(opr1_reg, opr2_reg, target);
8691 else
8692 __ bne(opr1_reg, opr2_reg, (int)0);
8693 break;
8695 case 0x03: //greater
8696 __ slt(AT, opr2_reg, opr1_reg);
8697 if(&target)
8698 __ bne(AT, R0, target);
8699 else
8700 __ bne(AT, R0, (int)0);
8701 break;
8703 case 0x04: //greater_equal
8704 __ slt(AT, opr1_reg, opr2_reg);
8705 if(&target)
8706 __ beq(AT, R0, target);
8707 else
8708 __ beq(AT, R0, (int)0);
8709 break;
8711 case 0x05: //less
8712 __ slt(AT, opr1_reg, opr2_reg);
8713 if(&target)
8714 __ bne(AT, R0, target);
8715 else
8716 __ bne(AT, R0, (int)0);
8717 break;
8719 case 0x06: //less_equal
8720 __ slt(AT, opr2_reg, opr1_reg);
8721 if(&target)
8722 __ beq(AT, R0, target);
8723 else
8724 __ beq(AT, R0, (int)0);
8725 break;
8727 default:
8728 Unimplemented();
8729 }
8730 __ delayed()->nop();  // fill the MIPS branch delay slot
8731 %}
8734 ins_pc_relative(1);
8735 ins_pipe( pipe_alu_branch );
8736 ins_short_branch(1);
8737 %}
8740 //FIXME
// Short-form branch on single-precision float compare. Each case sets the
// FPU condition flag with a c.cond.s compare and branches on it with
// bc1t/bc1f. greater/greater_equal invert an unordered-or-le / unordered-
// or-lt test with bc1f so that NaN operands fall through (branch not taken).
// NOTE(review): `if (&L)` is always true for a bound reference; the offset-0
// else arms appear dead -- confirm against the ADL label-binding contract.
8741 instruct branchConF_reg_reg_short(cmpOp cmp, regF src1, regF src2, label labl) %{
8742 match( If cmp (CmpF src1 src2) );
8743 effect(USE labl);
8744 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg_short" %}
8746 ins_encode %{
8747 FloatRegister reg_op1 = $src1$$FloatRegister;
8748 FloatRegister reg_op2 = $src2$$FloatRegister;
8749 Label &L = *($labl$$label);
8750 int flag = $cmp$$cmpcode;
8752 switch(flag) {
8753 case 0x01: //equal
8754 __ c_eq_s(reg_op1, reg_op2);
8755 if (&L)
8756 __ bc1t(L);
8757 else
8758 __ bc1t((int)0);
8759 break;
8760 case 0x02: //not_equal
8761 __ c_eq_s(reg_op1, reg_op2);
8762 if (&L)
8763 __ bc1f(L);
8764 else
8765 __ bc1f((int)0);
8766 break;
8767 case 0x03: //greater
8768 __ c_ule_s(reg_op1, reg_op2);
8769 if(&L)
8770 __ bc1f(L);
8771 else
8772 __ bc1f((int)0);
8773 break;
8774 case 0x04: //greater_equal
8775 __ c_ult_s(reg_op1, reg_op2);
8776 if(&L)
8777 __ bc1f(L);
8778 else
8779 __ bc1f((int)0);
8780 break;
8781 case 0x05: //less
8782 __ c_ult_s(reg_op1, reg_op2);
8783 if(&L)
8784 __ bc1t(L);
8785 else
8786 __ bc1t((int)0);
8787 break;
8788 case 0x06: //less_equal
8789 __ c_ule_s(reg_op1, reg_op2);
8790 if(&L)
8791 __ bc1t(L);
8792 else
8793 __ bc1t((int)0);
8794 break;
8795 default:
8796 Unimplemented();
8797 }
8798 __ delayed()->nop();  // fill the MIPS branch delay slot
8799 %}
8801 ins_pc_relative(1);
8802 ins_pipe(pipe_slow);
8803 ins_short_branch(1);
8804 %}
// Short-form branch on double-precision float compare; mirrors
// branchConF_reg_reg_short using the .d compare forms. The not_equal case
// deliberately uses c_eq_d + bc1f (not c_ueq_d) so that NaN != NaN holds --
// see the original fix comment retained below.
// NOTE(review): `if (&L)` is always true for a bound reference; the offset-0
// else arms appear dead -- confirm against the ADL label-binding contract.
8806 instruct branchConD_reg_reg_short(cmpOp cmp, regD src1, regD src2, label labl) %{
8807 match( If cmp (CmpD src1 src2) );
8808 effect(USE labl);
8809 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg_short" %}
8811 ins_encode %{
8812 FloatRegister reg_op1 = $src1$$FloatRegister;
8813 FloatRegister reg_op2 = $src2$$FloatRegister;
8814 Label &L = *($labl$$label);
8815 int flag = $cmp$$cmpcode;
8817 switch(flag) {
8818 case 0x01: //equal
8819 __ c_eq_d(reg_op1, reg_op2);
8820 if (&L)
8821 __ bc1t(L);
8822 else
8823 __ bc1t((int)0);
8824 break;
8825 case 0x02: //not_equal
8826 // c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
8827 __ c_eq_d(reg_op1, reg_op2);
8828 if (&L)
8829 __ bc1f(L);
8830 else
8831 __ bc1f((int)0);
8832 break;
8833 case 0x03: //greater
8834 __ c_ule_d(reg_op1, reg_op2);
8835 if(&L)
8836 __ bc1f(L);
8837 else
8838 __ bc1f((int)0);
8839 break;
8840 case 0x04: //greater_equal
8841 __ c_ult_d(reg_op1, reg_op2);
8842 if(&L)
8843 __ bc1f(L);
8844 else
8845 __ bc1f((int)0);
8846 break;
8847 case 0x05: //less
8848 __ c_ult_d(reg_op1, reg_op2);
8849 if(&L)
8850 __ bc1t(L);
8851 else
8852 __ bc1t((int)0);
8853 break;
8854 case 0x06: //less_equal
8855 __ c_ule_d(reg_op1, reg_op2);
8856 if(&L)
8857 __ bc1t(L);
8858 else
8859 __ bc1t((int)0);
8860 break;
8861 default:
8862 Unimplemented();
8863 }
8864 __ delayed()->nop();  // fill the MIPS branch delay slot
8865 %}
8867 ins_pc_relative(1);
8868 ins_pipe(pipe_slow);
8869 ins_short_branch(1);
8870 %}
8872 // =================== End of branch instructions ==========================
8874 // Call Runtime Instruction
// Direct call into the VM runtime; encoding delegated to the shared
// Java_To_Runtime encode class. Aligned to 16 bytes for the call site.
8875 instruct CallRuntimeDirect(method meth) %{
8876 match(CallRuntime );
8877 effect(USE meth);
8879 ins_cost(300);
8880 format %{ "CALL,runtime #@CallRuntimeDirect" %}
8881 ins_encode( Java_To_Runtime( meth ) );
8882 ins_pipe( pipe_slow );
8883 ins_alignment(16);
8884 %}
8888 //------------------------MemBar Instructions-------------------------------
8889 //Memory barrier flavors
// Acquire barrier: emitted as a full MIPS sync (this port does not use a
// weaker acquire-only form).
8891 instruct membar_acquire() %{
8892 match(MemBarAcquire);
8893 ins_cost(400);
8895 format %{ "MEMBAR-acquire @ membar_acquire" %}
8896 ins_encode %{
8897 __ sync();
8898 %}
8899 ins_pipe(empty);
8900 %}
// LoadFence: full sync, same encoding as the other fence flavors here.
8902 instruct load_fence() %{
8903 match(LoadFence);
8904 ins_cost(400);
8906 format %{ "MEMBAR @ load_fence" %}
8907 ins_encode %{
8908 __ sync();
8909 %}
8910 ins_pipe(pipe_slow);
8911 %}
// Empty encoding: the CAS in the preceding FastLock already provides the
// acquire semantics, so no instruction is emitted.
8913 instruct membar_acquire_lock()
8914 %{
8915 match(MemBarAcquireLock);
8916 ins_cost(0);
8918 size(0);
8919 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
8920 ins_encode();
8921 ins_pipe(empty);
8922 %}
// Release barrier: full sync.
8924 instruct membar_release() %{
8925 match(MemBarRelease);
8926 ins_cost(400);
8928 format %{ "MEMBAR-release @ membar_release" %}
8930 ins_encode %{
8931 // Attention: DO NOT DELETE THIS GUY!
8932 __ sync();
8933 %}
8935 ins_pipe(pipe_slow);
8936 %}
// StoreFence: full sync, same encoding as the other fence flavors here.
8938 instruct store_fence() %{
8939 match(StoreFence);
8940 ins_cost(400);
8942 format %{ "MEMBAR @ store_fence" %}
8944 ins_encode %{
8945 __ sync();
8946 %}
8948 ins_pipe(pipe_slow);
8949 %}
// Empty encoding: the release is performed inside FastUnlock, so no
// instruction is emitted here.
8951 instruct membar_release_lock()
8952 %{
8953 match(MemBarReleaseLock);
8954 ins_cost(0);
8956 size(0);
8957 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
8958 ins_encode();
8959 ins_pipe(empty);
8960 %}
// Volatile barrier: full sync, skipped entirely on uniprocessor systems.
8963 instruct membar_volatile() %{
8964 match(MemBarVolatile);
8965 ins_cost(400);
8967 format %{ "MEMBAR-volatile" %}
8968 ins_encode %{
8969 if( !os::is_MP() ) return; // Not needed on single CPU
8970 __ sync();
8972 %}
8973 ins_pipe(pipe_slow);
8974 %}
// Empty encoding: matched instead of membar_volatile when the matcher proves
// a prior store/load barrier already covers this MemBarVolatile.
8976 instruct unnecessary_membar_volatile() %{
8977 match(MemBarVolatile);
8978 predicate(Matcher::post_store_load_barrier(n));
8979 ins_cost(0);
8981 size(0);
8982 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
8983 ins_encode( );
8984 ins_pipe(empty);
8985 %}
// StoreStore barrier: full sync (no store-store-only barrier used here).
8987 instruct membar_storestore() %{
8988 match(MemBarStoreStore);
8990 ins_cost(400);
8991 format %{ "MEMBAR-storestore @ membar_storestore" %}
8992 ins_encode %{
8993 __ sync();
8994 %}
8995 ins_pipe(empty);
8996 %}
8998 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long register as a pointer. A plain register move,
// elided when the allocator already placed src and dst in the same register.
8999 instruct castX2P(mRegP dst, mRegL src) %{
9000 match(Set dst (CastX2P src));
9001 format %{ "castX2P $dst, $src @ castX2P" %}
9002 ins_encode %{
9003 Register src = $src$$Register;
9004 Register dst = $dst$$Register;
9006 if(src != dst)
9007 __ move(dst, src);
9008 %}
9009 ins_cost(10);
9010 ins_pipe( ialu_regI_mov );
9011 %}
// CastP2X: reinterpret a pointer register as a long. A plain register move,
// elided when src and dst coincide.
9013 instruct castP2X(mRegL dst, mRegP src ) %{
9014 match(Set dst (CastP2X src));
9016 format %{ "mov $dst, $src\t #@castP2X" %}
9017 ins_encode %{
9018 Register src = $src$$Register;
9019 Register dst = $dst$$Register;
9021 if(src != dst)
9022 __ move(dst, src);
9023 %}
9024 ins_pipe( ialu_regI_mov );
9025 %}
// MoveF2I: copy raw float bits into a GPR via mfc1 (32-bit move from FPR).
9027 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
9028 match(Set dst (MoveF2I src));
9029 effect(DEF dst, USE src);
9030 ins_cost(85);
9031 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
9032 ins_encode %{
9033 Register dst = as_Register($dst$$reg);
9034 FloatRegister src = as_FloatRegister($src$$reg);
9036 __ mfc1(dst, src);
9037 %}
9038 ins_pipe( pipe_slow );
9039 %}
// MoveI2F: copy raw int bits into an FPR via mtc1 (32-bit move to FPR).
9041 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
9042 match(Set dst (MoveI2F src));
9043 effect(DEF dst, USE src);
9044 ins_cost(85);
9045 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
9046 ins_encode %{
9047 Register src = as_Register($src$$reg);
9048 FloatRegister dst = as_FloatRegister($dst$$reg);
9050 __ mtc1(src, dst);
9051 %}
9052 ins_pipe( pipe_slow );
9053 %}
// MoveD2L: copy raw double bits into a GPR via dmfc1 (64-bit move from FPR).
9055 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
9056 match(Set dst (MoveD2L src));
9057 effect(DEF dst, USE src);
9058 ins_cost(85);
9059 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
9060 ins_encode %{
9061 Register dst = as_Register($dst$$reg);
9062 FloatRegister src = as_FloatRegister($src$$reg);
9064 __ dmfc1(dst, src);
9065 %}
9066 ins_pipe( pipe_slow );
9067 %}
// MoveL2D: copy raw long bits into an FPR via dmtc1 (64-bit move to FPR).
9069 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
9070 match(Set dst (MoveL2D src));
9071 effect(DEF dst, USE src);
9072 ins_cost(85);
9073 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
9074 ins_encode %{
9075 FloatRegister dst = as_FloatRegister($dst$$reg);
9076 Register src = as_Register($src$$reg);
9078 __ dmtc1(src, dst);
9079 %}
9080 ins_pipe( pipe_slow );
9081 %}
9083 //----------Conditional Move---------------------------------------------------
9084 // Conditional move
// Conditional move of int on signed int compare: dst = src iff
// ($tmp1 <cop> $tmp2). AT holds the comparison result; movz copies when
// AT == 0, movn when AT != 0.
9085 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9086 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9087 ins_cost(80);
9088 format %{
9089 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
9090 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
9091 %}
9093 ins_encode %{
9094 Register op1 = $tmp1$$Register;
9095 Register op2 = $tmp2$$Register;
9096 Register dst = $dst$$Register;
9097 Register src = $src$$Register;
9098 int flag = $cop$$cmpcode;
9100 switch(flag) {
9101 case 0x01: //equal
9102 __ subu32(AT, op1, op2);
9103 __ movz(dst, src, AT);
9104 break;
9106 case 0x02: //not_equal
9107 __ subu32(AT, op1, op2);
9108 __ movn(dst, src, AT);
9109 break;
9111 case 0x03: //greater
9112 __ slt(AT, op2, op1);
9113 __ movn(dst, src, AT);
9114 break;
9116 case 0x04: //greater_equal
9117 __ slt(AT, op1, op2);
9118 __ movz(dst, src, AT);
9119 break;
9121 case 0x05: //less
9122 __ slt(AT, op1, op2);
9123 __ movn(dst, src, AT);
9124 break;
9126 case 0x06: //less_equal
9127 __ slt(AT, op2, op1);
9128 __ movz(dst, src, AT);
9129 break;
9131 default:
9132 Unimplemented();
9133 }
9134 %}
9136 ins_pipe( pipe_slow );
9137 %}
// Conditional move of int on unsigned pointer compare. Full-width subu for
// equality (pointers are 64-bit) and sltu for the ordered conditions; AT is
// the scratch tested by movz/movn.
9139 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
9140 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9141 ins_cost(80);
9142 format %{
9143 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
9144 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
9145 %}
9146 ins_encode %{
9147 Register op1 = $tmp1$$Register;
9148 Register op2 = $tmp2$$Register;
9149 Register dst = $dst$$Register;
9150 Register src = $src$$Register;
9151 int flag = $cop$$cmpcode;
9153 switch(flag) {
9154 case 0x01: //equal
9155 __ subu(AT, op1, op2);
9156 __ movz(dst, src, AT);
9157 break;
9159 case 0x02: //not_equal
9160 __ subu(AT, op1, op2);
9161 __ movn(dst, src, AT);
9162 break;
9164 case 0x03: //above
9165 __ sltu(AT, op2, op1);
9166 __ movn(dst, src, AT);
9167 break;
9169 case 0x04: //above_equal
9170 __ sltu(AT, op1, op2);
9171 __ movz(dst, src, AT);
9172 break;
9174 case 0x05: //below
9175 __ sltu(AT, op1, op2);
9176 __ movn(dst, src, AT);
9177 break;
9179 case 0x06: //below_equal
9180 __ sltu(AT, op2, op1);
9181 __ movz(dst, src, AT);
9182 break;
9184 default:
9185 Unimplemented();
9186 }
9187 %}
9189 ins_pipe( pipe_slow );
9190 %}
// Conditional move of int on unsigned narrow-oop compare. 32-bit subu32 for
// equality (narrow oops are 32-bit) and sltu for the ordered conditions.
9192 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
9193 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
9194 ins_cost(80);
9195 format %{
9196 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
9197 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
9198 %}
9199 ins_encode %{
9200 Register op1 = $tmp1$$Register;
9201 Register op2 = $tmp2$$Register;
9202 Register dst = $dst$$Register;
9203 Register src = $src$$Register;
9204 int flag = $cop$$cmpcode;
9206 switch(flag) {
9207 case 0x01: //equal
9208 __ subu32(AT, op1, op2);
9209 __ movz(dst, src, AT);
9210 break;
9212 case 0x02: //not_equal
9213 __ subu32(AT, op1, op2);
9214 __ movn(dst, src, AT);
9215 break;
9217 case 0x03: //above
9218 __ sltu(AT, op2, op1);
9219 __ movn(dst, src, AT);
9220 break;
9222 case 0x04: //above_equal
9223 __ sltu(AT, op1, op2);
9224 __ movz(dst, src, AT);
9225 break;
9227 case 0x05: //below
9228 __ sltu(AT, op1, op2);
9229 __ movn(dst, src, AT);
9230 break;
9232 case 0x06: //below_equal
9233 __ sltu(AT, op2, op1);
9234 __ movz(dst, src, AT);
9235 break;
9237 default:
9238 Unimplemented();
9239 }
9240 %}
9242 ins_pipe( pipe_slow );
9243 %}
// Conditional move of pointer on unsigned int compare. subu32 for equality
// (int operands) and sltu for the ordered conditions; AT is the scratch
// tested by movz/movn.
9245 instruct cmovP_cmpU_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
9246 match(Set dst (CMoveP (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
9247 ins_cost(80);
9248 format %{
9249 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpU_reg_reg\n\t"
9250 "CMOV $dst,$src\t @cmovP_cmpU_reg_reg"
9251 %}
9252 ins_encode %{
9253 Register op1 = $tmp1$$Register;
9254 Register op2 = $tmp2$$Register;
9255 Register dst = $dst$$Register;
9256 Register src = $src$$Register;
9257 int flag = $cop$$cmpcode;
9259 switch(flag) {
9260 case 0x01: //equal
9261 __ subu32(AT, op1, op2);
9262 __ movz(dst, src, AT);
9263 break;
9265 case 0x02: //not_equal
9266 __ subu32(AT, op1, op2);
9267 __ movn(dst, src, AT);
9268 break;
9270 case 0x03: //above
9271 __ sltu(AT, op2, op1);
9272 __ movn(dst, src, AT);
9273 break;
9275 case 0x04: //above_equal
9276 __ sltu(AT, op1, op2);
9277 __ movz(dst, src, AT);
9278 break;
9280 case 0x05: //below
9281 __ sltu(AT, op1, op2);
9282 __ movn(dst, src, AT);
9283 break;
9285 case 0x06: //below_equal
9286 __ sltu(AT, op2, op1);
9287 __ movz(dst, src, AT);
9288 break;
9290 default:
9291 Unimplemented();
9292 }
9293 %}
9295 ins_pipe( pipe_slow );
9296 %}
// Conditional move of pointer on single-precision float compare: a c.cond.s
// compare sets the FPU flag, then movt/movf copy on flag true/false.
// NOTE(review): greater/greater_equal use ordered compares (c_ole/c_olt,
// inverted with movf) while less/less_equal use unordered (c_ult/c_ule with
// movt) -- NaN handling is asymmetric; confirm this matches the intended
// CMove semantics.
9298 instruct cmovP_cmpF_reg_reg(mRegP dst, mRegP src, regF tmp1, regF tmp2, cmpOp cop ) %{
9299 match(Set dst (CMoveP (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9300 ins_cost(80);
9301 format %{
9302 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpF_reg_reg\n"
9303 "\tCMOV $dst,$src \t @cmovP_cmpF_reg_reg"
9304 %}
9306 ins_encode %{
9307 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9308 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9309 Register dst = $dst$$Register;
9310 Register src = $src$$Register;
9311 int flag = $cop$$cmpcode;
9313 switch(flag) {
9314 case 0x01: //equal
9315 __ c_eq_s(reg_op1, reg_op2);
9316 __ movt(dst, src);
9317 break;
9318 case 0x02: //not_equal
9319 __ c_eq_s(reg_op1, reg_op2);
9320 __ movf(dst, src);
9321 break;
9322 case 0x03: //greater
9323 __ c_ole_s(reg_op1, reg_op2);
9324 __ movf(dst, src);
9325 break;
9326 case 0x04: //greater_equal
9327 __ c_olt_s(reg_op1, reg_op2);
9328 __ movf(dst, src);
9329 break;
9330 case 0x05: //less
9331 __ c_ult_s(reg_op1, reg_op2);
9332 __ movt(dst, src);
9333 break;
9334 case 0x06: //less_equal
9335 __ c_ule_s(reg_op1, reg_op2);
9336 __ movt(dst, src);
9337 break;
9338 default:
9339 Unimplemented();
9340 }
9341 %}
9342 ins_pipe( pipe_slow );
9343 %}
// Conditional move of pointer on unsigned narrow-oop compare. 32-bit subu32
// for equality and sltu for the ordered conditions; AT tested by movz/movn.
9345 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
9346 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
9347 ins_cost(80);
9348 format %{
9349 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
9350 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
9351 %}
9352 ins_encode %{
9353 Register op1 = $tmp1$$Register;
9354 Register op2 = $tmp2$$Register;
9355 Register dst = $dst$$Register;
9356 Register src = $src$$Register;
9357 int flag = $cop$$cmpcode;
9359 switch(flag) {
9360 case 0x01: //equal
9361 __ subu32(AT, op1, op2);
9362 __ movz(dst, src, AT);
9363 break;
9365 case 0x02: //not_equal
9366 __ subu32(AT, op1, op2);
9367 __ movn(dst, src, AT);
9368 break;
9370 case 0x03: //above
9371 __ sltu(AT, op2, op1);
9372 __ movn(dst, src, AT);
9373 break;
9375 case 0x04: //above_equal
9376 __ sltu(AT, op1, op2);
9377 __ movz(dst, src, AT);
9378 break;
9380 case 0x05: //below
9381 __ sltu(AT, op1, op2);
9382 __ movn(dst, src, AT);
9383 break;
9385 case 0x06: //below_equal
9386 __ sltu(AT, op2, op1);
9387 __ movz(dst, src, AT);
9388 break;
9390 default:
9391 Unimplemented();
9392 }
9393 %}
9395 ins_pipe( pipe_slow );
9396 %}
// Conditional move of narrow oop on unsigned pointer compare. Full-width
// subu for equality (pointer operands) and sltu for the ordered conditions.
9398 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
9399 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9400 ins_cost(80);
9401 format %{
9402 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
9403 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
9404 %}
9405 ins_encode %{
9406 Register op1 = $tmp1$$Register;
9407 Register op2 = $tmp2$$Register;
9408 Register dst = $dst$$Register;
9409 Register src = $src$$Register;
9410 int flag = $cop$$cmpcode;
9412 switch(flag) {
9413 case 0x01: //equal
9414 __ subu(AT, op1, op2);
9415 __ movz(dst, src, AT);
9416 break;
9418 case 0x02: //not_equal
9419 __ subu(AT, op1, op2);
9420 __ movn(dst, src, AT);
9421 break;
9423 case 0x03: //above
9424 __ sltu(AT, op2, op1);
9425 __ movn(dst, src, AT);
9426 break;
9428 case 0x04: //above_equal
9429 __ sltu(AT, op1, op2);
9430 __ movz(dst, src, AT);
9431 break;
9433 case 0x05: //below
9434 __ sltu(AT, op1, op2);
9435 __ movn(dst, src, AT);
9436 break;
9438 case 0x06: //below_equal
9439 __ sltu(AT, op2, op1);
9440 __ movz(dst, src, AT);
9441 break;
9443 default:
9444 Unimplemented();
9445 }
9446 %}
9448 ins_pipe( pipe_slow );
9449 %}
// Conditional move of pointer on double-precision float compare; mirrors
// cmovP_cmpF_reg_reg with the .d compare forms.
// NOTE(review): greater/greater_equal use ordered compares (c_ole/c_olt,
// inverted with movf) while less/less_equal use unordered (c_ult/c_ule with
// movt) -- asymmetric NaN handling; confirm intended.
9451 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
9452 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9453 ins_cost(80);
9454 format %{
9455 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
9456 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
9457 %}
9458 ins_encode %{
9459 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9460 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9461 Register dst = as_Register($dst$$reg);
9462 Register src = as_Register($src$$reg);
9464 int flag = $cop$$cmpcode;
9466 switch(flag) {
9467 case 0x01: //equal
9468 __ c_eq_d(reg_op1, reg_op2);
9469 __ movt(dst, src);
9470 break;
9471 case 0x02: //not_equal
9472 __ c_eq_d(reg_op1, reg_op2);
9473 __ movf(dst, src);
9474 break;
9475 case 0x03: //greater
9476 __ c_ole_d(reg_op1, reg_op2);
9477 __ movf(dst, src);
9478 break;
9479 case 0x04: //greater_equal
9480 __ c_olt_d(reg_op1, reg_op2);
9481 __ movf(dst, src);
9482 break;
9483 case 0x05: //less
9484 __ c_ult_d(reg_op1, reg_op2);
9485 __ movt(dst, src);
9486 break;
9487 case 0x06: //less_equal
9488 __ c_ule_d(reg_op1, reg_op2);
9489 __ movt(dst, src);
9490 break;
9491 default:
9492 Unimplemented();
9493 }
9494 %}
9496 ins_pipe( pipe_slow );
9497 %}
// Conditional move of narrow oop on unsigned narrow-oop compare. 32-bit
// subu32 for equality and sltu for the ordered conditions.
9500 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
9501 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
9502 ins_cost(80);
9503 format %{
9504 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
9505 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
9506 %}
9507 ins_encode %{
9508 Register op1 = $tmp1$$Register;
9509 Register op2 = $tmp2$$Register;
9510 Register dst = $dst$$Register;
9511 Register src = $src$$Register;
9512 int flag = $cop$$cmpcode;
9514 switch(flag) {
9515 case 0x01: //equal
9516 __ subu32(AT, op1, op2);
9517 __ movz(dst, src, AT);
9518 break;
9520 case 0x02: //not_equal
9521 __ subu32(AT, op1, op2);
9522 __ movn(dst, src, AT);
9523 break;
9525 case 0x03: //above
9526 __ sltu(AT, op2, op1);
9527 __ movn(dst, src, AT);
9528 break;
9530 case 0x04: //above_equal
9531 __ sltu(AT, op1, op2);
9532 __ movz(dst, src, AT);
9533 break;
9535 case 0x05: //below
9536 __ sltu(AT, op1, op2);
9537 __ movn(dst, src, AT);
9538 break;
9540 case 0x06: //below_equal
9541 __ sltu(AT, op2, op1);
9542 __ movz(dst, src, AT);
9543 break;
9545 default:
9546 Unimplemented();
9547 }
9548 %}
9550 ins_pipe( pipe_slow );
9551 %}
// Conditional move of int on unsigned int compare. Equality via subu and
// ordered conditions via sltu; AT is the scratch tested by movz/movn.
9554 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
9555 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
9556 ins_cost(80);
9557 format %{
9558 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
9559 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
9560 %}
9561 ins_encode %{
9562 Register op1 = $tmp1$$Register;
9563 Register op2 = $tmp2$$Register;
9564 Register dst = $dst$$Register;
9565 Register src = $src$$Register;
9566 int flag = $cop$$cmpcode;
9568 switch(flag) {
9569 case 0x01: //equal
9570 __ subu(AT, op1, op2);
9571 __ movz(dst, src, AT);
9572 break;
9574 case 0x02: //not_equal
9575 __ subu(AT, op1, op2);
9576 __ movn(dst, src, AT);
9577 break;
9579 case 0x03: //above
9580 __ sltu(AT, op2, op1);
9581 __ movn(dst, src, AT);
9582 break;
9584 case 0x04: //above_equal
9585 __ sltu(AT, op1, op2);
9586 __ movz(dst, src, AT);
9587 break;
9589 case 0x05: //below
9590 __ sltu(AT, op1, op2);
9591 __ movn(dst, src, AT);
9592 break;
9594 case 0x06: //below_equal
9595 __ sltu(AT, op2, op1);
9596 __ movz(dst, src, AT);
9597 break;
9599 default:
9600 Unimplemented();
9601 }
9602 %}
9604 ins_pipe( pipe_slow );
9605 %}
// Conditional move of int on signed 64-bit long compare. Full-width subu for
// equality and slt for the ordered conditions; AT tested by movz/movn.
9607 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
9608 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
9609 ins_cost(80);
9610 format %{
9611 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
9612 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
9613 %}
9614 ins_encode %{
9615 Register opr1 = as_Register($tmp1$$reg);
9616 Register opr2 = as_Register($tmp2$$reg);
9617 Register dst = $dst$$Register;
9618 Register src = $src$$Register;
9619 int flag = $cop$$cmpcode;
9621 switch(flag) {
9622 case 0x01: //equal
9623 __ subu(AT, opr1, opr2);
9624 __ movz(dst, src, AT);
9625 break;
9627 case 0x02: //not_equal
9628 __ subu(AT, opr1, opr2);
9629 __ movn(dst, src, AT);
9630 break;
9632 case 0x03: //greater
9633 __ slt(AT, opr2, opr1);
9634 __ movn(dst, src, AT);
9635 break;
9637 case 0x04: //greater_equal
9638 __ slt(AT, opr1, opr2);
9639 __ movz(dst, src, AT);
9640 break;
9642 case 0x05: //less
9643 __ slt(AT, opr1, opr2);
9644 __ movn(dst, src, AT);
9645 break;
9647 case 0x06: //less_equal
9648 __ slt(AT, opr2, opr1);
9649 __ movz(dst, src, AT);
9650 break;
9652 default:
9653 Unimplemented();
9654 }
9655 %}
9657 ins_pipe( pipe_slow );
9658 %}
// Conditional move of pointer on signed 64-bit long compare; same encoding
// pattern as cmovI_cmpL_reg_reg.
9660 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
9661 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
9662 ins_cost(80);
9663 format %{
9664 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
9665 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
9666 %}
9667 ins_encode %{
9668 Register opr1 = as_Register($tmp1$$reg);
9669 Register opr2 = as_Register($tmp2$$reg);
9670 Register dst = $dst$$Register;
9671 Register src = $src$$Register;
9672 int flag = $cop$$cmpcode;
9674 switch(flag) {
9675 case 0x01: //equal
9676 __ subu(AT, opr1, opr2);
9677 __ movz(dst, src, AT);
9678 break;
9680 case 0x02: //not_equal
9681 __ subu(AT, opr1, opr2);
9682 __ movn(dst, src, AT);
9683 break;
9685 case 0x03: //greater
9686 __ slt(AT, opr2, opr1);
9687 __ movn(dst, src, AT);
9688 break;
9690 case 0x04: //greater_equal
9691 __ slt(AT, opr1, opr2);
9692 __ movz(dst, src, AT);
9693 break;
9695 case 0x05: //less
9696 __ slt(AT, opr1, opr2);
9697 __ movn(dst, src, AT);
9698 break;
9700 case 0x06: //less_equal
9701 __ slt(AT, opr2, opr1);
9702 __ movz(dst, src, AT);
9703 break;
9705 default:
9706 Unimplemented();
9707 }
9708 %}
9710 ins_pipe( pipe_slow );
9711 %}
// Conditional move of int on double-precision float compare; FPU flag set
// by c.cond.d, selected with movt/movf. not_equal uses c_eq_d + movf so
// NaN != NaN holds (see retained comment referencing branchConD_reg_reg).
// NOTE(review): ordered compares for greater/greater_equal vs unordered for
// less/less_equal -- asymmetric NaN handling; confirm intended.
9713 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
9714 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9715 ins_cost(80);
9716 format %{
9717 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
9718 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
9719 %}
9720 ins_encode %{
9721 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9722 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9723 Register dst = as_Register($dst$$reg);
9724 Register src = as_Register($src$$reg);
9726 int flag = $cop$$cmpcode;
9728 switch(flag) {
9729 case 0x01: //equal
9730 __ c_eq_d(reg_op1, reg_op2);
9731 __ movt(dst, src);
9732 break;
9733 case 0x02: //not_equal
9734 // See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
9735 __ c_eq_d(reg_op1, reg_op2);
9736 __ movf(dst, src);
9737 break;
9738 case 0x03: //greater
9739 __ c_ole_d(reg_op1, reg_op2);
9740 __ movf(dst, src);
9741 break;
9742 case 0x04: //greater_equal
9743 __ c_olt_d(reg_op1, reg_op2);
9744 __ movf(dst, src);
9745 break;
9746 case 0x05: //less
9747 __ c_ult_d(reg_op1, reg_op2);
9748 __ movt(dst, src);
9749 break;
9750 case 0x06: //less_equal
9751 __ c_ule_d(reg_op1, reg_op2);
9752 __ movt(dst, src);
9753 break;
9754 default:
9755 Unimplemented();
9756 }
9757 %}
9759 ins_pipe( pipe_slow );
9760 %}
// Conditional move of pointer on unsigned pointer compare. Full-width subu
// for equality and sltu for the ordered conditions; AT tested by movz/movn.
9763 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
9764 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9765 ins_cost(80);
9766 format %{
9767 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
9768 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
9769 %}
9770 ins_encode %{
9771 Register op1 = $tmp1$$Register;
9772 Register op2 = $tmp2$$Register;
9773 Register dst = $dst$$Register;
9774 Register src = $src$$Register;
9775 int flag = $cop$$cmpcode;
9777 switch(flag) {
9778 case 0x01: //equal
9779 __ subu(AT, op1, op2);
9780 __ movz(dst, src, AT);
9781 break;
9783 case 0x02: //not_equal
9784 __ subu(AT, op1, op2);
9785 __ movn(dst, src, AT);
9786 break;
9788 case 0x03: //above
9789 __ sltu(AT, op2, op1);
9790 __ movn(dst, src, AT);
9791 break;
9793 case 0x04: //above_equal
9794 __ sltu(AT, op1, op2);
9795 __ movz(dst, src, AT);
9796 break;
9798 case 0x05: //below
9799 __ sltu(AT, op1, op2);
9800 __ movn(dst, src, AT);
9801 break;
9803 case 0x06: //below_equal
9804 __ sltu(AT, op2, op1);
9805 __ movz(dst, src, AT);
9806 break;
9808 default:
9809 Unimplemented();
9810 }
9811 %}
9813 ins_pipe( pipe_slow );
9814 %}
// Conditional move of pointer on signed int compare (cmpOp + slt, not
// sltu). The original case labels said above/above_equal/below/below_equal,
// which are unsigned names; corrected to the signed names below -- the
// emitted code (signed slt) is unchanged.
9816 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9817 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9818 ins_cost(80);
9819 format %{
9820 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
9821 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
9822 %}
9823 ins_encode %{
9824 Register op1 = $tmp1$$Register;
9825 Register op2 = $tmp2$$Register;
9826 Register dst = $dst$$Register;
9827 Register src = $src$$Register;
9828 int flag = $cop$$cmpcode;
9830 switch(flag) {
9831 case 0x01: //equal
9832 __ subu32(AT, op1, op2);
9833 __ movz(dst, src, AT);
9834 break;
9836 case 0x02: //not_equal
9837 __ subu32(AT, op1, op2);
9838 __ movn(dst, src, AT);
9839 break;
9841 case 0x03: //greater (signed; was mislabeled "above")
9842 __ slt(AT, op2, op1);
9843 __ movn(dst, src, AT);
9844 break;
9846 case 0x04: //greater_equal (signed; was mislabeled "above_equal")
9847 __ slt(AT, op1, op2);
9848 __ movz(dst, src, AT);
9849 break;
9851 case 0x05: //less (signed; was mislabeled "below")
9852 __ slt(AT, op1, op2);
9853 __ movn(dst, src, AT);
9854 break;
9856 case 0x06: //less_equal (signed; was mislabeled "below_equal")
9857 __ slt(AT, op2, op1);
9858 __ movz(dst, src, AT);
9859 break;
9861 default:
9862 Unimplemented();
9863 }
9864 %}
9866 ins_pipe( pipe_slow );
9867 %}
// Conditional move of long on unsigned pointer compare. Full-width subu for
// equality and sltu for the ordered conditions; AT tested by movz/movn.
9869 instruct cmovL_cmpP_reg_reg(mRegL dst, mRegL src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
9870 match(Set dst (CMoveL (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9871 ins_cost(80);
9872 format %{
9873 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpP_reg_reg\n\t"
9874 "CMOV $dst,$src\t @cmovL_cmpP_reg_reg"
9875 %}
9876 ins_encode %{
9877 Register op1 = $tmp1$$Register;
9878 Register op2 = $tmp2$$Register;
9879 Register dst = $dst$$Register;
9880 Register src = $src$$Register;
9881 int flag = $cop$$cmpcode;
9883 switch(flag) {
9884 case 0x01: //equal
9885 __ subu(AT, op1, op2);
9886 __ movz(dst, src, AT);
9887 break;
9889 case 0x02: //not_equal
9890 __ subu(AT, op1, op2);
9891 __ movn(dst, src, AT);
9892 break;
9894 case 0x03: //above
9895 __ sltu(AT, op2, op1);
9896 __ movn(dst, src, AT);
9897 break;
9899 case 0x04: //above_equal
9900 __ sltu(AT, op1, op2);
9901 __ movz(dst, src, AT);
9902 break;
9904 case 0x05: //below
9905 __ sltu(AT, op1, op2);
9906 __ movn(dst, src, AT);
9907 break;
9909 case 0x06: //below_equal
9910 __ sltu(AT, op2, op1);
9911 __ movz(dst, src, AT);
9912 break;
9914 default:
9915 Unimplemented();
9916 }
9917 %}
9919 ins_pipe( pipe_slow );
9920 %}
// Conditional move of a narrow oop selected by an UNSIGNED 32-bit compare.
// subu32 for (in)equality, sltu for ordering; AT is clobbered.
9922 instruct cmovN_cmpU_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
9923 match(Set dst (CMoveN (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
9924 ins_cost(80);
9925 format %{
9926 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpU_reg_reg\n\t"
9927 "CMOV $dst,$src\t @cmovN_cmpU_reg_reg"
9928 %}
9929 ins_encode %{
9930 Register op1 = $tmp1$$Register;
9931 Register op2 = $tmp2$$Register;
9932 Register dst = $dst$$Register;
9933 Register src = $src$$Register;
9934 int flag = $cop$$cmpcode;
9936 switch(flag) {
9937 case 0x01: //equal
9938 __ subu32(AT, op1, op2);
9939 __ movz(dst, src, AT);
9940 break;
9942 case 0x02: //not_equal
9943 __ subu32(AT, op1, op2);
9944 __ movn(dst, src, AT);
9945 break;
9947 case 0x03: //above
9948 __ sltu(AT, op2, op1);
9949 __ movn(dst, src, AT);
9950 break;
9952 case 0x04: //above_equal
9953 __ sltu(AT, op1, op2);
9954 __ movz(dst, src, AT);
9955 break;
9957 case 0x05: //below
9958 __ sltu(AT, op1, op2);
9959 __ movn(dst, src, AT);
9960 break;
9962 case 0x06: //below_equal
9963 __ sltu(AT, op2, op1);
9964 __ movz(dst, src, AT);
9965 break;
9967 default:
9968 Unimplemented();
9969 }
9970 %}
9972 ins_pipe( pipe_slow );
9973 %}
// Conditional move of a narrow oop selected by a SIGNED 64-bit long compare.
// 64-bit subtract for (in)equality, slt for ordering; AT is clobbered.
9975 instruct cmovN_cmpL_reg_reg(mRegN dst, mRegN src, mRegL tmp1, mRegL tmp2, cmpOp cop) %{
9976 match(Set dst (CMoveN (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
9977 ins_cost(80);
9978 format %{
9979 "CMP$cop $tmp1, $tmp2\t @cmovN_cmpL_reg_reg\n"
9980 "\tCMOV $dst,$src \t @cmovN_cmpL_reg_reg"
9981 %}
9982 ins_encode %{
9983 Register opr1 = as_Register($tmp1$$reg);
9984 Register opr2 = as_Register($tmp2$$reg);
9985 Register dst = $dst$$Register;
9986 Register src = $src$$Register;
9987 int flag = $cop$$cmpcode;
9989 switch(flag) {
9990 case 0x01: //equal
9991 __ subu(AT, opr1, opr2);
9992 __ movz(dst, src, AT);
9993 break;
9995 case 0x02: //not_equal
9996 __ subu(AT, opr1, opr2);
9997 __ movn(dst, src, AT);
9998 break;
10000 case 0x03: //greater
10001 __ slt(AT, opr2, opr1);
10002 __ movn(dst, src, AT);
10003 break;
10005 case 0x04: //greater_equal
10006 __ slt(AT, opr1, opr2);
10007 __ movz(dst, src, AT);
10008 break;
10010 case 0x05: //less
10011 __ slt(AT, opr1, opr2);
10012 __ movn(dst, src, AT);
10013 break;
10015 case 0x06: //less_equal
10016 __ slt(AT, opr2, opr1);
10017 __ movz(dst, src, AT);
10018 break;
10020 default:
10021 Unimplemented();
10022 }
10023 %}
10025 ins_pipe( pipe_slow );
10026 %}
// Conditional move of a narrow oop selected by a SIGNED 32-bit integer compare.
// Uses slt, so the case labels are greater/less (previous "above/below" labels
// were misleading). AT is clobbered.
10028 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
10029 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
10030 ins_cost(80);
10031 format %{
10032 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
10033 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
10034 %}
10035 ins_encode %{
10036 Register op1 = $tmp1$$Register;
10037 Register op2 = $tmp2$$Register;
10038 Register dst = $dst$$Register;
10039 Register src = $src$$Register;
10040 int flag = $cop$$cmpcode;
10042 switch(flag) {
10043 case 0x01: //equal
10044 __ subu32(AT, op1, op2);
10045 __ movz(dst, src, AT);
10046 break;
10048 case 0x02: //not_equal
10049 __ subu32(AT, op1, op2);
10050 __ movn(dst, src, AT);
10051 break;
10053 case 0x03: //greater (signed; was mislabeled "above")
10054 __ slt(AT, op2, op1);
10055 __ movn(dst, src, AT);
10056 break;
10058 case 0x04: //greater_equal (signed; was mislabeled "above_equal")
10059 __ slt(AT, op1, op2);
10060 __ movz(dst, src, AT);
10061 break;
10063 case 0x05: //less (signed; was mislabeled "below")
10064 __ slt(AT, op1, op2);
10065 __ movn(dst, src, AT);
10066 break;
10068 case 0x06: //less_equal (signed; was mislabeled "below_equal")
10069 __ slt(AT, op2, op1);
10070 __ movz(dst, src, AT);
10071 break;
10073 default:
10074 Unimplemented();
10075 }
10076 %}
10078 ins_pipe( pipe_slow );
10079 %}
// Conditional move of a long selected by an UNSIGNED 32-bit compare.
// subu32 for (in)equality, sltu for ordering; AT is clobbered.
10081 instruct cmovL_cmpU_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
10082 match(Set dst (CMoveL (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
10083 ins_cost(80);
10084 format %{
10085 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpU_reg_reg\n\t"
10086 "CMOV $dst,$src\t @cmovL_cmpU_reg_reg"
10087 %}
10088 ins_encode %{
10089 Register op1 = $tmp1$$Register;
10090 Register op2 = $tmp2$$Register;
10091 Register dst = $dst$$Register;
10092 Register src = $src$$Register;
10093 int flag = $cop$$cmpcode;
10095 switch(flag) {
10096 case 0x01: //equal
10097 __ subu32(AT, op1, op2);
10098 __ movz(dst, src, AT);
10099 break;
10101 case 0x02: //not_equal
10102 __ subu32(AT, op1, op2);
10103 __ movn(dst, src, AT);
10104 break;
10106 case 0x03: //above
10107 __ sltu(AT, op2, op1);
10108 __ movn(dst, src, AT);
10109 break;
10111 case 0x04: //above_equal
10112 __ sltu(AT, op1, op2);
10113 __ movz(dst, src, AT);
10114 break;
10116 case 0x05: //below
10117 __ sltu(AT, op1, op2);
10118 __ movn(dst, src, AT);
10119 break;
10121 case 0x06: //below_equal
10122 __ sltu(AT, op2, op1);
10123 __ movz(dst, src, AT);
10124 break;
10126 default:
10127 Unimplemented();
10128 }
10129 %}
10131 ins_pipe( pipe_slow );
10132 %}
// Conditional move of a long selected by a single-float compare.
// Sets the FP condition flag with a c.cond.s compare and uses movt/movf on it.
// NOTE(review): the ordered/unordered mix (c_eq/c_ole/c_olt vs c_ult/c_ule)
// determines the NaN behavior of each case — presumably chosen to match the
// compiler's float-compare semantics; verify against cmpOp usage.
10134 instruct cmovL_cmpF_reg_reg(mRegL dst, mRegL src, regF tmp1, regF tmp2, cmpOp cop ) %{
10135 match(Set dst (CMoveL (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
10136 ins_cost(80);
10137 format %{
10138 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpF_reg_reg\n"
10139 "\tCMOV $dst,$src \t @cmovL_cmpF_reg_reg"
10140 %}
10142 ins_encode %{
10143 FloatRegister reg_op1 = $tmp1$$FloatRegister;
10144 FloatRegister reg_op2 = $tmp2$$FloatRegister;
10145 Register dst = $dst$$Register;
10146 Register src = $src$$Register;
10147 int flag = $cop$$cmpcode;
10149 switch(flag) {
10150 case 0x01: //equal
10151 __ c_eq_s(reg_op1, reg_op2);
10152 __ movt(dst, src);
10153 break;
10154 case 0x02: //not_equal
10155 __ c_eq_s(reg_op1, reg_op2);
10156 __ movf(dst, src);
10157 break;
10158 case 0x03: //greater: move unless op1 <= op2 (ordered)
10159 __ c_ole_s(reg_op1, reg_op2);
10160 __ movf(dst, src);
10161 break;
10162 case 0x04: //greater_equal: move unless op1 < op2 (ordered)
10163 __ c_olt_s(reg_op1, reg_op2);
10164 __ movf(dst, src);
10165 break;
10166 case 0x05: //less: move if op1 < op2 or unordered
10167 __ c_ult_s(reg_op1, reg_op2);
10168 __ movt(dst, src);
10169 break;
10170 case 0x06: //less_equal: move if op1 <= op2 or unordered
10171 __ c_ule_s(reg_op1, reg_op2);
10172 __ movt(dst, src);
10173 break;
10174 default:
10175 Unimplemented();
10176 }
10177 %}
10178 ins_pipe( pipe_slow );
10179 %}
// Conditional move of a long selected by a SIGNED 32-bit integer compare.
// AT is clobbered.
10181 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
10182 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
10183 ins_cost(80);
10184 format %{
10185 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
10186 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
10187 %}
10189 ins_encode %{
10190 Register op1 = $tmp1$$Register;
10191 Register op2 = $tmp2$$Register;
10192 Register dst = as_Register($dst$$reg);
10193 Register src = as_Register($src$$reg);
10194 int flag = $cop$$cmpcode;
10196 switch(flag)
10197 {
10198 case 0x01: //equal
10199 __ subu32(AT, op1, op2);
10200 __ movz(dst, src, AT);
10201 break;
10203 case 0x02: //not_equal
10204 __ subu32(AT, op1, op2);
10205 __ movn(dst, src, AT);
10206 break;
10208 case 0x03: //greater
10209 __ slt(AT, op2, op1);
10210 __ movn(dst, src, AT);
10211 break;
10213 case 0x04: //greater_equal
10214 __ slt(AT, op1, op2);
10215 __ movz(dst, src, AT);
10216 break;
10218 case 0x05: //less
10219 __ slt(AT, op1, op2);
10220 __ movn(dst, src, AT);
10221 break;
10223 case 0x06: //less_equal
10224 __ slt(AT, op2, op1);
10225 __ movz(dst, src, AT);
10226 break;
10228 default:
10229 Unimplemented();
10230 }
10231 %}
10233 ins_pipe( pipe_slow );
10234 %}
// Conditional move of a long selected by a SIGNED 64-bit long compare.
// AT is clobbered.
10236 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
10237 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
10238 ins_cost(80);
10239 format %{
10240 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
10241 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
10242 %}
10243 ins_encode %{
10244 Register opr1 = as_Register($tmp1$$reg);
10245 Register opr2 = as_Register($tmp2$$reg);
10246 Register dst = as_Register($dst$$reg);
10247 Register src = as_Register($src$$reg);
10248 int flag = $cop$$cmpcode;
10250 switch(flag) {
10251 case 0x01: //equal
10252 __ subu(AT, opr1, opr2);
10253 __ movz(dst, src, AT);
10254 break;
10256 case 0x02: //not_equal
10257 __ subu(AT, opr1, opr2);
10258 __ movn(dst, src, AT);
10259 break;
10261 case 0x03: //greater
10262 __ slt(AT, opr2, opr1);
10263 __ movn(dst, src, AT);
10264 break;
10266 case 0x04: //greater_equal
10267 __ slt(AT, opr1, opr2);
10268 __ movz(dst, src, AT);
10269 break;
10271 case 0x05: //less
10272 __ slt(AT, opr1, opr2);
10273 __ movn(dst, src, AT);
10274 break;
10276 case 0x06: //less_equal
10277 __ slt(AT, opr2, opr1);
10278 __ movz(dst, src, AT);
10279 break;
10281 default:
10282 Unimplemented();
10283 }
10284 %}
10286 ins_pipe( pipe_slow );
10287 %}
// Conditional move of a long selected by an UNSIGNED narrow-oop compare.
// subu32 for (in)equality, sltu for ordering; AT is clobbered.
10289 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
10290 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
10291 ins_cost(80);
10292 format %{
10293 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
10294 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
10295 %}
10296 ins_encode %{
10297 Register op1 = $tmp1$$Register;
10298 Register op2 = $tmp2$$Register;
10299 Register dst = $dst$$Register;
10300 Register src = $src$$Register;
10301 int flag = $cop$$cmpcode;
10303 switch(flag) {
10304 case 0x01: //equal
10305 __ subu32(AT, op1, op2);
10306 __ movz(dst, src, AT);
10307 break;
10309 case 0x02: //not_equal
10310 __ subu32(AT, op1, op2);
10311 __ movn(dst, src, AT);
10312 break;
10314 case 0x03: //above
10315 __ sltu(AT, op2, op1);
10316 __ movn(dst, src, AT);
10317 break;
10319 case 0x04: //above_equal
10320 __ sltu(AT, op1, op2);
10321 __ movz(dst, src, AT);
10322 break;
10324 case 0x05: //below
10325 __ sltu(AT, op1, op2);
10326 __ movn(dst, src, AT);
10327 break;
10329 case 0x06: //below_equal
10330 __ sltu(AT, op2, op1);
10331 __ movz(dst, src, AT);
10332 break;
10334 default:
10335 Unimplemented();
10336 }
10337 %}
10339 ins_pipe( pipe_slow );
10340 %}
// Conditional move of a long selected by a double-float compare.
// Sets the FP condition flag with a c.cond.d compare and uses movt/movf.
// The ordered/unordered predicate choice per case mirrors cmovL_cmpF_reg_reg.
10343 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
10344 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
10345 ins_cost(80);
10346 format %{
10347 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
10348 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
10349 %}
10350 ins_encode %{
10351 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
10352 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
10353 Register dst = as_Register($dst$$reg);
10354 Register src = as_Register($src$$reg);
10356 int flag = $cop$$cmpcode;
10358 switch(flag) {
10359 case 0x01: //equal
10360 __ c_eq_d(reg_op1, reg_op2);
10361 __ movt(dst, src);
10362 break;
10363 case 0x02: //not_equal
10364 __ c_eq_d(reg_op1, reg_op2);
10365 __ movf(dst, src);
10366 break;
10367 case 0x03: //greater: move unless op1 <= op2 (ordered)
10368 __ c_ole_d(reg_op1, reg_op2);
10369 __ movf(dst, src);
10370 break;
10371 case 0x04: //greater_equal: move unless op1 < op2 (ordered)
10372 __ c_olt_d(reg_op1, reg_op2);
10373 __ movf(dst, src);
10374 break;
10375 case 0x05: //less: move if op1 < op2 or unordered
10376 __ c_ult_d(reg_op1, reg_op2);
10377 __ movt(dst, src);
10378 break;
10379 case 0x06: //less_equal: move if op1 <= op2 or unordered
10380 __ c_ule_d(reg_op1, reg_op2);
10381 __ movt(dst, src);
10382 break;
10383 default:
10384 Unimplemented();
10385 }
10386 %}
10388 ins_pipe( pipe_slow );
10389 %}
// Conditional move of a double selected by a double compare: FP condition
// flag + movt_d/movf_d (full double move, no GPRs involved).
10391 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
10392 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
10393 ins_cost(200);
10394 format %{
10395 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
10396 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
10397 %}
10398 ins_encode %{
10399 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
10400 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
10401 FloatRegister dst = as_FloatRegister($dst$$reg);
10402 FloatRegister src = as_FloatRegister($src$$reg);
10404 int flag = $cop$$cmpcode;
10406 switch(flag) {
10407 case 0x01: //equal
10408 __ c_eq_d(reg_op1, reg_op2);
10409 __ movt_d(dst, src);
10410 break;
10411 case 0x02: //not_equal
10412 __ c_eq_d(reg_op1, reg_op2);
10413 __ movf_d(dst, src);
10414 break;
10415 case 0x03: //greater: move unless op1 <= op2 (ordered)
10416 __ c_ole_d(reg_op1, reg_op2);
10417 __ movf_d(dst, src);
10418 break;
10419 case 0x04: //greater_equal: move unless op1 < op2 (ordered)
10420 __ c_olt_d(reg_op1, reg_op2);
10421 __ movf_d(dst, src);
10422 break;
10423 case 0x05: //less: move if op1 < op2 or unordered
10424 __ c_ult_d(reg_op1, reg_op2);
10425 __ movt_d(dst, src);
10426 break;
10427 case 0x06: //less_equal: move if op1 <= op2 or unordered
10428 __ c_ule_d(reg_op1, reg_op2);
10429 __ movt_d(dst, src);
10430 break;
10431 default:
10432 Unimplemented();
10433 }
10434 %}
10436 ins_pipe( pipe_slow );
10437 %}
// Conditional move of a float selected by a SIGNED 32-bit integer compare.
// There is no FP cmov on a GPR condition here, so each case branches around
// a mov_s when the condition is false. AT is clobbered in the slt cases.
10439 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
10440 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
10441 ins_cost(200);
10442 format %{
10443 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
10444 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
10445 %}
10447 ins_encode %{
10448 Register op1 = $tmp1$$Register;
10449 Register op2 = $tmp2$$Register;
10450 FloatRegister dst = as_FloatRegister($dst$$reg);
10451 FloatRegister src = as_FloatRegister($src$$reg);
10452 int flag = $cop$$cmpcode;
10453 Label L;
10455 switch(flag) {
10456 case 0x01: //equal
10457 __ bne(op1, op2, L);
10458 __ delayed()->nop();
10459 __ mov_s(dst, src);
10460 __ bind(L);
10461 break;
10462 case 0x02: //not_equal
10463 __ beq(op1, op2, L);
10464 __ delayed()->nop();
10465 __ mov_s(dst, src);
10466 __ bind(L);
10467 break;
10468 case 0x03: //greater
10469 __ slt(AT, op2, op1);
10470 __ beq(AT, R0, L);
10471 __ delayed()->nop();
10472 __ mov_s(dst, src);
10473 __ bind(L);
10474 break;
10475 case 0x04: //greater_equal
10476 __ slt(AT, op1, op2);
10477 __ bne(AT, R0, L);
10478 __ delayed()->nop();
10479 __ mov_s(dst, src);
10480 __ bind(L);
10481 break;
10482 case 0x05: //less
10483 __ slt(AT, op1, op2);
10484 __ beq(AT, R0, L);
10485 __ delayed()->nop();
10486 __ mov_s(dst, src);
10487 __ bind(L);
10488 break;
10489 case 0x06: //less_equal
10490 __ slt(AT, op2, op1);
10491 __ bne(AT, R0, L);
10492 __ delayed()->nop();
10493 __ mov_s(dst, src);
10494 __ bind(L);
10495 break;
10496 default:
10497 Unimplemented();
10498 }
10499 %}
10501 ins_pipe( pipe_slow );
10502 %}
// Conditional move of a double selected by a SIGNED 32-bit integer compare.
// Branch-around-mov_d implementation (see cmovF_cmpI_reg_reg); AT clobbered.
10504 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
10505 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
10506 ins_cost(200);
10507 format %{
10508 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
10509 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
10510 %}
10512 ins_encode %{
10513 Register op1 = $tmp1$$Register;
10514 Register op2 = $tmp2$$Register;
10515 FloatRegister dst = as_FloatRegister($dst$$reg);
10516 FloatRegister src = as_FloatRegister($src$$reg);
10517 int flag = $cop$$cmpcode;
10518 Label L;
10520 switch(flag) {
10521 case 0x01: //equal
10522 __ bne(op1, op2, L);
10523 __ delayed()->nop();
10524 __ mov_d(dst, src);
10525 __ bind(L);
10526 break;
10527 case 0x02: //not_equal
10528 __ beq(op1, op2, L);
10529 __ delayed()->nop();
10530 __ mov_d(dst, src);
10531 __ bind(L);
10532 break;
10533 case 0x03: //greater
10534 __ slt(AT, op2, op1);
10535 __ beq(AT, R0, L);
10536 __ delayed()->nop();
10537 __ mov_d(dst, src);
10538 __ bind(L);
10539 break;
10540 case 0x04: //greater_equal
10541 __ slt(AT, op1, op2);
10542 __ bne(AT, R0, L);
10543 __ delayed()->nop();
10544 __ mov_d(dst, src);
10545 __ bind(L);
10546 break;
10547 case 0x05: //less
10548 __ slt(AT, op1, op2);
10549 __ beq(AT, R0, L);
10550 __ delayed()->nop();
10551 __ mov_d(dst, src);
10552 __ bind(L);
10553 break;
10554 case 0x06: //less_equal
10555 __ slt(AT, op2, op1);
10556 __ bne(AT, R0, L);
10557 __ delayed()->nop();
10558 __ mov_d(dst, src);
10559 __ bind(L);
10560 break;
10561 default:
10562 Unimplemented();
10563 }
10564 %}
10566 ins_pipe( pipe_slow );
10567 %}
// Conditional move of a double selected by a pointer compare.
// Branch-around-mov_d implementation; AT clobbered in the slt cases.
// NOTE(review): the ordered cases use SIGNED slt on pointer values — for
// CmpP only eq/ne are presumably generated by the compiler; confirm that
// the ordered cases are unreachable or intended to be signed.
10569 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
10570 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
10571 ins_cost(200);
10572 format %{
10573 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
10574 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
10575 %}
10577 ins_encode %{
10578 Register op1 = $tmp1$$Register;
10579 Register op2 = $tmp2$$Register;
10580 FloatRegister dst = as_FloatRegister($dst$$reg);
10581 FloatRegister src = as_FloatRegister($src$$reg);
10582 int flag = $cop$$cmpcode;
10583 Label L;
10585 switch(flag) {
10586 case 0x01: //equal
10587 __ bne(op1, op2, L);
10588 __ delayed()->nop();
10589 __ mov_d(dst, src);
10590 __ bind(L);
10591 break;
10592 case 0x02: //not_equal
10593 __ beq(op1, op2, L);
10594 __ delayed()->nop();
10595 __ mov_d(dst, src);
10596 __ bind(L);
10597 break;
10598 case 0x03: //greater
10599 __ slt(AT, op2, op1);
10600 __ beq(AT, R0, L);
10601 __ delayed()->nop();
10602 __ mov_d(dst, src);
10603 __ bind(L);
10604 break;
10605 case 0x04: //greater_equal
10606 __ slt(AT, op1, op2);
10607 __ bne(AT, R0, L);
10608 __ delayed()->nop();
10609 __ mov_d(dst, src);
10610 __ bind(L);
10611 break;
10612 case 0x05: //less
10613 __ slt(AT, op1, op2);
10614 __ beq(AT, R0, L);
10615 __ delayed()->nop();
10616 __ mov_d(dst, src);
10617 __ bind(L);
10618 break;
10619 case 0x06: //less_equal
10620 __ slt(AT, op2, op1);
10621 __ bne(AT, R0, L);
10622 __ delayed()->nop();
10623 __ mov_d(dst, src);
10624 __ bind(L);
10625 break;
10626 default:
10627 Unimplemented();
10628 }
10629 %}
10631 ins_pipe( pipe_slow );
10632 %}
10634 //FIXME
// Conditional move of an int selected by a single-float compare.
// FP condition flag + movt/movf; predicate choice per case mirrors
// cmovL_cmpF_reg_reg (see the unordered/NaN note there).
10635 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
10636 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
10637 ins_cost(80);
10638 format %{
10639 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
10640 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
10641 %}
10643 ins_encode %{
10644 FloatRegister reg_op1 = $tmp1$$FloatRegister;
10645 FloatRegister reg_op2 = $tmp2$$FloatRegister;
10646 Register dst = $dst$$Register;
10647 Register src = $src$$Register;
10648 int flag = $cop$$cmpcode;
10650 switch(flag) {
10651 case 0x01: //equal
10652 __ c_eq_s(reg_op1, reg_op2);
10653 __ movt(dst, src);
10654 break;
10655 case 0x02: //not_equal
10656 __ c_eq_s(reg_op1, reg_op2);
10657 __ movf(dst, src);
10658 break;
10659 case 0x03: //greater
10660 __ c_ole_s(reg_op1, reg_op2);
10661 __ movf(dst, src);
10662 break;
10663 case 0x04: //greater_equal
10664 __ c_olt_s(reg_op1, reg_op2);
10665 __ movf(dst, src);
10666 break;
10667 case 0x05: //less
10668 __ c_ult_s(reg_op1, reg_op2);
10669 __ movt(dst, src);
10670 break;
10671 case 0x06: //less_equal
10672 __ c_ule_s(reg_op1, reg_op2);
10673 __ movt(dst, src);
10674 break;
10675 default:
10676 Unimplemented();
10677 }
10678 %}
10679 ins_pipe( pipe_slow );
10680 %}
// Conditional move of a float selected by a float compare: FP condition
// flag + movt_s/movf_s (full single-precision move).
10682 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
10683 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
10684 ins_cost(200);
10685 format %{
10686 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
10687 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
10688 %}
10690 ins_encode %{
10691 FloatRegister reg_op1 = $tmp1$$FloatRegister;
10692 FloatRegister reg_op2 = $tmp2$$FloatRegister;
10693 FloatRegister dst = $dst$$FloatRegister;
10694 FloatRegister src = $src$$FloatRegister;
10695 int flag = $cop$$cmpcode;
10697 switch(flag) {
10698 case 0x01: //equal
10699 __ c_eq_s(reg_op1, reg_op2);
10700 __ movt_s(dst, src);
10701 break;
10702 case 0x02: //not_equal
10703 __ c_eq_s(reg_op1, reg_op2);
10704 __ movf_s(dst, src);
10705 break;
10706 case 0x03: //greater
10707 __ c_ole_s(reg_op1, reg_op2);
10708 __ movf_s(dst, src);
10709 break;
10710 case 0x04: //greater_equal
10711 __ c_olt_s(reg_op1, reg_op2);
10712 __ movf_s(dst, src);
10713 break;
10714 case 0x05: //less
10715 __ c_ult_s(reg_op1, reg_op2);
10716 __ movt_s(dst, src);
10717 break;
10718 case 0x06: //less_equal
10719 __ c_ule_s(reg_op1, reg_op2);
10720 __ movt_s(dst, src);
10721 break;
10722 default:
10723 Unimplemented();
10724 }
10725 %}
10726 ins_pipe( pipe_slow );
10727 %}
10729 // Manifest a CmpL result in an integer register. Very painful.
10730 // This is the test to avoid.
// Three-way long compare: dst = -1 / 0 / +1 for src1 <,==,> src2.
// The -1 is set in the branch delay slot; the fall-through path sets 1
// and movz overwrites it with 0 when the operands are equal.
// NOTE(review): the sign test is done on subu(src1,src2), which can
// mis-order operands whose difference overflows 64 bits — confirm the
// inputs' range, or derive "less" via slt instead of a subtraction sign.
10731 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
10732 match(Set dst (CmpL3 src1 src2));
10733 ins_cost(1000);
10734 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
10735 ins_encode %{
10736 Register opr1 = as_Register($src1$$reg);
10737 Register opr2 = as_Register($src2$$reg);
10738 Register dst = as_Register($dst$$reg);
10740 Label Done;
10742 __ subu(AT, opr1, opr2);
10743 __ bltz(AT, Done);
10744 __ delayed()->daddiu(dst, R0, -1);
10746 __ move(dst, 1);
10747 __ movz(dst, R0, AT);
10749 __ bind(Done);
10750 %}
10751 ins_pipe( pipe_slow );
10752 %}
10754 //
10755 // less_result = -1
10756 // greater_result = 1
10757 // equal_result = 0
10758 // nan_result = -1
10759 //
// Three-way single-float compare with NaN mapped to -1 (see the contract
// comment above): c_ult catches both "less" and unordered, so the delay
// slot's -1 covers both; otherwise 1, overwritten with 0 on equality.
10760 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
10761 match(Set dst (CmpF3 src1 src2));
10762 ins_cost(1000);
10763 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
10764 ins_encode %{
10765 FloatRegister src1 = as_FloatRegister($src1$$reg);
10766 FloatRegister src2 = as_FloatRegister($src2$$reg);
10767 Register dst = as_Register($dst$$reg);
10769 Label Done;
10771 __ c_ult_s(src1, src2);
10772 __ bc1t(Done);
10773 __ delayed()->daddiu(dst, R0, -1);
10775 __ c_eq_s(src1, src2);
10776 __ move(dst, 1);
10777 __ movt(dst, R0);
10779 __ bind(Done);
10780 %}
10781 ins_pipe( pipe_slow );
10782 %}
// Three-way double compare with NaN mapped to -1; same scheme as
// cmpF3_reg_reg but using the double-precision predicates.
10784 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
10785 match(Set dst (CmpD3 src1 src2));
10786 ins_cost(1000);
10787 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
10788 ins_encode %{
10789 FloatRegister src1 = as_FloatRegister($src1$$reg);
10790 FloatRegister src2 = as_FloatRegister($src2$$reg);
10791 Register dst = as_Register($dst$$reg);
10793 Label Done;
10795 __ c_ult_d(src1, src2);
10796 __ bc1t(Done);
10797 __ delayed()->daddiu(dst, R0, -1);
10799 __ c_eq_d(src1, src2);
10800 __ move(dst, 1);
10801 __ movt(dst, R0);
10803 __ bind(Done);
10804 %}
10805 ins_pipe( pipe_slow );
10806 %}
// Zero-fill $cnt doublewords starting at $base; nothing happens for cnt == 0.
// Clobbers AT (running store address) and T9 (remaining count).
10808 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
10809 match(Set dummy (ClearArray cnt base));
10810 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
10811 ins_encode %{
// cnt is the number of DOUBLEWORDS to clear (each iteration stores one
// 8-byte zero and advances by wordSize), not bytes as previously claimed;
// base points to the starting address of the array region.
10812 Register base = $base$$Register;
10813 Register num = $cnt$$Register;
10814 Label Loop, done;
10818 __ beq(num, R0, done);
10819 __ delayed()->daddu(AT, base, R0);
10821 __ move(T9, num); /* T9 = doublewords remaining */
10823 __ bind(Loop);
10824 __ sd(R0, AT, 0);
10825 __ daddi(T9, T9, -1);
10826 __ bne(T9, R0, Loop);
10827 __ delayed()->daddi(AT, AT, wordSize);
10829 __ bind(done);
10830 %}
10831 ins_pipe( pipe_slow );
10832 %}
// Lexicographic UTF-16 string compare over two char arrays.
// result < 0 / == 0 / > 0 like String.compareTo: either the difference of the
// first mismatching chars, or (when one string is a prefix) the length
// difference. Clobbers all four input registers and AT.
10834 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
10835 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10836 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
10838 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
10839 ins_encode %{
10840 // Get the first character position in both strings
10841 // [8] char array, [12] offset, [16] count
10842 Register str1 = $str1$$Register;
10843 Register str2 = $str2$$Register;
10844 Register cnt1 = $cnt1$$Register;
10845 Register cnt2 = $cnt2$$Register;
10846 Register result = $result$$Register;
10848 Label L, Loop, haveResult, done;
10850 // compute the and difference of lengths (in result)
10851 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
10853 // compute the shorter length (in cnt1)
10854 __ slt(AT, cnt2, cnt1);
10855 __ movn(cnt1, cnt2, AT);
10857 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
10858 __ bind(Loop); // Loop begin
10859 __ beq(cnt1, R0, done);
10860 __ delayed()->lhu(AT, str1, 0);;
10862 // compare current character (AT holds the char from str1 via the delay slot)
10863 __ lhu(cnt2, str2, 0);
10864 __ bne(AT, cnt2, haveResult);
10865 __ delayed()->addi(str1, str1, 2);
10866 __ addi(str2, str2, 2);
10867 __ b(Loop);
10868 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
10870 __ bind(haveResult);
// On mismatch the result is the (always in-range) char difference.
10871 __ subu(result, AT, cnt2);
10873 __ bind(done);
10874 %}
10876 ins_pipe( pipe_slow );
10877 %}
10879 // intrinsic optimization
// Char-array equality over $cnt UTF-16 chars: result = 1 if equal, 0 otherwise.
// Fast paths: identical array pointers, or cnt == 0. Clobbers inputs and AT.
10880 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
10881 match(Set result (StrEquals (Binary str1 str2) cnt));
10882 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
10884 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
10885 ins_encode %{
10886 // Get the first character position in both strings
10887 // [8] char array, [12] offset, [16] count
10888 Register str1 = $str1$$Register;
10889 Register str2 = $str2$$Register;
10890 Register cnt = $cnt$$Register;
10891 Register tmp = $temp$$Register;
10892 Register result = $result$$Register;
10894 Label Loop, True, False;
10896 __ beq(str1, str2, True); // same char[] ?
10897 __ delayed()->daddiu(result, R0, 1); // optimistically preset result = 1
10899 __ beq(cnt, R0, True);
10900 __ delayed()->nop(); // count == 0
10902 __ bind(Loop);
10904 // compare current character
10905 __ lhu(AT, str1, 0);
10906 __ lhu(tmp, str2, 0);
10907 __ bne(AT, tmp, False);
10908 __ delayed()->addi(str1, str1, 2);
10909 __ addi(cnt, cnt, -1);
10910 __ bne(cnt, R0, Loop);
10911 __ delayed()->addi(str2, str2, 2);
10913 __ b(True);
10914 __ delayed()->nop();
10916 __ bind(False);
10917 __ daddi(result, R0, 0);
10919 __ bind(True);
10920 %}
10922 ins_pipe( pipe_slow );
10923 %}
10925 //----------Arithmetic Instructions-------------------------------------------
10926 //----------Addition Instructions---------------------------------------------
10927 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10928 match(Set dst (AddI src1 src2));
10930 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
10931 ins_encode %{
10932 Register dst = $dst$$Register;
10933 Register src1 = $src1$$Register;
10934 Register src2 = $src2$$Register;
10935 __ addu32(dst, src1, src2);
10936 %}
10937 ins_pipe( ialu_regI_regI );
10938 %}
10940 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
10941 match(Set dst (AddI src1 src2));
10943 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
10944 ins_encode %{
10945 Register dst = $dst$$Register;
10946 Register src1 = $src1$$Register;
10947 int imm = $src2$$constant;
10949 if(Assembler::is_simm16(imm)) {
10950 __ addiu32(dst, src1, imm);
10951 } else {
10952 __ move(AT, imm);
10953 __ addu32(dst, src1, AT);
10954 }
10955 %}
10956 ins_pipe( ialu_regI_regI );
10957 %}
10959 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
10960 match(Set dst (AddP src1 src2));
10962 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
10964 ins_encode %{
10965 Register dst = $dst$$Register;
10966 Register src1 = $src1$$Register;
10967 Register src2 = $src2$$Register;
10968 __ daddu(dst, src1, src2);
10969 %}
10971 ins_pipe( ialu_regI_regI );
10972 %}
10974 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
10975 match(Set dst (AddP src1 (ConvI2L src2)));
10977 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
10979 ins_encode %{
10980 Register dst = $dst$$Register;
10981 Register src1 = $src1$$Register;
10982 Register src2 = $src2$$Register;
10983 __ daddu(dst, src1, src2);
10984 %}
10986 ins_pipe( ialu_regI_regI );
10987 %}
10989 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
10990 match(Set dst (AddP src1 src2));
10992 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
10993 ins_encode %{
10994 Register src1 = $src1$$Register;
10995 long src2 = $src2$$constant;
10996 Register dst = $dst$$Register;
10998 if(Assembler::is_simm16(src2)) {
10999 __ daddiu(dst, src1, src2);
11000 } else {
11001 __ set64(AT, src2);
11002 __ daddu(dst, src1, AT);
11003 }
11004 %}
11005 ins_pipe( ialu_regI_imm16 );
11006 %}
11008 // Add Long Register with Register
11009 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11010 match(Set dst (AddL src1 src2));
11011 ins_cost(200);
11012 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
11014 ins_encode %{
11015 Register dst_reg = as_Register($dst$$reg);
11016 Register src1_reg = as_Register($src1$$reg);
11017 Register src2_reg = as_Register($src2$$reg);
11019 __ daddu(dst_reg, src1_reg, src2_reg);
11020 %}
11022 ins_pipe( ialu_regL_regL );
11023 %}
11025 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
11026 %{
11027 match(Set dst (AddL src1 src2));
11029 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
11030 ins_encode %{
11031 Register dst_reg = as_Register($dst$$reg);
11032 Register src1_reg = as_Register($src1$$reg);
11033 int src2_imm = $src2$$constant;
11035 __ daddiu(dst_reg, src1_reg, src2_imm);
11036 %}
11038 ins_pipe( ialu_regL_regL );
11039 %}
11041 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
11042 %{
11043 match(Set dst (AddL (ConvI2L src1) src2));
11045 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
11046 ins_encode %{
11047 Register dst_reg = as_Register($dst$$reg);
11048 Register src1_reg = as_Register($src1$$reg);
11049 int src2_imm = $src2$$constant;
11051 __ daddiu(dst_reg, src1_reg, src2_imm);
11052 %}
11054 ins_pipe( ialu_regL_regL );
11055 %}
11057 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
11058 match(Set dst (AddL (ConvI2L src1) src2));
11059 ins_cost(200);
11060 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
11062 ins_encode %{
11063 Register dst_reg = as_Register($dst$$reg);
11064 Register src1_reg = as_Register($src1$$reg);
11065 Register src2_reg = as_Register($src2$$reg);
11067 __ daddu(dst_reg, src1_reg, src2_reg);
11068 %}
11070 ins_pipe( ialu_regL_regL );
11071 %}
11073 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
11074 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
11075 ins_cost(200);
11076 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
11078 ins_encode %{
11079 Register dst_reg = as_Register($dst$$reg);
11080 Register src1_reg = as_Register($src1$$reg);
11081 Register src2_reg = as_Register($src2$$reg);
11083 __ daddu(dst_reg, src1_reg, src2_reg);
11084 %}
11086 ins_pipe( ialu_regL_regL );
11087 %}
11089 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
11090 match(Set dst (AddL src1 (ConvI2L src2)));
11091 ins_cost(200);
11092 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
11094 ins_encode %{
11095 Register dst_reg = as_Register($dst$$reg);
11096 Register src1_reg = as_Register($src1$$reg);
11097 Register src2_reg = as_Register($src2$$reg);
11099 __ daddu(dst_reg, src1_reg, src2_reg);
11100 %}
11102 ins_pipe( ialu_regL_regL );
11103 %}
11105 //----------Subtraction Instructions-------------------------------------------
11106 // Integer Subtraction Instructions
// 32-bit subtract, register-register form.
11107 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11108 match(Set dst (SubI src1 src2));
11109 ins_cost(100);
11111 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
11112 ins_encode %{
11113 Register dst = $dst$$Register;
11114 Register src1 = $src1$$Register;
11115 Register src2 = $src2$$Register;
11116 __ subu32(dst, src1, src2);
11117 %}
11118 ins_pipe( ialu_regI_regI );
11119 %}
// 32-bit subtract of an immediate: implemented as addiu32 of the
// negated constant (immI16_sub constrains the constant so that the
// negation fits the addiu immediate field).
11121 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
11122 match(Set dst (SubI src1 src2));
11123 ins_cost(80);
11125 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
11126 ins_encode %{
11127 Register dst = $dst$$Register;
11128 Register src1 = $src1$$Register;
11129 __ addiu32(dst, src1, -1 * $src2$$constant);
11130 %}
11131 ins_pipe( ialu_regI_regI );
11132 %}
// Integer negate: 0 - src, emitted as a subtract from the hardware zero
// register R0.
11134 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
11135 match(Set dst (SubI zero src));
11136 ins_cost(80);
11138 format %{ "neg $dst, $src #@negI_Reg" %}
11139 ins_encode %{
11140 Register dst = $dst$$Register;
11141 Register src = $src$$Register;
11142 __ subu32(dst, R0, src);
11143 %}
11144 ins_pipe( ialu_regI_regI );
11145 %}
// Long negate: same idea with the 64-bit subtract.
11147 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
11148 match(Set dst (SubL zero src));
11149 ins_cost(80);
11151 format %{ "neg $dst, $src #@negL_Reg" %}
11152 ins_encode %{
11153 Register dst = $dst$$Register;
11154 Register src = $src$$Register;
11155 __ subu(dst, R0, src);
11156 %}
11157 ins_pipe( ialu_regI_regI );
11158 %}
// Long subtract of an immediate: daddiu with the negated constant
// (immL16_sub constrains the constant so the negation fits simm16).
11160 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
11161 match(Set dst (SubL src1 src2));
11162 ins_cost(80);
11164 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
11165 ins_encode %{
11166 Register dst = $dst$$Register;
11167 Register src1 = $src1$$Register;
11168 __ daddiu(dst, src1, -1 * $src2$$constant);
11169 %}
11170 ins_pipe( ialu_regI_regI );
11171 %}
11173 // Subtract Long Register with Register.
// The three ConvI2L variants below all lower to the same 64-bit subu;
// the widening node is absorbed (ints are held sign-extended here).
11174 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11175 match(Set dst (SubL src1 src2));
11176 ins_cost(100);
11177 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
11178 ins_encode %{
11179 Register dst = as_Register($dst$$reg);
11180 Register src1 = as_Register($src1$$reg);
11181 Register src2 = as_Register($src2$$reg);
11183 __ subu(dst, src1, src2);
11184 %}
11185 ins_pipe( ialu_regL_regL );
11186 %}
11188 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
11189 match(Set dst (SubL src1 (ConvI2L src2)));
11190 ins_cost(100);
11191 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
11192 ins_encode %{
11193 Register dst = as_Register($dst$$reg);
11194 Register src1 = as_Register($src1$$reg);
11195 Register src2 = as_Register($src2$$reg);
11197 __ subu(dst, src1, src2);
11198 %}
11199 ins_pipe( ialu_regL_regL );
11200 %}
11202 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
11203 match(Set dst (SubL (ConvI2L src1) src2));
11204 ins_cost(200);
11205 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
11206 ins_encode %{
11207 Register dst = as_Register($dst$$reg);
11208 Register src1 = as_Register($src1$$reg);
11209 Register src2 = as_Register($src2$$reg);
11211 __ subu(dst, src1, src2);
11212 %}
11213 ins_pipe( ialu_regL_regL );
11214 %}
11216 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
11217 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
11218 ins_cost(200);
11219 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
11220 ins_encode %{
11221 Register dst = as_Register($dst$$reg);
11222 Register src1 = as_Register($src1$$reg);
11223 Register src2 = as_Register($src2$$reg);
11225 __ subu(dst, src1, src2);
11226 %}
11227 ins_pipe( ialu_regL_regL );
11228 %}
11230 // Integer MOD with Register
// Remainder via div + mfhi: the MIPS div instruction leaves the
// remainder in HI. The Loongson gsmod path is intentionally compiled
// out (if (0)) because it measured slower than div+mfhi — see the
// dated note below.
// NOTE(review): unlike divI_Reg_Reg there is no teq zero-divisor trap
// here — presumably a zero check is guaranteed upstream; confirm.
11231 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11232 match(Set dst (ModI src1 src2));
11233 ins_cost(300);
11234 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
11235 ins_encode %{
11236 Register dst = $dst$$Register;
11237 Register src1 = $src1$$Register;
11238 Register src2 = $src2$$Register;
11240 //if (UseLEXT1) {
11241 if (0) {
11242 // 2016.08.10
11243 // Experiments show that gsmod is slower that div+mfhi.
11244 // So I just disable it here.
11245 __ gsmod(dst, src1, src2);
11246 } else {
11247 __ div(src1, src2);
11248 __ mfhi(dst);
11249 }
11250 %}
11252 //ins_pipe( ialu_mod );
11253 ins_pipe( ialu_regI_regI );
11254 %}
// Long remainder: Loongson gsdmod when the LEXT1 extension is present,
// otherwise ddiv + mfhi (HI holds the 64-bit remainder).
11256 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
11257 match(Set dst (ModL src1 src2));
11258 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
11260 ins_encode %{
11261 Register dst = as_Register($dst$$reg);
11262 Register op1 = as_Register($src1$$reg);
11263 Register op2 = as_Register($src2$$reg);
11265 if (UseLEXT1) {
11266 __ gsdmod(dst, op1, op2);
11267 } else {
11268 __ ddiv(op1, op2);
11269 __ mfhi(dst);
11270 }
11271 %}
11272 ins_pipe( pipe_slow );
11273 %}
// 32-bit multiply, register-register.
11275 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11276 match(Set dst (MulI src1 src2));
11278 ins_cost(300);
11279 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
11280 ins_encode %{
11281 Register src1 = $src1$$Register;
11282 Register src2 = $src2$$Register;
11283 Register dst = $dst$$Register;
11285 __ mul(dst, src1, src2);
11286 %}
11287 ins_pipe( ialu_mult );
11288 %}
// Fused multiply-add on integers: src1*src2 + src3 using the HI/LO
// accumulator — src3 is preloaded into LO, madd accumulates, and the
// low word is read back with mflo.
11290 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
11291 match(Set dst (AddI (MulI src1 src2) src3));
11293 ins_cost(999);
11294 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
11295 ins_encode %{
11296 Register src1 = $src1$$Register;
11297 Register src2 = $src2$$Register;
11298 Register src3 = $src3$$Register;
11299 Register dst = $dst$$Register;
11301 __ mtlo(src3);
11302 __ madd(src1, src2);
11303 __ mflo(dst);
11304 %}
11305 ins_pipe( ialu_mult );
11306 %}
// 32-bit signed divide. MIPS div does not fault on a zero divisor, so
// a teq trap (trap-if-equal against R0, code 0x7) is emitted first to
// raise the divide-by-zero condition manually. With LEXT1 the 3-operand
// gsdiv is used; otherwise div + mflo (the nops presumably cover a
// HI/LO read hazard on older pipelines — confirm against the uarch).
11308 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11309 match(Set dst (DivI src1 src2));
11311 ins_cost(300);
11312 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
11313 ins_encode %{
11314 Register src1 = $src1$$Register;
11315 Register src2 = $src2$$Register;
11316 Register dst = $dst$$Register;
11318 // In MIPS, div does not cause exception.
11319 // We must trap an exception manually.
11320 __ teq(R0, src2, 0x7);
11322 if (UseLEXT1) {
11323 __ gsdiv(dst, src1, src2);
11324 } else {
11325 __ div(src1, src2);
11327 __ nop();
11328 __ nop();
11329 __ mflo(dst);
11330 }
11331 %}
11332 ins_pipe( ialu_mod );
11333 %}
// Single-precision FP divide. FP divide-by-zero follows IEEE semantics
// (yields infinity/NaN), so no manual trap is emitted — the open
// question in the original comment is preserved below.
11335 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
11336 match(Set dst (DivF src1 src2));
11338 ins_cost(300);
11339 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
11340 ins_encode %{
11341 FloatRegister src1 = $src1$$FloatRegister;
11342 FloatRegister src2 = $src2$$FloatRegister;
11343 FloatRegister dst = $dst$$FloatRegister;
11345 /* Here do we need to trap an exception manually ? */
11346 __ div_s(dst, src1, src2);
11347 %}
11348 ins_pipe( pipe_slow );
11349 %}
// Double-precision FP divide, same shape as divF.
11351 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
11352 match(Set dst (DivD src1 src2));
11354 ins_cost(300);
11355 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
11356 ins_encode %{
11357 FloatRegister src1 = $src1$$FloatRegister;
11358 FloatRegister src2 = $src2$$FloatRegister;
11359 FloatRegister dst = $dst$$FloatRegister;
11361 /* Here do we need to trap an exception manually ? */
11362 __ div_d(dst, src1, src2);
11363 %}
11364 ins_pipe( pipe_slow );
11365 %}
// Long multiply/divide. Each has a Loongson LEXT1 3-operand form
// (gsdmult / gsddiv) and a classic HI/LO fallback (dmult/ddiv + mflo).
11367 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
11368 match(Set dst (MulL src1 src2));
11369 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
11370 ins_encode %{
11371 Register dst = as_Register($dst$$reg);
11372 Register op1 = as_Register($src1$$reg);
11373 Register op2 = as_Register($src2$$reg);
11375 if (UseLEXT1) {
11376 __ gsdmult(dst, op1, op2);
11377 } else {
11378 __ dmult(op1, op2);
11379 __ mflo(dst);
11380 }
11381 %}
11382 ins_pipe( pipe_slow );
11383 %}
// Same multiply with the right operand widened from int (ConvI2L is
// absorbed; the int is already sign-extended in its register).
11385 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
11386 match(Set dst (MulL src1 (ConvI2L src2)));
11387 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
11388 ins_encode %{
11389 Register dst = as_Register($dst$$reg);
11390 Register op1 = as_Register($src1$$reg);
11391 Register op2 = as_Register($src2$$reg);
11393 if (UseLEXT1) {
11394 __ gsdmult(dst, op1, op2);
11395 } else {
11396 __ dmult(op1, op2);
11397 __ mflo(dst);
11398 }
11399 %}
11400 ins_pipe( pipe_slow );
11401 %}
// NOTE(review): no teq zero-divisor trap here, unlike divI_Reg_Reg —
// presumably handled elsewhere; confirm.
11403 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
11404 match(Set dst (DivL src1 src2));
11405 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
11407 ins_encode %{
11408 Register dst = as_Register($dst$$reg);
11409 Register op1 = as_Register($src1$$reg);
11410 Register op2 = as_Register($src2$$reg);
11412 if (UseLEXT1) {
11413 __ gsddiv(dst, op1, op2);
11414 } else {
11415 __ ddiv(op1, op2);
11416 __ mflo(dst);
11417 }
11418 %}
11419 ins_pipe( pipe_slow );
11420 %}
// FP add/subtract, single (_s) and double (_d) precision — direct
// one-instruction encodings.
11422 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
11423 match(Set dst (AddF src1 src2));
11424 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
11425 ins_encode %{
11426 FloatRegister src1 = as_FloatRegister($src1$$reg);
11427 FloatRegister src2 = as_FloatRegister($src2$$reg);
11428 FloatRegister dst = as_FloatRegister($dst$$reg);
11430 __ add_s(dst, src1, src2);
11431 %}
11432 ins_pipe( fpu_regF_regF );
11433 %}
11435 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
11436 match(Set dst (SubF src1 src2));
11437 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
11438 ins_encode %{
11439 FloatRegister src1 = as_FloatRegister($src1$$reg);
11440 FloatRegister src2 = as_FloatRegister($src2$$reg);
11441 FloatRegister dst = as_FloatRegister($dst$$reg);
11443 __ sub_s(dst, src1, src2);
11444 %}
11445 ins_pipe( fpu_regF_regF );
11446 %}
11447 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
11448 match(Set dst (AddD src1 src2));
11449 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
11450 ins_encode %{
11451 FloatRegister src1 = as_FloatRegister($src1$$reg);
11452 FloatRegister src2 = as_FloatRegister($src2$$reg);
11453 FloatRegister dst = as_FloatRegister($dst$$reg);
11455 __ add_d(dst, src1, src2);
11456 %}
11457 ins_pipe( fpu_regF_regF );
11458 %}
11460 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
11461 match(Set dst (SubD src1 src2));
11462 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
11463 ins_encode %{
11464 FloatRegister src1 = as_FloatRegister($src1$$reg);
11465 FloatRegister src2 = as_FloatRegister($src2$$reg);
11466 FloatRegister dst = as_FloatRegister($dst$$reg);
11468 __ sub_d(dst, src1, src2);
11469 %}
11470 ins_pipe( fpu_regF_regF );
11471 %}
// FP negate, single and double precision.
11473 instruct negF_reg(regF dst, regF src) %{
11474 match(Set dst (NegF src));
11475 format %{ "negF $dst, $src @negF_reg" %}
11476 ins_encode %{
11477 FloatRegister src = as_FloatRegister($src$$reg);
11478 FloatRegister dst = as_FloatRegister($dst$$reg);
11480 __ neg_s(dst, src);
11481 %}
11482 ins_pipe( fpu_regF_regF );
11483 %}
11485 instruct negD_reg(regD dst, regD src) %{
11486 match(Set dst (NegD src));
11487 format %{ "negD $dst, $src @negD_reg" %}
11488 ins_encode %{
11489 FloatRegister src = as_FloatRegister($src$$reg);
11490 FloatRegister dst = as_FloatRegister($dst$$reg);
11492 __ neg_d(dst, src);
11493 %}
11494 ins_pipe( fpu_regF_regF );
11495 %}
// Single-precision multiply.
11498 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
11499 match(Set dst (MulF src1 src2));
11500 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
11501 ins_encode %{
11502 FloatRegister src1 = $src1$$FloatRegister;
11503 FloatRegister src2 = $src2$$FloatRegister;
11504 FloatRegister dst = $dst$$FloatRegister;
11506 __ mul_s(dst, src1, src2);
11507 %}
11508 ins_pipe( fpu_regF_regF );
11509 %}
// Fused single-precision multiply-add. The huge ins_cost effectively
// disables the pattern (fused madd rounds once, which can differ from
// the separate mul+add the Java spec requires) — see original note.
11511 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
11512 match(Set dst (AddF (MulF src1 src2) src3));
11513 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
11514 ins_cost(44444);
11515 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
11516 ins_encode %{
11517 FloatRegister src1 = $src1$$FloatRegister;
11518 FloatRegister src2 = $src2$$FloatRegister;
11519 FloatRegister src3 = $src3$$FloatRegister;
11520 FloatRegister dst = $dst$$FloatRegister;
11522 __ madd_s(dst, src1, src2, src3);
11523 %}
11524 ins_pipe( fpu_regF_regF );
11525 %}
11527 // Mul two double precision floating point numbers
11528 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
11529 match(Set dst (MulD src1 src2));
11530 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
11531 ins_encode %{
11532 FloatRegister src1 = $src1$$FloatRegister;
11533 FloatRegister src2 = $src2$$FloatRegister;
11534 FloatRegister dst = $dst$$FloatRegister;
11536 __ mul_d(dst, src1, src2);
11537 %}
11538 ins_pipe( fpu_regF_regF );
11539 %}
// Fused double-precision multiply-add; disabled via prohibitive cost
// for the same compatibility reason as maddF above.
11541 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
11542 match(Set dst (AddD (MulD src1 src2) src3));
11543 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
11544 ins_cost(44444);
11545 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
11546 ins_encode %{
11547 FloatRegister src1 = $src1$$FloatRegister;
11548 FloatRegister src2 = $src2$$FloatRegister;
11549 FloatRegister src3 = $src3$$FloatRegister;
11550 FloatRegister dst = $dst$$FloatRegister;
11552 __ madd_d(dst, src1, src2, src3);
11553 %}
11554 ins_pipe( fpu_regF_regF );
11555 %}
// FP absolute value and square root.
11557 instruct absF_reg(regF dst, regF src) %{
11558 match(Set dst (AbsF src));
11559 ins_cost(100);
11560 format %{ "absF $dst, $src @absF_reg" %}
11561 ins_encode %{
11562 FloatRegister src = as_FloatRegister($src$$reg);
11563 FloatRegister dst = as_FloatRegister($dst$$reg);
11565 __ abs_s(dst, src);
11566 %}
11567 ins_pipe( fpu_regF_regF );
11568 %}
11571 // intrinsics for math_native.
11572 // AbsD SqrtD CosD SinD TanD LogD Log10D
11574 instruct absD_reg(regD dst, regD src) %{
11575 match(Set dst (AbsD src));
11576 ins_cost(100);
11577 format %{ "absD $dst, $src @absD_reg" %}
11578 ins_encode %{
11579 FloatRegister src = as_FloatRegister($src$$reg);
11580 FloatRegister dst = as_FloatRegister($dst$$reg);
11582 __ abs_d(dst, src);
11583 %}
11584 ins_pipe( fpu_regF_regF );
11585 %}
11587 instruct sqrtD_reg(regD dst, regD src) %{
11588 match(Set dst (SqrtD src));
11589 ins_cost(100);
11590 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
11591 ins_encode %{
11592 FloatRegister src = as_FloatRegister($src$$reg);
11593 FloatRegister dst = as_FloatRegister($dst$$reg);
11595 __ sqrt_d(dst, src);
11596 %}
11597 ins_pipe( fpu_regF_regF );
11598 %}
// Single-precision sqrt matched on the widened idiom
// (float)sqrt((double)x) — the single sqrt_s instruction is an exact
// replacement for that round-trip.
11600 instruct sqrtF_reg(regF dst, regF src) %{
11601 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
11602 ins_cost(100);
11603 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
11604 ins_encode %{
11605 FloatRegister src = as_FloatRegister($src$$reg);
11606 FloatRegister dst = as_FloatRegister($dst$$reg);
11608 __ sqrt_s(dst, src);
11609 %}
11610 ins_pipe( fpu_regF_regF );
11611 %}
11612 //----------------------------------Logical Instructions----------------------
11613 //__________________________________Integer Logical Instructions-------------
11615 //And Instructions
11616 // And Register with Immediate
// General immediate AND: the constant is materialized into AT (the
// assembler temporary) first because andi only takes a 16-bit
// zero-extended immediate.
11617 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
11618 match(Set dst (AndI src1 src2));
11620 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
11621 ins_encode %{
11622 Register dst = $dst$$Register;
11623 Register src = $src1$$Register;
11624 int val = $src2$$constant;
11626 __ move(AT, val);
11627 __ andr(dst, src, AT);
11628 %}
11629 ins_pipe( ialu_regI_regI );
11630 %}
// Cheaper form when the constant fits andi's unsigned 16-bit field.
11632 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
11633 match(Set dst (AndI src1 src2));
11634 ins_cost(60);
11636 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
11637 ins_encode %{
11638 Register dst = $dst$$Register;
11639 Register src = $src1$$Register;
11640 int val = $src2$$constant;
11642 __ andi(dst, src, val);
11643 %}
11644 ins_pipe( ialu_regI_regI );
11645 %}
// AND with a low-bit mask (2^k - 1) strength-reduced to a bit-field
// extract: ext/dext copy `size` bits from bit 0, zeroing the rest.
// is_int_mask / is_jlong_mask return the mask's bit width.
11647 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
11648 match(Set dst (AndI src1 mask));
11649 ins_cost(60);
11651 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
11652 ins_encode %{
11653 Register dst = $dst$$Register;
11654 Register src = $src1$$Register;
11655 int size = Assembler::is_int_mask($mask$$constant);
11657 __ ext(dst, src, 0, size);
11658 %}
11659 ins_pipe( ialu_regI_regI );
11660 %}
11662 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
11663 match(Set dst (AndL src1 mask));
11664 ins_cost(60);
11666 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
11667 ins_encode %{
11668 Register dst = $dst$$Register;
11669 Register src = $src1$$Register;
11670 int size = Assembler::is_jlong_mask($mask$$constant);
11672 __ dext(dst, src, 0, size);
11673 %}
11674 ins_pipe( ialu_regI_regI );
11675 %}
// XOR with a 16-bit unsigned immediate — direct xori encoding.
11677 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
11678 match(Set dst (XorI src1 src2));
11679 ins_cost(60);
11681 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
11682 ins_encode %{
11683 Register dst = $dst$$Register;
11684 Register src = $src1$$Register;
11685 int val = $src2$$constant;
11687 __ xori(dst, src, val);
11688 %}
11689 ins_pipe( ialu_regI_regI );
11690 %}
// XOR with -1 (bitwise NOT) via Loongson LEXT3 gsorn: orn(R0, src)
// computes R0 | ~src == ~src in one instruction.
11692 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
11693 match(Set dst (XorI src1 M1));
11694 predicate(UseLEXT3);
11695 ins_cost(60);
11697 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
11698 ins_encode %{
11699 Register dst = $dst$$Register;
11700 Register src = $src1$$Register;
11702 __ gsorn(dst, R0, src);
11703 %}
11704 ins_pipe( ialu_regI_regI );
11705 %}
// Same NOT idiom with the source narrowed from long (ConvL2I absorbed).
11707 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
11708 match(Set dst (XorI (ConvL2I src1) M1));
11709 predicate(UseLEXT3);
11710 ins_cost(60);
11712 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
11713 ins_encode %{
11714 Register dst = $dst$$Register;
11715 Register src = $src1$$Register;
11717 __ gsorn(dst, R0, src);
11718 %}
11719 ins_pipe( ialu_regI_regI );
11720 %}
// Long XOR with a 16-bit unsigned immediate.
11722 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
11723 match(Set dst (XorL src1 src2));
11724 ins_cost(60);
11726 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
11727 ins_encode %{
11728 Register dst = $dst$$Register;
11729 Register src = $src1$$Register;
11730 int val = $src2$$constant;
11732 __ xori(dst, src, val);
11733 %}
11734 ins_pipe( ialu_regI_regI );
11735 %}
11737 /*
11738 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
11739 match(Set dst (XorL src1 M1));
11740 predicate(UseLEXT3);
11741 ins_cost(60);
11743 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
11744 ins_encode %{
11745 Register dst = $dst$$Register;
11746 Register src = $src1$$Register;
11748 __ gsorn(dst, R0, src);
11749 %}
11750 ins_pipe( ialu_regI_regI );
11751 %}
11752 */
// Unsigned byte load: AndI(0xFF, LoadB mem) is exactly a zero-extending
// byte load, so emit a single lbu via load_UB_enc (mask on the left).
11754 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
11755 match(Set dst (AndI mask (LoadB mem)));
11756 ins_cost(60);
// Format fixed: the encoding emits lbu (load byte unsigned), not lhu —
// the previous text mislabeled the instruction in debug/PrintAssembly
// output.
11758 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
11759 ins_encode(load_UB_enc(dst, mem));
11760 ins_pipe( ialu_loadI );
11761 %}
// Unsigned byte load, mirror pattern with the 0xFF mask on the right:
// AndI(LoadB mem, 0xFF) is a zero-extending byte load (lbu).
11763 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
11764 match(Set dst (AndI (LoadB mem) mask));
11765 ins_cost(60);
// Format fixed: the encoding emits lbu (load byte unsigned), not lhu —
// the previous text mislabeled the instruction in debug/PrintAssembly
// output.
11767 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
11768 ins_encode(load_UB_enc(dst, mem));
11769 ins_pipe( ialu_loadI );
11770 %}
// Plain 32-bit AND, register-register.
11772 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11773 match(Set dst (AndI src1 src2));
11775 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
11776 ins_encode %{
11777 Register dst = $dst$$Register;
11778 Register src1 = $src1$$Register;
11779 Register src2 = $src2$$Register;
11780 __ andr(dst, src1, src2);
11781 %}
11782 ins_pipe( ialu_regI_regI );
11783 %}
// Fused and-not / or-not (LEXT3 only): XorI with -1 is a NOT, so
// AndI(a, Xor(b,-1)) == a & ~b (gsandn) and OrI(a, Xor(b,-1)) == a | ~b
// (gsorn). The _nReg_Reg mirrors handle the NOT on the left operand,
// swapping arguments accordingly.
11785 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11786 match(Set dst (AndI src1 (XorI src2 M1)));
11787 predicate(UseLEXT3);
11789 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
11790 ins_encode %{
11791 Register dst = $dst$$Register;
11792 Register src1 = $src1$$Register;
11793 Register src2 = $src2$$Register;
11795 __ gsandn(dst, src1, src2);
11796 %}
11797 ins_pipe( ialu_regI_regI );
11798 %}
11800 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11801 match(Set dst (OrI src1 (XorI src2 M1)));
11802 predicate(UseLEXT3);
11804 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
11805 ins_encode %{
11806 Register dst = $dst$$Register;
11807 Register src1 = $src1$$Register;
11808 Register src2 = $src2$$Register;
11810 __ gsorn(dst, src1, src2);
11811 %}
11812 ins_pipe( ialu_regI_regI );
11813 %}
11815 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11816 match(Set dst (AndI (XorI src1 M1) src2));
11817 predicate(UseLEXT3);
11819 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
11820 ins_encode %{
11821 Register dst = $dst$$Register;
11822 Register src1 = $src1$$Register;
11823 Register src2 = $src2$$Register;
11825 __ gsandn(dst, src2, src1);
11826 %}
11827 ins_pipe( ialu_regI_regI );
11828 %}
11830 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11831 match(Set dst (OrI (XorI src1 M1) src2));
11832 predicate(UseLEXT3);
11834 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
11835 ins_encode %{
11836 Register dst = $dst$$Register;
11837 Register src1 = $src1$$Register;
11838 Register src2 = $src2$$Register;
11840 __ gsorn(dst, src2, src1);
11841 %}
11842 ins_pipe( ialu_regI_regI );
11843 %}
11845 // And Long Register with Register
11846 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11847 match(Set dst (AndL src1 src2));
11848 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
11849 ins_encode %{
11850 Register dst_reg = as_Register($dst$$reg);
11851 Register src1_reg = as_Register($src1$$reg);
11852 Register src2_reg = as_Register($src2$$reg);
11854 __ andr(dst_reg, src1_reg, src2_reg);
11855 %}
11856 ins_pipe( ialu_regL_regL );
11857 %}
// Long AND with a widened int operand (ConvI2L absorbed).
11859 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
11860 match(Set dst (AndL src1 (ConvI2L src2)));
11861 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
11862 ins_encode %{
11863 Register dst_reg = as_Register($dst$$reg);
11864 Register src1_reg = as_Register($src1$$reg);
11865 Register src2_reg = as_Register($src2$$reg);
11867 __ andr(dst_reg, src1_reg, src2_reg);
11868 %}
11869 ins_pipe( ialu_regL_regL );
11870 %}
// Long AND with a 16-bit unsigned immediate — andi zero-extends, which
// also zeroes the upper 32 bits, so the result is a correct long.
11872 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
11873 match(Set dst (AndL src1 src2));
11874 ins_cost(60);
11876 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
11877 ins_encode %{
11878 Register dst = $dst$$Register;
11879 Register src = $src1$$Register;
11880 long val = $src2$$constant;
11882 __ andi(dst, src, val);
11883 %}
11884 ins_pipe( ialu_regI_regI );
11885 %}
// Same AND folded with a trailing ConvL2I — the masked value already
// fits an int, so no separate narrowing instruction is needed.
11887 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
11888 match(Set dst (ConvL2I (AndL src1 src2)));
11889 ins_cost(60);
11891 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
11892 ins_encode %{
11893 Register dst = $dst$$Register;
11894 Register src = $src1$$Register;
11895 long val = $src2$$constant;
11897 __ andi(dst, src, val);
11898 %}
11899 ins_pipe( ialu_regI_regI );
11900 %}
11902 /*
11903 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11904 match(Set dst (AndL src1 (XorL src2 M1)));
11905 predicate(UseLEXT3);
11907 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
11908 ins_encode %{
11909 Register dst = $dst$$Register;
11910 Register src1 = $src1$$Register;
11911 Register src2 = $src2$$Register;
11913 __ gsandn(dst, src1, src2);
11914 %}
11915 ins_pipe( ialu_regI_regI );
11916 %}
11917 */
11919 /*
11920 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11921 match(Set dst (OrL src1 (XorL src2 M1)));
11922 predicate(UseLEXT3);
11924 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
11925 ins_encode %{
11926 Register dst = $dst$$Register;
11927 Register src1 = $src1$$Register;
11928 Register src2 = $src2$$Register;
11930 __ gsorn(dst, src1, src2);
11931 %}
11932 ins_pipe( ialu_regI_regI );
11933 %}
11934 */
11936 /*
11937 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11938 match(Set dst (AndL (XorL src1 M1) src2));
11939 predicate(UseLEXT3);
11941 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
11942 ins_encode %{
11943 Register dst = $dst$$Register;
11944 Register src1 = $src1$$Register;
11945 Register src2 = $src2$$Register;
11947 __ gsandn(dst, src2, src1);
11948 %}
11949 ins_pipe( ialu_regI_regI );
11950 %}
11951 */
11953 /*
11954 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11955 match(Set dst (OrL (XorL src1 M1) src2));
11956 predicate(UseLEXT3);
11958 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
11959 ins_encode %{
11960 Register dst = $dst$$Register;
11961 Register src1 = $src1$$Register;
11962 Register src2 = $src2$$Register;
11964 __ gsorn(dst, src2, src1);
11965 %}
11966 ins_pipe( ialu_regI_regI );
11967 %}
11968 */
// AND-in-place with specific negative masks, strength-reduced to dins
// (insert zeros from R0 into the bit positions the mask clears):
//   -8   = ...11111000 -> clear bits 0..2  : dins(dst, R0, 0, 3)
//   -5   = ...11111011 -> clear bit 2      : dins(dst, R0, 2, 1)
//   -7   = ...11111001 -> clear bits 1..2  : dins(dst, R0, 1, 2)
//   -4   = ...11111100 -> clear bits 0..1  : dins(dst, R0, 0, 2)
//   -121 = ...10000111 -> clear bits 3..6  : dins(dst, R0, 3, 4)
11970 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
11971 match(Set dst (AndL dst M8));
11972 ins_cost(60);
11974 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
11975 ins_encode %{
11976 Register dst = $dst$$Register;
11978 __ dins(dst, R0, 0, 3);
11979 %}
11980 ins_pipe( ialu_regI_regI );
11981 %}
11983 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
11984 match(Set dst (AndL dst M5));
11985 ins_cost(60);
11987 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
11988 ins_encode %{
11989 Register dst = $dst$$Register;
11991 __ dins(dst, R0, 2, 1);
11992 %}
11993 ins_pipe( ialu_regI_regI );
11994 %}
11996 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
11997 match(Set dst (AndL dst M7));
11998 ins_cost(60);
12000 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
12001 ins_encode %{
12002 Register dst = $dst$$Register;
12004 __ dins(dst, R0, 1, 2);
12005 %}
12006 ins_pipe( ialu_regI_regI );
12007 %}
12009 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
12010 match(Set dst (AndL dst M4));
12011 ins_cost(60);
12013 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
12014 ins_encode %{
12015 Register dst = $dst$$Register;
12017 __ dins(dst, R0, 0, 2);
12018 %}
12019 ins_pipe( ialu_regI_regI );
12020 %}
12022 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
12023 match(Set dst (AndL dst M121));
12024 ins_cost(60);
12026 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
12027 ins_encode %{
12028 Register dst = $dst$$Register;
12030 __ dins(dst, R0, 3, 4);
12031 %}
12032 ins_pipe( ialu_regI_regI );
12033 %}
12035 // Or Long Register with Register
12036 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
12037 match(Set dst (OrL src1 src2));
12038 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
12039 ins_encode %{
12040 Register dst_reg = $dst$$Register;
12041 Register src1_reg = $src1$$Register;
12042 Register src2_reg = $src2$$Register;
12044 __ orr(dst_reg, src1_reg, src2_reg);
12045 %}
12046 ins_pipe( ialu_regL_regL );
12047 %}
// OR where the left operand is a pointer reinterpreted as a long
// (CastP2X) — no conversion code is needed, just the plain or.
12049 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
12050 match(Set dst (OrL (CastP2X src1) src2));
12051 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
12052 ins_encode %{
12053 Register dst_reg = $dst$$Register;
12054 Register src1_reg = $src1$$Register;
12055 Register src2_reg = $src2$$Register;
12057 __ orr(dst_reg, src1_reg, src2_reg);
12058 %}
12059 ins_pipe( ialu_regL_regL );
12060 %}
12062 // Xor Long Register with Register
12063 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
12064 match(Set dst (XorL src1 src2));
12065 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
12066 ins_encode %{
12067 Register dst_reg = as_Register($dst$$reg);
12068 Register src1_reg = as_Register($src1$$reg);
12069 Register src2_reg = as_Register($src2$$reg);
12071 __ xorr(dst_reg, src1_reg, src2_reg);
12072 %}
12073 ins_pipe( ialu_regL_regL );
12074 %}
12076 // Shift Left by 8-bit immediate
// 32-bit shift-left by constant; sll masks the shift amount to 5 bits
// in hardware, matching Java's (shift & 31) semantics.
12077 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
12078 match(Set dst (LShiftI src shift));
12080 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
12081 ins_encode %{
12082 Register src = $src$$Register;
12083 Register dst = $dst$$Register;
12084 int shamt = $shift$$constant;
12086 __ sll(dst, src, shamt);
12087 %}
12088 ins_pipe( ialu_regI_regI );
12089 %}
// Same shift with the source narrowed from long (ConvL2I absorbed:
// sll operates on the low 32 bits and sign-extends the result).
12091 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
12092 match(Set dst (LShiftI (ConvL2I src) shift));
12094 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
12095 ins_encode %{
12096 Register src = $src$$Register;
12097 Register dst = $dst$$Register;
12098 int shamt = $shift$$constant;
12100 __ sll(dst, src, shamt);
12101 %}
12102 ins_pipe( ialu_regI_regI );
12103 %}
// (x << 16) & 0xFFFF0000 is just x << 16 — the AND clears only bits
// the shift already zeroed, so it is folded away.
12105 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
12106 match(Set dst (AndI (LShiftI src shift) mask));
12108 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
12109 ins_encode %{
12110 Register src = $src$$Register;
12111 Register dst = $dst$$Register;
12113 __ sll(dst, src, 16);
12114 %}
12115 ins_pipe( ialu_regI_regI );
12116 %}
// ((int)(x & 7) << 16) >> 16: the value is at most 7, so the
// shift-up/shift-down sign-extension round trip is a no-op and the
// whole idiom reduces to andi(x, 7).
12118 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
12119 %{
12120 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
12122 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
12123 ins_encode %{
12124 Register src = $src$$Register;
12125 Register dst = $dst$$Register;
12127 __ andi(dst, src, 7);
12128 %}
12129 ins_pipe(ialu_regI_regI);
12130 %}
// ((a | c) << 16) >> 16 with c in [0, 32767]: the i2s round trip is
// dropped and only the ori remains (result already fits a short).
12132 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
12133 %{
12134 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
12136 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
12137 ins_encode %{
12138 Register src = $src1$$Register;
12139 int val = $src2$$constant;
12140 Register dst = $dst$$Register;
12142 __ ori(dst, src, val);
12143 %}
12144 ins_pipe(ialu_regI_regI);
12145 %}
12147 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
12148 // This idiom is used by the compiler for the i2s bytecode.
// seh sign-extends the low halfword in one instruction.
12149 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
12150 %{
12151 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
12153 format %{ "i2s $dst, $src\t# @i2s" %}
12154 ins_encode %{
12155 Register src = $src$$Register;
12156 Register dst = $dst$$Register;
12158 __ seh(dst, src);
12159 %}
12160 ins_pipe(ialu_regI_regI);
12161 %}
12163 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
12164 // This idiom is used by the compiler for the i2b bytecode.
// seb sign-extends the low byte in one instruction.
12165 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
12166 %{
12167 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
12169 format %{ "i2b $dst, $src\t# @i2b" %}
12170 ins_encode %{
12171 Register src = $src$$Register;
12172 Register dst = $dst$$Register;
12174 __ seb(dst, src);
12175 %}
12176 ins_pipe(ialu_regI_regI);
12177 %}
// Int shift-left by constant with the source narrowed from long.
// NOTE(review): this matches the same tree as salL2I_Reg_imm above —
// looks like a duplicate matcher; confirm which one adlc selects.
12180 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
12181 match(Set dst (LShiftI (ConvL2I src) shift));
12183 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
12184 ins_encode %{
12185 Register src = $src$$Register;
12186 Register dst = $dst$$Register;
12187 int shamt = $shift$$constant;
12189 __ sll(dst, src, shamt);
12190 %}
12191 ins_pipe( ialu_regI_regI );
12192 %}
12194 // Shift Left by register amount (sllv uses the low 5 bits of shift).
12195 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
12196 match(Set dst (LShiftI src shift));
12198 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
12199 ins_encode %{
12200 Register src = $src$$Register;
12201 Register dst = $dst$$Register;
12202 Register shamt = $shift$$Register;
12203 __ sllv(dst, src, shamt);
12204 %}
12205 ins_pipe( ialu_regI_regI );
12206 %}
12209 // Shift Left Long
// Long shift-left by immediate.  dsll can only encode amounts that fit
// its 5-bit field; otherwise reduce the amount mod 64 and use dsll32
// for the upper half of the range.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else {
      // Java masks long shift amounts to 6 bits.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);  // shifts by sa-32+32
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a sign-extended int (ConvI2L is free: ints are
// kept sign-extended in 64-bit registers in this port).
// NOTE(review): salL_convI2L_Reg_imm below matches the identical
// pattern with the same cost and encoding — one of the two rules looks
// redundant; confirm with ADLC output before removing either.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else {
      // Java masks long shift amounts to 6 bits.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
12256 // Shift Left Long
// Long shift-left by a variable (register) amount; dsllv uses only the
// low 6 bits of the shift register, matching Java semantics.
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register value  = as_Register($src$$reg);
    Register result = as_Register($dst$$reg);
    __ dsllv(result, value, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a sign-extended int by an immediate.
// NOTE(review): this matches the identical pattern, cost and encoding
// as salL_RegI2L_imm above — one of the two rules looks redundant;
// confirm with ADLC output before removing either.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      // Java masks long shift amounts to 6 bits.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
12293 // Shift Right Long
// Long arithmetic shift-right by immediate.  The amount is pre-masked
// to 6 bits; dsra handles 0..31 via its 5-bit field, dsra32 the rest.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f);  // Java masks long shifts to 6 bits
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >> shift) with shift in [32, 63]: a single dsra32 yields a
// properly sign-extended int, so the ConvL2I costs nothing.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;  // 32..63 by operand constraint

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
12330 // Shift Right Long arithmetically
// Long arithmetic shift-right by a variable (register) amount; dsrav
// uses only the low 6 bits of the shift register.
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register value  = as_Register($src$$reg);
    Register result = as_Register($dst$$reg);
    __ dsrav(result, value, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
12344 // Shift Right Long logically
// Long logical shift-right by a variable (register) amount; dsrlv uses
// only the low 6 bits of the shift register.
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register value  = as_Register($src$$reg);
    Register result = as_Register($dst$$reg);
    __ dsrlv(result, value, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long logical shift-right by an immediate in [0, 31]: fits the dsrl
// 5-bit field directly, hence the lower cost than the generic rule.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((int)(long >>> shift)) & Integer.MAX_VALUE folded to a single dext:
// extract 31 bits starting at bit `shift` (zero-extended, so the sign
// bit of the int result is clear, matching the & max_int).
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);  // MIPS64r2 bit-field extract
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical shift-right of a pointer reinterpreted as a long (CastP2X is
// free on this port), immediate amount in [0, 31].
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long logical shift-right by an immediate in [32, 63]: dsrl32 shifts
// by (shamt - 32) + 32.
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;  // 32..63 by operand constraint

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >>> shift) with shift > 32: the result fits in 31 bits and
// is therefore already a valid sign-extended int, so the ConvL2I is
// free.  The predicate excludes shift == 32, where the top result bit
// could be set and a sign-extension would still be required.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical shift-right of a pointer reinterpreted as a long, immediate
// amount in [32, 63].
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;  // 32..63 by operand constraint

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
12443 // Xor Instructions
12444 // Xor Register with Register
// Integer XOR.  xorr operates on the full 64-bit registers; the
// trailing sll(…, 0) sign-extends the low 32 bits so the int stays in
// the canonical sign-extended form this port expects.
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int: re-sign-extend low 32 bits */
  %}

  ins_pipe( ialu_regI_regI );
%}
12461 // Or Instructions
12462 // Or Register with Register
// Integer OR: dst = src1 | src2.  Both inputs are canonically
// sign-extended ints, so the OR result needs no re-extension.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register result = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;
    __ orr(result, lhs, rhs);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Matches (src >>> rshift) | ((src & 1) << lshift) where the predicate
// guarantees rshift + lshift == 32: equivalent to rotating right by 1
// and then logically shifting right by rshift - 1.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) {     // rshift == 1 needs no further shift
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// OR of a long with a pointer reinterpreted as an integral value
// (CastP2X is free).  Note the operands are typed as 64-bit registers
// even though the matched node is OrI.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
12511 // Logical Shift Right by 8-bit immediate
// Int logical shift-right by immediate; srl masks the amount to 5 bits.
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  //effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask with mask a contiguous low-bit pattern:
// folded into a single MIPS32r2 ext (bit-field extract).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    // is_int_mask yields the field width (number of one bits) of mask,
    // per the "one-bits($mask)" format above.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int rotate-left: (dst << lshift) | (dst >>> rshift) where the
// predicate requires lshift + rshift == 32 (mod 32); implemented as a
// rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-left with rshift in [0, 31]: predicate requires
// lshift + rshift == 64 (mod 64); implemented as drotr by rshift.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-left with rshift in [32, 63]: drotr32 rotates right by
// (rshift - 32) + 32.
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int rotate-right: same rotr as the rol rule, matched with the shift
// operands in the opposite order.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-right with rshift in [0, 31].
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-right with rshift in [32, 63]: drotr32 rotates right by
// (rshift - 32) + 32.
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
12638 // Logical Shift Right
// Int logical shift-right by a variable (register) amount; srlv uses
// only the low 5 bits of the shift register.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register result = $dst$$Register;
    Register value  = $src$$Register;
    Register amount = $shift$$Register;
    __ srlv(result, value, amount);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int arithmetic shift-right by immediate; sra masks the amount to
// 5 bits.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register result = $dst$$Register;
    Register value  = $src$$Register;
    int amount = $shift$$constant;
    __ sra(result, value, amount);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int arithmetic shift-right by a variable (register) amount.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register result = $dst$$Register;
    Register value  = $src$$Register;
    Register amount = $shift$$Register;
    __ srav(result, value, amount);
  %}
  ins_pipe( ialu_regI_regI );
%}
12681 //----------Convert Int to Boolean---------------------------------------------
// Int to boolean: dst = (src != 0) ? 1 : 0.  When dst aliases src the
// value is first copied to AT so movz still sees the original input
// after dst is pre-loaded with 1.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);   // dst = 0 if src == 0
    } else {
      __ move(AT, src);        // preserve src before overwriting dst
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Int to long: ints are already kept sign-extended in 64-bit registers,
// so a same-register conversion is a no-op; otherwise sll(…, 0)
// sign-extends into the destination.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long to int: sll(…, 0) truncates to 32 bits and sign-extends,
// producing the canonical int form.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register result = as_Register($dst$$reg);
    Register value  = as_Register($src$$reg);
    __ sll(result, value, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (long)(int)(long src): a single sll(…, 0) performs the truncate and
// re-sign-extend in one instruction.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register result = as_Register($dst$$reg);
    Register value  = as_Register($src$$reg);
    __ sll(result, value, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Long to double: move the 64-bit integer into the FPU, then convert.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);      // GPR -> FPR (64-bit)
    __ cvt_d_l(dst, dst);    // long -> double
  %}

  ins_pipe( pipe_slow );
%}
// Fast ConvD2L: truncate with trunc.l.d, then fix up overflow and NaN
// inline (no runtime call).  trunc.l.d yields max_long for any value it
// cannot represent, so that sentinel triggers the fix-up path.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);              // AT = max_long
    __ dmfc1(dst, F30);

    __ bne(dst, AT, Done);           // not the sentinel: result is final
    __ delayed()->mtc1(R0, F30);     // delay slot: seed F30 with 0

    __ cvt_d_w(F30, F30);            // F30 = 0.0
    __ c_ult_d(src, F30);            // src unordered-or-less-than 0.0?
    __ bc1f(Done);                   // positive overflow keeps max_long
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);            // -1 - max_long = min_long (negative overflow)
    __ movt(dst, R0);                // NaN -> 0
    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// Slow ConvD2L: truncate, then inspect the FCSR invalid-operation bit;
// if the truncation was invalid, fall back to SharedRuntime::d2l.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);            // delay slot: NaN result is 0

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // no invalid op: truncated value is good
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);          // F12 = first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Fast ConvF2I: truncate with trunc.w.s, then fix up NaN and overflow
// inline.  trunc.w.s yields max_int on overflow; the sign bit of the
// original float selects min_int for negative overflow.
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_w_s(F30, fval);
    __ move(AT, 0x7fffffff);         // AT = max_int sentinel
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);               // NaN -> 0

    __ bne(AT, dreg, L);             // not the sentinel: result is final
    __ delayed()->lui(T9, 0x8000);   // delay slot: T9 = min_int

    __ mfc1(AT, fval);               // raw float bits
    __ andr(AT, AT, T9);             // isolate the sign bit

    __ movn(dreg, T9, AT);           // negative overflow -> min_int

    __ bind(L);

  %}

  ins_pipe( pipe_slow );
%}
// Slow ConvF2I: truncate, check the FCSR invalid-operation bit, and
// fall back to SharedRuntime::f2i when the truncation was invalid.
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);           // delay slot: NaN result is 0

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime::f2i() to perform the valid conversion */
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // no invalid op: truncated value is good
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);         // F12 = first FP argument register

    // This bug was found when running ezDS's control-panel.
    //  J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
    //
    // An integer array index had been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
    // V0 is corrupted during call_VM_leaf(), and must be preserved across the call.
    //
    __ push(fval);
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ pop(fval);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Fast ConvF2L: truncate with trunc.l.s, then fix up NaN and overflow
// inline.  trunc.l.s yields max_long on overflow; the float's sign bit
// selects min_long (min_int << 32) for negative overflow.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_l_s(F30, fval);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);              // AT = max_long sentinel
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);               // NaN -> 0

    __ bne(AT, dreg, L);             // not the sentinel: result is final
    __ delayed()->lui(T9, 0x8000);   // delay slot: T9 = 0x80000000 (sign-extended)

    __ mfc1(AT, fval);               // raw float bits
    __ andr(AT, AT, T9);             // isolate the sign bit

    __ dsll32(T9, T9, 0);            // T9 = min_long
    __ movn(dreg, T9, AT);           // negative overflow -> min_long

    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Slow ConvF2L: truncate, check the FCSR invalid-operation bit, and
// fall back to SharedRuntime::f2l when the truncation was invalid.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);            // delay slot: NaN result is 0

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // no invalid op: truncated value is good
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);         // F12 = first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Long to float: move the 64-bit integer into the FPU, then convert.
// (Removed a declared-but-unused `Label L;` left over from an earlier
// version of this encoding.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);      // GPR -> FPR (64-bit)
    __ cvt_s_l(dst, dst);    // long -> float
  %}

  ins_pipe( pipe_slow );
%}
// Int to float: move the 32-bit integer into the FPU, then convert.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register ival = $src$$Register;
    FloatRegister fres = $dst$$FloatRegister;

    __ mtc1(ival, fres);
    __ cvt_s_w(fres, fres);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0 — smear the sign bit
// across the word with an arithmetic shift.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register value  = $p$$Register;
    Register result = $dst$$Register;

    __ sra(result, value, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0.  slt produces 0/1, which negation
// turns into the all-zeros / all-ones mask.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register lhs  = $p$$Register;
    Register rhs  = $q$$Register;
    Register mask = $dst$$Register;

    __ slt(mask, lhs, rhs);      // mask = (p < q) ? 1 : 0
    __ subu(mask, R0, mask);     // 1 -> -1, 0 -> 0
  %}
  ins_pipe( pipe_slow );
%}
// Pointer to boolean: dst = (src != NULL) ? 1 : 0.  Same scheme as
// convI2B, using AT to preserve src when dst aliases it.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);   // dst = 0 if src == 0
    } else {
      __ move(AT, src);        // preserve src before overwriting dst
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Int to double: move the 32-bit integer into the FPU, then convert.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register ival = $src$$Register;
    FloatRegister dres = $dst$$FloatRegister;
    __ mtc1(ival, dres);
    __ cvt_d_w(dres, dres);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Float to double: single FPU convert, always exact.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister to = $dst$$FloatRegister;
    FloatRegister from = $src$$FloatRegister;

    __ cvt_d_s(to, from);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double to float: single FPU convert (rounds per current FCSR mode).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister to = $dst$$FloatRegister;
    FloatRegister from = $src$$FloatRegister;

    __ cvt_s_d(to, from);
  %}
  ins_pipe( fpu_regF_regF );
%}
13093 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast ConvD2I: truncate with trunc.w.d, then fix up overflow and NaN
// inline.  trunc.w.d yields max_int for any unrepresentable value, so
// that sentinel triggers the fix-up path.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);           // not the sentinel: result is final
    __ delayed()->mtc1(R0, F30);     // delay slot: seed F30 with 0

    __ cvt_d_w(F30, F30);            // F30 = 0.0
    __ c_ult_d(src, F30);            // src unordered-or-less-than 0.0?
    __ bc1f(Done);                   // positive overflow keeps max_int
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);          // -1 - max_int = min_int (negative overflow)
    __ movt(dst, R0);                // NaN -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Slow ConvD2I: truncate, check the FCSR invalid-operation bit, and
// fall back to SharedRuntime::d2i when the truncation was invalid.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // no invalid op: truncated value is good
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);          // F12 = first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
13156 // Convert oop pointer into compressed form
// Compress a (possibly null) oop pointer; the null-aware macro handles
// the TypePtr::NotNull-excluded case.
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register oop = $src$$Register;
    Register narrow = $dst$$Register;

    __ encode_heap_oop(narrow, oop);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop statically known to be non-null — the macro can skip
// the null check.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    Register narrow = $dst$$Register;
    Register oop = $src$$Register;
    __ encode_heap_oop_not_null(narrow, oop);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a (possibly null) narrow oop; constants and known
// non-nulls are handled by decodeHeapOop_not_null below.
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register narrow = $src$$Register;
    Register oop = $dst$$Register;

    __ decode_heap_oop(oop, narrow);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop known non-null (or constant).  The macro
// assembler provides an in-place variant for the aliased-register case.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);   // in-place variant
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (always non-null).
// Fixed: the format string previously read "encode_heap_oop_not_null",
// which made disassembly/debug output misleading for klass compression.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow klass pointer (always non-null).  The macro
// assembler provides an in-place variant for the aliased-register case.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);   // in-place variant
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
13235 //FIXME
//FIXME
// Load the current JavaThread pointer.  With OPT_THREAD the thread is
// pinned in register TREG; otherwise fall back to the get_thread helper.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a type-system-only node: no code is emitted.
// Fixed: the format string previously had the typo "chekCastPP".
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a type-system-only node: no code is emitted.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a type-system-only node: no code is emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
13280 // Return Instruction
13281 // Remove the return address & jump to it.
// Return Instruction: jump through the return address register (RA);
// the branch delay slot is filled with a nop.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ delayed()->nop();
  %}

  ins_pipe( pipe_jump );
%}
13294 /*
13295 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
13296 instruct jumpXtnd(mRegL switch_val) %{
13297 match(Jump switch_val);
13299 ins_cost(350);
13301 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
13302 "jr T9\n\t"
13303 "nop" %}
13304 ins_encode %{
13305 Register table_base = $constanttablebase;
13306 int con_offset = $constantoffset;
13307 Register switch_reg = $switch_val$$Register;
13309 if (UseLEXT1) {
13310 if (Assembler::is_simm(con_offset, 8)) {
13311 __ gsldx(T9, table_base, switch_reg, con_offset);
13312 } else if (Assembler::is_simm16(con_offset)) {
13313 __ daddu(T9, table_base, switch_reg);
13314 __ ld(T9, T9, con_offset);
13315 } else {
13316 __ move(T9, con_offset);
13317 __ daddu(AT, table_base, switch_reg);
13318 __ gsldx(T9, AT, T9, 0);
13319 }
13320 } else {
13321 if (Assembler::is_simm16(con_offset)) {
13322 __ daddu(T9, table_base, switch_reg);
13323 __ ld(T9, T9, con_offset);
13324 } else {
13325 __ move(T9, con_offset);
13326 __ daddu(AT, table_base, switch_reg);
13327 __ daddu(AT, T9, AT);
13328 __ ld(T9, AT, 0);
13329 }
13330 }
13332 __ jr(T9);
13333 __ delayed()->nop();
13335 %}
13336 ins_pipe(pipe_jump);
13337 %}
13338 */
13341 // Tail Jump; remove the return address; jump to target.
13342 // TailCall above leaves the return address around.
13343 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
13344 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
13345 // "restore" before this instruction (in Epilogue), we need to materialize it
13346 // in %i0.
13347 //FIXME
// Tail jump used only by the rethrow stub (fancy_jump == 2): move the
// exception oop and the return address into the registers the exception
// machinery expects (V0/V1), then jump indirect.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    // V0, V1 are indicated in:
    //  [stubGenerator_mips.cpp] generate_forward_exception()
    //  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
    //
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ delayed()->nop();
  %}
  ins_pipe( pipe_jump );
%}
13372 // ============================================================================
13373 // Procedure Call/Return Instructions
13374 // Call Java Static Instruction
13375 // Note: If this code changes, the corresponding ret_addr_offset() and
13376 // compute_padding() functions will have to be adjusted.
// Direct call to a statically-bound Java method.  The actual call
// sequence is produced by the Java_Static_Call encoding class.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);   // call target is pc-relative and must be relocated
%}
13388 // Call Java Dynamic Instruction
13389 // Note: If this code changes, the corresponding ret_addr_offset() and
13390 // compute_padding() functions will have to be adjusted.
// Dynamically-bound (inline-cache) Java call.  The IC register is seeded
// with Universe::non_oop_word() by the Java_Dynamic_Call encoding.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, #Universe::non_oop_word()\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);   // call target is pc-relative and must be relocated
%}
// Call into the runtime without a safepoint and without touching FP state.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);    // align the call site
%}
13415 // Prefetch instructions.
// Prefetch for read: materialize the effective address base+index*scale+disp
// in AT, then issue "pref 0" (load hint).
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp (via T9 when disp does not fit in 16 bits)
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: same address computation as prefetchrNTA but with
// the "pref 1" (store) hint.
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp (via T9 when disp does not fit in 16 bits)
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
13480 // Prefetch instructions for allocation.
// Prefetch for allocation: implemented as a byte load into R0 (the
// hard-wired zero register), which touches the cache line while
// discarding the value.  Uses the Loongson gslbx scaled-index load when
// UseLEXT1 is available to save address-arithmetic instructions.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Destination R0: the loaded byte is thrown away.
    Register dst = R0;

    if ( index != 0 ) {
      if ( Assembler::is_simm16(disp) ) {
        if (UseLEXT1) {
          // gslbx folds base+index+disp into one instruction.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // disp does not fit in simm16: materialize it in T9 first.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register.
      if ( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLEXT1) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
13545 // Call runtime without safepoint
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);    // align the call site
%}
13558 // Load Char (16bit unsigned)
13559 instruct loadUS(mRegI dst, memory mem) %{
13560 match(Set dst (LoadUS mem));
13562 ins_cost(125);
13563 format %{ "loadUS $dst,$mem @ loadC" %}
13564 ins_encode(load_C_enc(dst, mem));
13565 ins_pipe( ialu_loadI );
13566 %}
// Load char and widen to long in one rule (LoadUS already zero-extends,
// so the ConvI2L is free).
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
13577 // Store Char (16bit unsigned)
// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store a zero char directly from R0 (avoids materializing the constant).
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0f by moving the zero register into the FPU.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);   // all-zero bit pattern == +0.0f
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the constant table.  The short path uses a
// simm16 offset from the constant-table base; otherwise the offset is
// materialized and the Loongson gslwxc1 indexed load is used when available.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLEXT1) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0 by moving the 64-bit zero register into the FPU.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);   // all-zero bit pattern == +0.0
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the constant table; mirrors loadConF but
// with 64-bit ldc1 / gsldxc1.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLEXT1) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
13671 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float constant 0.0f by writing R0 directly with an integer store
// (sw/gsswx) — avoids an FPU round-trip.  The UseLEXT1 paths use the
// Loongson gsswx indexed store with its narrower simm8 offset.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (UseLEXT1) {
        // gsswx takes only a simm8 offset, hence the three-way split on disp.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if (UseLEXT1) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
13758 // Load Double
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
13768 // Load Double - UNaligned
// Load Double - UNaligned.  Currently reuses the aligned encoding.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
13778 instruct storeD_reg( memory mem, regD src) %{
13779 match(Set mem (StoreD mem src));
13781 ins_cost(50);
13782 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
13783 ins_encode(store_D_reg_enc(mem, src));
13784 ins_pipe( fpu_storeF );
13785 %}
13787 instruct storeD_imm0( memory mem, immD0 zero) %{
13788 match(Set mem (StoreD mem zero));
13790 ins_cost(40);
13791 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
13792 ins_encode %{
13793 int base = $mem$$base;
13794 int index = $mem$$index;
13795 int scale = $mem$$scale;
13796 int disp = $mem$$disp;
13798 __ mtc1(R0, F30);
13799 __ cvt_d_w(F30, F30);
13801 if( index != 0 ) {
13802 if (UseLEXT1) {
13803 if ( Assembler::is_simm(disp, 8) ) {
13804 if (scale == 0) {
13805 __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
13806 } else {
13807 __ dsll(T9, as_Register(index), scale);
13808 __ gssdxc1(F30, as_Register(base), T9, disp);
13809 }
13810 } else if ( Assembler::is_simm16(disp) ) {
13811 if (scale == 0) {
13812 __ daddu(AT, as_Register(base), as_Register(index));
13813 __ sdc1(F30, AT, disp);
13814 } else {
13815 __ dsll(T9, as_Register(index), scale);
13816 __ daddu(AT, as_Register(base), T9);
13817 __ sdc1(F30, AT, disp);
13818 }
13819 } else {
13820 if (scale == 0) {
13821 __ move(T9, disp);
13822 __ daddu(AT, as_Register(index), T9);
13823 __ gssdxc1(F30, as_Register(base), AT, 0);
13824 } else {
13825 __ move(T9, disp);
13826 __ dsll(AT, as_Register(index), scale);
13827 __ daddu(AT, AT, T9);
13828 __ gssdxc1(F30, as_Register(base), AT, 0);
13829 }
13830 }
13831 } else { // not use loongson isa
13832 if(scale != 0) {
13833 __ dsll(T9, as_Register(index), scale);
13834 __ daddu(AT, as_Register(base), T9);
13835 } else {
13836 __ daddu(AT, as_Register(base), as_Register(index));
13837 }
13838 if( Assembler::is_simm16(disp) ) {
13839 __ sdc1(F30, AT, disp);
13840 } else {
13841 __ move(T9, disp);
13842 __ daddu(AT, AT, T9);
13843 __ sdc1(F30, AT, 0);
13844 }
13845 }
13846 } else {// index is 0
13847 if (UseLEXT1) {
13848 if ( Assembler::is_simm16(disp) ) {
13849 __ sdc1(F30, as_Register(base), disp);
13850 } else {
13851 __ move(T9, disp);
13852 __ gssdxc1(F30, as_Register(base), T9, 0);
13853 }
13854 } else {
13855 if( Assembler::is_simm16(disp) ) {
13856 __ sdc1(F30, as_Register(base), disp);
13857 } else {
13858 __ move(T9, disp);
13859 __ daddu(AT, as_Register(base), T9);
13860 __ sdc1(F30, AT, 0);
13861 }
13862 }
13863 }
13864 %}
13865 ins_pipe( ialu_storeI );
13866 %}
// Load an int from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store an int to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a long to a stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot.
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a pointer to a stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a float to a stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
13972 // Use the same format since predicate() can not be used here.
// Load a double from a stack slot.
// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a double to a stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path object lock; delegates to MacroAssembler::fast_lock.
// Note the box register is killed (USE_KILL).
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr <-- $object, $box, $tmp, $scr #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path object unlock; delegates to MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $cr <-- $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
14025 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate (byte store with ordering for the
// card-table write barrier).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
14036 // Die now
14037 instruct ShouldNotReachHere( )
14038 %{
14039 match(Halt);
14040 ins_cost(300);
14042 // Use the following format syntax
14043 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
14044 ins_encode %{
14045 // Here we should emit illtrap !
14047 __ stop("in ShoudNotReachHere");
14049 %}
14050 ins_pipe( pipe_jump );
14051 %}
// Address computation (lea) for base + small offset with a narrow-oop
// operand; only valid when the narrow-oop shift is zero.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation: dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation: dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
14120 // ============================================================================
14121 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
14122 // array for an instance of the superklass. Set a hidden internal cache on a
14123 // hit (cache is checked with exposed code in gen_subtype_check()). Return
14124 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-half subtype check: scans the secondary-supers array; the heavy
// lifting lives in the enc_PartialSubtypeCheck encoding class.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
14135 // Conditional-store of the updated heap-top.
14136 // Used during allocation of the shared heap.
// Conditional-store of the updated heap-top (allocation fast path).
// Implemented with cmpxchg; an index register is not supported and traps.
instruct storePConditional( memory heap_top_ptr, mRegP oldval, mRegP newval, FlagsReg cr ) %{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  format %{ "CMPXCHG $heap_top_ptr, $newval\t# (ptr) @storePConditional "
            "If $oldval == $heap_top_ptr then store $newval into $heap_top_ptr" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($heap_top_ptr$$base), $heap_top_ptr$$disp);

    int index = $heap_top_ptr$$index;
    int scale = $heap_top_ptr$$scale;
    int disp = $heap_top_ptr$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storePConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
14163 // Conditional-store of an int value.
14164 // AT flag is set on success, reset otherwise.
// Conditional-store of an int value using an inline ll/sc retry loop.
// AT flag is set on success (0xFF), reset (0) otherwise.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Memory barrier before the linked load, depending on sync level.
      if(UseSyncLevel >= 3000 || UseSyncLevel < 2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // delay slot: AT = 0 (failure result)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                  // sc clobbers AT with success flag
      __ beq(AT, R0, again);            // sc failed -> retry
      __ delayed()->addiu(AT, R0, 0xFF); // delay slot: AT = success marker
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
14203 // Conditional-store of a long value.
14204 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
14205 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
14206 %{
14207 match(Set cr (StoreLConditional mem (Binary oldval newval)));
14208 effect(KILL oldval);
14210 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
14211 ins_encode%{
14212 Register oldval = $oldval$$Register;
14213 Register newval = $newval$$Register;
14214 Address addr(as_Register($mem$$base), $mem$$disp);
14216 int index = $mem$$index;
14217 int scale = $mem$$scale;
14218 int disp = $mem$$disp;
14220 guarantee(Assembler::is_simm16(disp), "");
14222 if( index != 0 ) {
14223 __ stop("in storeIConditional: index != 0");
14224 } else {
14225 __ cmpxchg(newval, addr, oldval);
14226 }
14227 %}
14228 ins_pipe( long_memory_op );
14229 %}
14231 // Implement LoadPLocked. Must be ordered against changes of the memory location
14232 // by storePConditional.
// Implement LoadPLocked. Must be ordered against changes of the memory
// location by storePConditional (load followed by sync).
instruct loadPLocked(mRegP dst, memory mem) %{
  match(Set dst (LoadPLocked mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "ld    $dst, $mem #@loadPLocked\n\t"
            "sync" %}
  size(12);
  ins_encode (load_P_enc_ac(dst, mem));
  ins_pipe( ialu_loadI );
%}
14245 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
14246 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
14247 effect(KILL oldval);
14248 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
14249 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapL\n\t"
14250 "MOV $res, 1 @ compareAndSwapI\n\t"
14251 "BNE AT, R0 @ compareAndSwapI\n\t"
14252 "MOV $res, 0 @ compareAndSwapI\n"
14253 "L:" %}
14254 ins_encode %{
14255 Register newval = $newval$$Register;
14256 Register oldval = $oldval$$Register;
14257 Register res = $res$$Register;
14258 Address addr($mem_ptr$$Register, 0);
14259 Label L;
14261 __ cmpxchg32(newval, addr, oldval);
14262 __ move(res, AT);
14263 %}
14264 ins_pipe( long_memory_op );
14265 %}
14267 instruct compareAndSwapL( mRegI res, mRegP mem_ptr, s2RegL oldval, mRegL newval) %{
14268 predicate(VM_Version::supports_cx8());
14269 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
14270 effect(KILL oldval);
14271 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
14272 "MOV $res, 1 @ compareAndSwapI\n\t"
14273 "BNE AT, R0 @ compareAndSwapI\n\t"
14274 "MOV $res, 0 @ compareAndSwapI\n"
14275 "L:" %}
14276 ins_encode %{
14277 Register newval = $newval$$Register;
14278 Register oldval = $oldval$$Register;
14279 Register res = $res$$Register;
14280 Address addr($mem_ptr$$Register, 0);
14281 Label L;
14283 __ cmpxchg(newval, addr, oldval);
14284 __ move(res, AT);
14285 %}
14286 ins_pipe( long_memory_op );
14287 %}
14289 //FIXME:
//FIXME:
// CAS on a pointer: full 64-bit cmpxchg; success flag in AT becomes the
// int result.
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CAS on a narrow (compressed) oop: 32-bit cmpxchg with the compare value
// sign-extended to match ll/sc semantics.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    // cmpxchg32 is implemented with ll/sc, which will do sign extension.
    // Thus, we should extend oldval's sign for correct comparison.
    //
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
14333 //----------Max and Min--------------------------------------------------------
14334 // Min Instructions
14335 ////
14336 // *** Min and Max using the conditional move are slower than the
14337 // *** branch version on a Pentium III.
14338 // // Conditional move for min
14339 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
14340 // effect( USE_DEF op2, USE op1, USE cr );
14341 // format %{ "CMOVlt $op2,$op1\t! min" %}
14342 // opcode(0x4C,0x0F);
14343 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
14344 // ins_pipe( pipe_cmov_reg );
14345 //%}
14346 //
14347 //// Min Register with Register (P6 version)
14348 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
14349 // predicate(VM_Version::supports_cmov() );
14350 // match(Set op2 (MinI op1 op2));
14351 // ins_cost(200);
14352 // expand %{
14353 // eFlagsReg cr;
14354 // compI_eReg(cr,op1,op2);
14355 // cmovI_reg_lt(op2,op1,cr);
14356 // %}
14357 //%}
14359 // Min Register with Register (generic version)
// Min Register with Register (generic version): dst = min(dst, src) using
// slt + conditional move (movn).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, src, dst);    // AT = (src < dst)
    __ movn(dst, src, AT);   // if AT != 0: dst = src

  %}

  ins_pipe( pipe_slow );
%}
14378 // Max Register with Register
14379 // *** Min and Max using the conditional move are slower than the
14380 // *** branch version on a Pentium III.
14381 // // Conditional move for max
14382 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
14383 // effect( USE_DEF op2, USE op1, USE cr );
14384 // format %{ "CMOVgt $op2,$op1\t! max" %}
14385 // opcode(0x4F,0x0F);
14386 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
14387 // ins_pipe( pipe_cmov_reg );
14388 //%}
14389 //
14390 // // Max Register with Register (P6 version)
14391 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
14392 // predicate(VM_Version::supports_cmov() );
14393 // match(Set op2 (MaxI op1 op2));
14394 // ins_cost(200);
14395 // expand %{
14396 // eFlagsReg cr;
14397 // compI_eReg(cr,op1,op2);
14398 // cmovI_reg_gt(op2,op1,cr);
14399 // %}
14400 //%}
14402 // Max Register with Register (generic version)
// Max Register with Register (generic version): dst = max(dst, src) using
// slt + conditional move (movn).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, dst, src);    // AT = (dst < src)
    __ movn(dst, src, AT);   // if AT != 0: dst = src

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negatives to zero.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst   = $dst$$Register;

    __ slt(AT, dst, R0);     // AT = (dst < 0)
    __ movn(dst, R0, AT);    // if negative: dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// AndL with 0xFFFFFFFF mask == zero-extend the low 32 bits (dext).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: src2 into the high 32 bits, src1 into
// the low 32 bits, using the MIPS bit-field insert/extract instructions.
// The three cases avoid clobbering an input that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into bits 63..32.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift src2 (== dst) up, then insert src1 into the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
14475 // Zero-extend convert int to long
// Zero-extend convert int to long: matches (ConvI2L src) & 0xFFFFFFFF,
// implemented as a single dext bit-field extract of the low 32 bits.
14476 instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
14477 %{
14478 match(Set dst (AndL (ConvI2L src) mask))
14480 format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
14481 ins_encode %{
14482 Register dst = $dst$$Register;
14483 Register src = $src$$Register;
14485 __ dext(dst, src, 0, 32);
14486 %}
14487 ins_pipe(ialu_regI_regI);
14488 %}
// Zero-extend the round-trip L2I->I2L conversion: the masked result only
// keeps the low 32 bits of src, so a single dext suffices.
14490 instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
14491 %{
14492 match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));
14494 format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
14495 ins_encode %{
14496 Register dst = $dst$$Register;
14497 Register src = $src$$Register;
14499 __ dext(dst, src, 0, 32);
14500 %}
14501 ins_pipe(ialu_regI_regI);
14502 %}
14504 // Match loading integer and casting it to unsigned int in long register.
14505 // LoadI + ConvI2L + AndL 0xffffffff.
// Fold LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) into one
// zero-extending 32-bit load (lwu, via load_N_enc).
14506 instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
14507 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
14509 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
14510 ins_encode (load_N_enc(dst, mem));
14511 ins_pipe(ialu_loadI);
14512 %}
// Same as loadUI2L_rmask but with the mask as the left AndL operand
// (the ideal graph is not canonicalized for this shape, so both are needed).
14514 instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
14515 match(Set dst (AndL mask (ConvI2L (LoadI mem))));
14517 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
14518 ins_encode (load_N_enc(dst, mem));
14519 ins_pipe(ialu_loadI);
14520 %}
14523 // ============================================================================
14524 // Safepoint Instruction
// Safepoint poll through a register that already holds the polling-page
// address.  NOTE: predicate(false) means the matcher never selects this
// variant; safePoint_poll is used instead.  Kept, presumably, for future use.
14526 instruct safePoint_poll_reg(mRegP poll) %{
14527 match(SafePoint poll);
14528 predicate(false);
14529 effect(USE poll);
14530 ins_cost(125);
14531 format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}
14533 ins_encode %{
14534 Register poll_reg = $poll$$Register;
14536 __ block_comment("Safepoint:");
// The relocation marks this load as a polling-page access for the runtime.
14537 __ relocate(relocInfo::poll_type);
14538 __ lw(AT, poll_reg, 0);
14539 %}
14541 ins_pipe( ialu_storeI );
14542 %}
// Safepoint poll: materialize the polling-page address in T9 and issue a
// relocated lw from it.  The load faults when the VM protects the polling
// page, stopping this thread at the safepoint (standard HotSpot mechanism).
14544 instruct safePoint_poll() %{
14545 match(SafePoint);
14547 ins_cost(105);
14548 format %{ "poll for GC @ safePoint_poll" %}
14550 ins_encode %{
14551 __ block_comment("Safepoint:");
14552 __ set64(T9, (long)os::get_polling_page());
14553 __ relocate(relocInfo::poll_type);
14554 __ lw(AT, T9, 0);
14555 %}
14557 ins_pipe( ialu_storeI );
14558 %}
14560 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat needs no code on this target (empty encoding, zero cost):
// float values are already held at the correct precision in FP registers.
14562 instruct roundFloat_nop(regF dst)
14563 %{
14564 match(Set dst (RoundFloat dst));
14566 ins_cost(0);
14567 ins_encode();
14568 ins_pipe(empty);
14569 %}
// RoundDouble needs no code on this target (empty encoding, zero cost);
// see roundFloat_nop above.
14571 instruct roundDouble_nop(regD dst)
14572 %{
14573 match(Set dst (RoundDouble dst));
14575 ins_cost(0);
14576 ins_encode();
14577 ins_pipe(empty);
14578 %}
14580 //---------- Zeros Count Instructions ------------------------------------------
14581 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the clz instruction; guarded by the
// UseCountLeadingZerosInstructionMIPS64 feature flag.
14582 instruct countLeadingZerosI(mRegI dst, mRegI src) %{
14583 predicate(UseCountLeadingZerosInstructionMIPS64);
14584 match(Set dst (CountLeadingZerosI src));
14586 format %{ "clz $dst, $src\t# count leading zeros (int)" %}
14587 ins_encode %{
14588 __ clz($dst$$Register, $src$$Register);
14589 %}
14590 ins_pipe( ialu_regL_regL );
14591 %}
// Count leading zeros of a long with the dclz instruction; guarded by the
// UseCountLeadingZerosInstructionMIPS64 feature flag.
14593 instruct countLeadingZerosL(mRegI dst, mRegL src) %{
14594 predicate(UseCountLeadingZerosInstructionMIPS64);
14595 match(Set dst (CountLeadingZerosL src));
14597 format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
14598 ins_encode %{
14599 __ dclz($dst$$Register, $src$$Register);
14600 %}
14601 ins_pipe( ialu_regL_regL );
14602 %}
// Count trailing zeros of an int; guarded by the
// UseCountTrailingZerosInstructionMIPS64 feature flag.
14604 instruct countTrailingZerosI(mRegI dst, mRegI src) %{
14605 predicate(UseCountTrailingZerosInstructionMIPS64);
14606 match(Set dst (CountTrailingZerosI src));
14608 format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
14609 ins_encode %{
14610 // ctz and dctz are gs (Godson/Loongson extension) instructions,
14611 __ ctz($dst$$Register, $src$$Register);
14612 %}
14613 ins_pipe( ialu_regL_regL );
14614 %}
// Count trailing zeros of a long; guarded by the
// UseCountTrailingZerosInstructionMIPS64 feature flag.
// Fix: the debug format said "dcto" while the emitted instruction is dctz.
14616 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
14617 predicate(UseCountTrailingZerosInstructionMIPS64);
14618 match(Set dst (CountTrailingZerosL src));
14620 format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
14621 ins_encode %{
14622 __ dctz($dst$$Register, $src$$Register);
14623 %}
14624 ins_pipe( ialu_regL_regL );
14625 %}
14627 // ====================VECTOR INSTRUCTIONS=====================================
14629 // Load vectors (8 bytes long)
// Load an 8-byte vector into a vecD (double FP) register via the
// double-load encoding.
14630 instruct loadV8(vecD dst, memory mem) %{
14631 predicate(n->as_LoadVector()->memory_size() == 8);
14632 match(Set dst (LoadVector mem));
14633 ins_cost(125);
14634 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
14635 ins_encode(load_D_enc(dst, mem));
14636 ins_pipe( fpu_loadF );
14637 %}
14639 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD (double FP) register via the
// double-store encoding.
14640 instruct storeV8(memory mem, vecD src) %{
14641 predicate(n->as_StoreVector()->memory_size() == 8);
14642 match(Set mem (StoreVector mem src));
14643 ins_cost(145);
14644 format %{ "store $mem, $src\t! store vector (8 bytes)" %}
14645 ins_encode(store_D_reg_enc(mem, src));
14646 ins_pipe( fpu_storeF );
14647 %}
// Replicate a byte into all 8 lanes using the Loongson LEXT3 replv_ob
// instruction.  Lower ins_cost than the generic Repl8B, so the matcher
// prefers this form when UseLEXT3 is available.
14649 instruct Repl8B_DSP(vecD dst, mRegI src) %{
14650 predicate(n->as_Vector()->length() == 8 && UseLEXT3);
14651 match(Set dst (ReplicateB src));
14652 ins_cost(100);
14653 format %{ "replv_ob AT, $src\n\t"
14654 "dmtc1 AT, $dst\t! replicate8B" %}
14655 ins_encode %{
14656 __ replv_ob(AT, $src$$Register);
14657 __ dmtc1(AT, $dst$$FloatRegister);
14658 %}
14659 ins_pipe( pipe_mtc1 );
14660 %}
// Generic byte replication fallback: double the pattern width with
// successive bit-field inserts (8 -> 16 -> 32 -> 64 bits), then move the
// 64-bit result into the vecD register with dmtc1.
14662 instruct Repl8B(vecD dst, mRegI src) %{
14663 predicate(n->as_Vector()->length() == 8);
14664 match(Set dst (ReplicateB src));
14665 ins_cost(140);
14666 format %{ "move AT, $src\n\t"
14667 "dins AT, AT, 8, 8\n\t"
14668 "dins AT, AT, 16, 16\n\t"
14669 "dinsu AT, AT, 32, 32\n\t"
14670 "dmtc1 AT, $dst\t! replicate8B" %}
14671 ins_encode %{
14672 __ move(AT, $src$$Register);
14673 __ dins(AT, AT, 8, 8);
14674 __ dins(AT, AT, 16, 16);
14675 __ dinsu(AT, AT, 32, 32);
14676 __ dmtc1(AT, $dst$$FloatRegister);
14677 %}
14678 ins_pipe( pipe_mtc1 );
14679 %}
// Replicate a byte immediate into all 8 lanes with the LEXT3 repl_ob
// instruction (cheaper than the generic immediate path when UseLEXT3).
14681 instruct Repl8B_imm_DSP(vecD dst, immI con) %{
14682 predicate(n->as_Vector()->length() == 8 && UseLEXT3);
14683 match(Set dst (ReplicateB con));
14684 ins_cost(110);
14685 format %{ "repl_ob AT, [$con]\n\t"
14686 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
14687 ins_encode %{
14688 int val = $con$$constant;
14689 __ repl_ob(AT, val);
14690 __ dmtc1(AT, $dst$$FloatRegister);
14691 %}
14692 ins_pipe( pipe_mtc1 );
14693 %}
// Generic byte-immediate replication: materialize the constant, then
// widen the pattern with dins/dinsu (8 -> 16 -> 32 -> 64 bits).
14695 instruct Repl8B_imm(vecD dst, immI con) %{
14696 predicate(n->as_Vector()->length() == 8);
14697 match(Set dst (ReplicateB con));
14698 ins_cost(150);
14699 format %{ "move AT, [$con]\n\t"
14700 "dins AT, AT, 8, 8\n\t"
14701 "dins AT, AT, 16, 16\n\t"
14702 "dinsu AT, AT, 32, 32\n\t"
14703 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
14704 ins_encode %{
14705 __ move(AT, $con$$constant);
14706 __ dins(AT, AT, 8, 8);
14707 __ dins(AT, AT, 16, 16);
14708 __ dinsu(AT, AT, 32, 32);
14709 __ dmtc1(AT, $dst$$FloatRegister);
14710 %}
14711 ins_pipe( pipe_mtc1 );
14712 %}
// Replicate byte zero: a single dmtc1 from R0 clears all 64 bits.
14714 instruct Repl8B_zero(vecD dst, immI0 zero) %{
14715 predicate(n->as_Vector()->length() == 8);
14716 match(Set dst (ReplicateB zero));
14717 ins_cost(90);
14718 format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
14719 ins_encode %{
14720 __ dmtc1(R0, $dst$$FloatRegister);
14721 %}
14722 ins_pipe( pipe_mtc1 );
14723 %}
// Replicate byte -1: nor(AT, R0, R0) yields all-ones, which is the
// 8x(-1) byte pattern; clobbers AT.
14725 instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
14726 predicate(n->as_Vector()->length() == 8);
14727 match(Set dst (ReplicateB M1));
14728 ins_cost(80);
14729 format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
14730 ins_encode %{
14731 __ nor(AT, R0, R0);
14732 __ dmtc1(AT, $dst$$FloatRegister);
14733 %}
14734 ins_pipe( pipe_mtc1 );
14735 %}
// Replicate a halfword (short) into all 4 lanes using the LEXT3 replv_qh
// instruction; preferred over the generic Repl4S when UseLEXT3 is set.
14737 instruct Repl4S_DSP(vecD dst, mRegI src) %{
14738 predicate(n->as_Vector()->length() == 4 && UseLEXT3);
14739 match(Set dst (ReplicateS src));
14740 ins_cost(100);
14741 format %{ "replv_qh AT, $src\n\t"
14742 "dmtc1 AT, $dst\t! replicate4S" %}
14743 ins_encode %{
14744 __ replv_qh(AT, $src$$Register);
14745 __ dmtc1(AT, $dst$$FloatRegister);
14746 %}
14747 ins_pipe( pipe_mtc1 );
14748 %}
// Generic halfword replication: widen the 16-bit pattern with dins/dinsu
// (16 -> 32 -> 64 bits), then dmtc1 into the vecD register.
14750 instruct Repl4S(vecD dst, mRegI src) %{
14751 predicate(n->as_Vector()->length() == 4);
14752 match(Set dst (ReplicateS src));
14753 ins_cost(120);
14754 format %{ "move AT, $src \n\t"
14755 "dins AT, AT, 16, 16\n\t"
14756 "dinsu AT, AT, 32, 32\n\t"
14757 "dmtc1 AT, $dst\t! replicate4S" %}
14758 ins_encode %{
14759 __ move(AT, $src$$Register);
14760 __ dins(AT, AT, 16, 16);
14761 __ dinsu(AT, AT, 32, 32);
14762 __ dmtc1(AT, $dst$$FloatRegister);
14763 %}
14764 ins_pipe( pipe_mtc1 );
14765 %}
// Replicate a halfword immediate with LEXT3.  repl_qh only encodes a
// 10-bit signed immediate, so larger constants fall back to li32 + replv_qh.
14767 instruct Repl4S_imm_DSP(vecD dst, immI con) %{
14768 predicate(n->as_Vector()->length() == 4 && UseLEXT3);
14769 match(Set dst (ReplicateS con));
14770 ins_cost(100);
14771 format %{ "repl_qh AT, [$con]\n\t"
14772 "dmtc1 AT, $dst\t! replicate4S($con)" %}
14773 ins_encode %{
14774 int val = $con$$constant;
14775 if ( Assembler::is_simm(val, 10)) {
14776 //repl_qh supports 10 bits immediate
14777 __ repl_qh(AT, val);
14778 } else {
14779 __ li32(AT, val);
14780 __ replv_qh(AT, AT);
14781 }
14782 __ dmtc1(AT, $dst$$FloatRegister);
14783 %}
14784 ins_pipe( pipe_mtc1 );
14785 %}
// Generic halfword-immediate replication: materialize the constant, then
// widen with dins/dinsu (16 -> 32 -> 64 bits).
14787 instruct Repl4S_imm(vecD dst, immI con) %{
14788 predicate(n->as_Vector()->length() == 4);
14789 match(Set dst (ReplicateS con));
14790 ins_cost(110);
14791 format %{ "move AT, [$con]\n\t"
14792 "dins AT, AT, 16, 16\n\t"
14793 "dinsu AT, AT, 32, 32\n\t"
14794 "dmtc1 AT, $dst\t! replicate4S($con)" %}
14795 ins_encode %{
14796 __ move(AT, $con$$constant);
14797 __ dins(AT, AT, 16, 16);
14798 __ dinsu(AT, AT, 32, 32);
14799 __ dmtc1(AT, $dst$$FloatRegister);
14800 %}
14801 ins_pipe( pipe_mtc1 );
14802 %}
// Replicate halfword zero: a single dmtc1 from R0 clears all 64 bits.
14804 instruct Repl4S_zero(vecD dst, immI0 zero) %{
14805 predicate(n->as_Vector()->length() == 4);
14806 match(Set dst (ReplicateS zero));
14807 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
14808 ins_encode %{
14809 __ dmtc1(R0, $dst$$FloatRegister);
14810 %}
14811 ins_pipe( pipe_mtc1 );
14812 %}
// Replicate halfword -1: all-ones pattern built with nor; clobbers AT.
14814 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
14815 predicate(n->as_Vector()->length() == 4);
14816 match(Set dst (ReplicateS M1));
14817 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
14818 ins_encode %{
14819 __ nor(AT, R0, R0);
14820 __ dmtc1(AT, $dst$$FloatRegister);
14821 %}
14822 ins_pipe( pipe_mtc1 );
14823 %}
14825 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both 32-bit lanes: insert src into AT bits [0,32)
// and [32,64), then move the 64-bit pattern into the vecD register.
14826 instruct Repl2I(vecD dst, mRegI src) %{
14827 predicate(n->as_Vector()->length() == 2);
14828 match(Set dst (ReplicateI src));
14829 format %{ "dins AT, $src, 0, 32\n\t"
14830 "dinsu AT, $src, 32, 32\n\t"
14831 "dmtc1 AT, $dst\t! replicate2I" %}
14832 ins_encode %{
14833 __ dins(AT, $src$$Register, 0, 32);
14834 __ dinsu(AT, $src$$Register, 32, 32);
14835 __ dmtc1(AT, $dst$$FloatRegister);
14836 %}
14837 ins_pipe( pipe_mtc1 );
14838 %}
14840 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate a 32-bit immediate into both lanes: li32 materializes the
// constant, dinsu copies the low word into the high word, dmtc1 moves the
// 64-bit pattern to the FP register.
// Fix: the debug format had a stray ", 32" after [$con] and omitted the
// dinsu bit-field arguments; it now matches the emitted code.
// NOTE(review): 'tmp' (A7) is declared KILL but the encoding only uses AT;
// presumably a leftover from an earlier encoding -- confirm before removing.
14841 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
14842 predicate(n->as_Vector()->length() == 2);
14843 match(Set dst (ReplicateI con));
14844 effect(KILL tmp);
14845 format %{ "li32 AT, [$con]\n\t"
14846 "dinsu AT, AT, 32, 32\n\t"
14847 "dmtc1 AT, $dst\t! replicate2I($con)" %}
14848 ins_encode %{
14849 int val = $con$$constant;
14850 __ li32(AT, val);
14851 __ dinsu(AT, AT, 32, 32);
14852 __ dmtc1(AT, $dst$$FloatRegister);
14853 %}
14854 ins_pipe( pipe_mtc1 );
14855 %}
14857 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: a single dmtc1 from R0 clears both lanes.
14858 instruct Repl2I_zero(vecD dst, immI0 zero) %{
14859 predicate(n->as_Vector()->length() == 2);
14860 match(Set dst (ReplicateI zero));
14861 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
14862 ins_encode %{
14863 __ dmtc1(R0, $dst$$FloatRegister);
14864 %}
14865 ins_pipe( pipe_mtc1 );
14866 %}
14868 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1: all-ones pattern built with nor; clobbers AT.
14869 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
14870 predicate(n->as_Vector()->length() == 2);
14871 match(Set dst (ReplicateI M1));
14872 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
14873 ins_encode %{
14874 __ nor(AT, R0, R0);
14875 __ dmtc1(AT, $dst$$FloatRegister);
14876 %}
14877 ins_pipe( pipe_mtc1 );
14878 %}
14880 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes: cvt.ps.s packs two singles into one
// paired-single register (here the same source twice).
14881 instruct Repl2F(vecD dst, regF src) %{
14882 predicate(n->as_Vector()->length() == 2);
14883 match(Set dst (ReplicateF src));
14884 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
14885 ins_encode %{
14886 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
14887 %}
14888 ins_pipe( pipe_slow );
14889 %}
14891 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: dmtc1 from R0 writes +0.0f into both lanes.
14892 instruct Repl2F_zero(vecD dst, immF0 zero) %{
14893 predicate(n->as_Vector()->length() == 2);
14894 match(Set dst (ReplicateF zero));
14895 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
14896 ins_encode %{
14897 __ dmtc1(R0, $dst$$FloatRegister);
14898 %}
14899 ins_pipe( pipe_mtc1 );
14900 %}
14903 // ====================VECTOR ARITHMETIC=======================================
14905 // --------------------------------- ADD --------------------------------------
14907 // Floats vector add
14908 // The kernel does not have emulation of PS instructions yet, so PS instructions are disabled.
// Packed 2xfloat add, two-operand form (dst += src) using add.ps.
14909 instruct vadd2F(vecD dst, vecD src) %{
14910 predicate(n->as_Vector()->length() == 2);
14911 match(Set dst (AddVF dst src));
14912 format %{ "add.ps $dst,$src\t! add packed2F" %}
14913 ins_encode %{
14914 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
14915 %}
14916 ins_pipe( pipe_slow );
14917 %}
// Packed 2xfloat add, three-operand form (dst = src1 + src2) using add.ps.
14919 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
14920 predicate(n->as_Vector()->length() == 2);
14921 match(Set dst (AddVF src1 src2));
14922 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
14923 ins_encode %{
14924 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14925 %}
14926 ins_pipe( fpu_regF_regF );
14927 %}
14929 // --------------------------------- SUB --------------------------------------
14931 // Floats vector sub
// Packed 2xfloat subtract (dst -= src) using sub.ps.
14932 instruct vsub2F(vecD dst, vecD src) %{
14933 predicate(n->as_Vector()->length() == 2);
14934 match(Set dst (SubVF dst src));
14935 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
14936 ins_encode %{
14937 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
14938 %}
14939 ins_pipe( fpu_regF_regF );
14940 %}
14942 // --------------------------------- MUL --------------------------------------
14944 // Floats vector mul
// Packed 2xfloat multiply, two-operand form (dst *= src) using mul.ps.
14945 instruct vmul2F(vecD dst, vecD src) %{
14946 predicate(n->as_Vector()->length() == 2);
14947 match(Set dst (MulVF dst src));
14948 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
14949 ins_encode %{
14950 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
14951 %}
14952 ins_pipe( fpu_regF_regF );
14953 %}
// Packed 2xfloat multiply, three-operand form (dst = src1 * src2) using mul.ps.
14955 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
14956 predicate(n->as_Vector()->length() == 2);
14957 match(Set dst (MulVF src1 src2));
14958 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
14959 ins_encode %{
14960 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14961 %}
14962 ins_pipe( fpu_regF_regF );
14963 %}
14965 // --------------------------------- DIV --------------------------------------
14966 // MIPS does not have div.ps
14968 // --------------------------------- MADD --------------------------------------
14969 // Floats vector madd
14970 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
14971 // predicate(n->as_Vector()->length() == 2);
14972 // match(Set dst (AddVF (MulVF src1 src2) src3));
14973 // ins_cost(50);
14974 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
14975 // ins_encode %{
14976 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14977 // %}
14978 // ins_pipe( fpu_regF_regF );
14979 //%}
14982 //----------PEEPHOLE RULES-----------------------------------------------------
14983 // These must follow all instruction definitions as they use the names
14984 // defined in the instructions definitions.
14985 //
14986 // peepmatch ( root_instr_name [preceding_instruction]* );
14987 //
14988 // peepconstraint %{
14989 // (instruction_number.operand_name relational_op instruction_number.operand_name
14990 // [, ...] );
14991 // // instruction numbers are zero-based using left to right order in peepmatch
14992 //
14993 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14994 // // provide an instruction_number.operand_name for each operand that appears
14995 // // in the replacement instruction's match rule
14996 //
14997 // ---------VM FLAGS---------------------------------------------------------
14998 //
14999 // All peephole optimizations can be turned off using -XX:-OptoPeephole
15000 //
15001 // Each peephole rule is given an identifying number starting with zero and
15002 // increasing by one in the order seen by the parser. An individual peephole
15003 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
15004 // on the command-line.
15005 //
15006 // ---------CURRENT LIMITATIONS----------------------------------------------
15007 //
15008 // Only match adjacent instructions in same basic block
15009 // Only equality constraints
15010 // Only constraints between operands, not (0.dest_reg == EAX_enc)
15011 // Only one replacement instruction
15012 //
15013 // ---------EXAMPLE----------------------------------------------------------
15014 //
15015 // // pertinent parts of existing instructions in architecture description
15016 // instruct movI(eRegI dst, eRegI src) %{
15017 // match(Set dst (CopyI src));
15018 // %}
15019 //
15020 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
15021 // match(Set dst (AddI dst src));
15022 // effect(KILL cr);
15023 // %}
15024 //
15025 // // Change (inc mov) to lea
15026 // peephole %{
15027 // // increment preceded by register-register move
15028 // peepmatch ( incI_eReg movI );
15029 // // require that the destination register of the increment
15030 // // match the destination register of the move
15031 // peepconstraint ( 0.dst == 1.dst );
15032 // // construct a replacement instruction that sets
15033 // // the destination to ( move's source register + one )
15034 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
15035 // %}
15036 //
15037 // Implementation no longer uses movX instructions since
15038 // machine-independent system no longer uses CopyX nodes.
15039 //
15040 // peephole %{
15041 // peepmatch ( incI_eReg movI );
15042 // peepconstraint ( 0.dst == 1.dst );
15043 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
15044 // %}
15045 //
15046 // peephole %{
15047 // peepmatch ( decI_eReg movI );
15048 // peepconstraint ( 0.dst == 1.dst );
15049 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
15050 // %}
15051 //
15052 // peephole %{
15053 // peepmatch ( addI_eReg_imm movI );
15054 // peepconstraint ( 0.dst == 1.dst );
15055 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
15056 // %}
15057 //
15058 // peephole %{
15059 // peepmatch ( addP_eReg_imm movP );
15060 // peepconstraint ( 0.dst == 1.dst );
15061 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
15062 // %}
15064 // // Change load of spilled value to only a spill
15065 // instruct storeI(memory mem, eRegI src) %{
15066 // match(Set mem (StoreI mem src));
15067 // %}
15068 //
15069 // instruct loadI(eRegI dst, memory mem) %{
15070 // match(Set dst (LoadI mem));
15071 // %}
15072 //
15073 //peephole %{
15074 // peepmatch ( loadI storeI );
15075 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
15076 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
15077 //%}
15079 //----------SMARTSPILL RULES---------------------------------------------------
15080 // These must follow all instruction definitions as they use the names
15081 // defined in the instructions definitions.