// Wed, 31 Oct 2018 14:29:13 +0800
// #7520 added instruct matching StorePConditional and LoadPLocked
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2018, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
// Godson3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
register %{
// General Registers
// Integer Registers.  Each 64-bit GPR is described as a lo/hi pair
// (Rn, Rn_H) so the allocator can model both 32-bit halves of a long.
// Encoding column is the hardware register number (0..31).
reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());           // hardwired zero, never allocated
reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());             // assembler temporary
reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());            // return value registers
reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());            // argument registers A0..A7
reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());           // caller-saved temporaries
reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());           // callee-saved S0..S7 (save-on-entry)
reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());

// Special Registers (never allocated by the register allocator)
reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());            // kernel-reserved
reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());            // global pointer
reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());            // stack pointer
reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());            // frame pointer
reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());            // return address
reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
// Floating registers.  As with the GPRs, each 64-bit FPR is described as
// a lo/hi pair (Fn, Fn_H); all FPRs are caller-saved (SOC).
reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// NOTE(review): the flag register is mapped onto GPR 1 (AT) via
// as_Register(1) — confirm this aliasing with the rest of the port.
reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is uesd for heapbase of compressed oop
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
// Allocation preference order for the float-register chunk.
// NOTE(review): the order is deliberately non-monotonic — F12..F19 appear
// late, presumably to keep the FP argument registers free; confirm.
alloc_class chunk1( F0, F0_H,
                    F1, F1_H,
                    F2, F2_H,
                    F3, F3_H,
                    F4, F4_H,
                    F5, F5_H,
                    F6, F6_H,
                    F7, F7_H,
                    F8, F8_H,
                    F9, F9_H,
                    F10, F10_H,
                    F11, F11_H,
                    F20, F20_H,
                    F21, F21_H,
                    F22, F22_H,
                    F23, F23_H,
                    F24, F24_H,
                    F25, F25_H,
                    F26, F26_H,
                    F27, F27_H,
                    F28, F28_H,
                    F19, F19_H,
                    F18, F18_H,
                    F17, F17_H,
                    F16, F16_H,
                    F15, F15_H,
                    F14, F14_H,
                    F13, F13_H,
                    F12, F12_H,
                    F29, F29_H,
                    F30, F30_H,
                    F31, F31_H);

// Chunk for the (single) condition-flag register.
alloc_class chunk2(MIPS_FLAG);
// Register classes used by operands in the instruction rules.
// *_reg      : 32-bit (single-slot) views of individual registers
// *_long_reg : 64-bit (pair) views
reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
reg_class s0_reg( S0 );
reg_class s1_reg( S1 );
reg_class s2_reg( S2 );
reg_class s3_reg( S3 );
reg_class s4_reg( S4 );
reg_class s5_reg( S5 );
reg_class s6_reg( S6 );
reg_class s7_reg( S7 );

reg_class t_reg( T0, T1, T2, T3, T8, T9 );
reg_class t0_reg( T0 );
reg_class t1_reg( T1 );
reg_class t2_reg( T2 );
reg_class t3_reg( T3 );
reg_class t8_reg( T8 );
reg_class t9_reg( T9 );

reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
reg_class a0_reg( A0 );
reg_class a1_reg( A1 );
reg_class a2_reg( A2 );
reg_class a3_reg( A3 );
reg_class a4_reg( A4 );
reg_class a5_reg( A5 );
reg_class a6_reg( A6 );
reg_class a7_reg( A7 );

reg_class v0_reg( V0 );
reg_class v1_reg( V1 );

reg_class sp_reg( SP, SP_H );
reg_class fp_reg( FP, FP_H );

reg_class mips_flags(MIPS_FLAG);

reg_class v0_long_reg( V0, V0_H );
reg_class v1_long_reg( V1, V1_H );
reg_class a0_long_reg( A0, A0_H );
reg_class a1_long_reg( A1, A1_H );
reg_class a2_long_reg( A2, A2_H );
reg_class a3_long_reg( A3, A3_H );
reg_class a4_long_reg( A4, A4_H );
reg_class a5_long_reg( A5, A5_H );
reg_class a6_long_reg( A6, A6_H );
reg_class a7_long_reg( A7, A7_H );
reg_class t0_long_reg( T0, T0_H );
reg_class t1_long_reg( T1, T1_H );
reg_class t2_long_reg( T2, T2_H );
reg_class t3_long_reg( T3, T3_H );
reg_class t8_long_reg( T8, T8_H );
reg_class t9_long_reg( T9, T9_H );
reg_class s0_long_reg( S0, S0_H );
reg_class s1_long_reg( S1, S1_H );
reg_class s2_long_reg( S2, S2_H );
reg_class s3_long_reg( S3, S3_H );
reg_class s4_long_reg( S4, S4_H );
reg_class s5_long_reg( S5, S5_H );
reg_class s6_long_reg( S6, S6_H );
reg_class s7_long_reg( S7, S7_H );

// General int operands.  Excludes S5/S6 (heapbase / thread, see chunk0
// comments), T9, AT, and the special registers.
reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );

// Same as int_reg but excluding the argument registers A0..A7.
reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
// Pointer operands: full 64-bit register pairs.
reg_class p_reg(
                 S7, S7_H,
                 S0, S0_H,
                 S1, S1_H,
                 S2, S2_H,
                 S4, S4_H,
                 S3, S3_H,
                 T8, T8_H,
                 T2, T2_H,
                 T3, T3_H,
                 T1, T1_H,
                 A7, A7_H,
                 A6, A6_H,
                 A5, A5_H,
                 A4, A4_H,
                 A3, A3_H,
                 A2, A2_H,
                 A1, A1_H,
                 A0, A0_H,
                 T0, T0_H
               );

// Pointer operands excluding T8 (reserved by some instruction rules).
reg_class no_T8_p_reg(
                 S7, S7_H,
                 S0, S0_H,
                 S1, S1_H,
                 S2, S2_H,
                 S4, S4_H,
                 S3, S3_H,
                 T2, T2_H,
                 T3, T3_H,
                 T1, T1_H,
                 A7, A7_H,
                 A6, A6_H,
                 A5, A5_H,
                 A4, A4_H,
                 A3, A3_H,
                 A2, A2_H,
                 A1, A1_H,
                 A0, A0_H,
                 T0, T0_H
               );

// Long operands: same register set as p_reg, as 64-bit pairs.
reg_class long_reg(
                 S7, S7_H,
                 S0, S0_H,
                 S1, S1_H,
                 S2, S2_H,
                 S4, S4_H,
                 S3, S3_H,
                 T8, T8_H,
                 T2, T2_H,
                 T3, T3_H,
                 T1, T1_H,
                 A7, A7_H,
                 A6, A6_H,
                 A5, A5_H,
                 A4, A4_H,
                 A3, A3_H,
                 A2, A2_H,
                 A1, A1_H,
                 A0, A0_H,
                 T0, T0_H
               );
382 // Floating point registers.
383 // F31 are not used as temporary registers in D2I
384 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
// Double operands: even/odd FPR pairs.
// NOTE(review): F30 is omitted while F31 is included, mirroring flt_reg;
// confirm which FPR is actually reserved as the D2I scratch register.
reg_class dbl_reg( F0, F0_H,
                   F1, F1_H,
                   F2, F2_H,
                   F3, F3_H,
                   F4, F4_H,
                   F5, F5_H,
                   F6, F6_H,
                   F7, F7_H,
                   F8, F8_H,
                   F9, F9_H,
                   F10, F10_H,
                   F11, F11_H,
                   F12, F12_H,
                   F13, F13_H,
                   F14, F14_H,
                   F15, F15_H,
                   F16, F16_H,
                   F17, F17_H,
                   F18, F18_H,
                   F19, F19_H,
                   F20, F20_H,
                   F21, F21_H,
                   F22, F22_H,
                   F23, F23_H,
                   F24, F24_H,
                   F25, F25_H,
                   F26, F26_H,
                   F27, F27_H,
                   F28, F28_H,
                   F29, F29_H,
                   F31, F31_H);

// First FP argument registers (F12/F14; F12..F19 carry FP arguments,
// see Matcher::can_be_java_arg below).
reg_class flt_arg0( F12 );
reg_class dbl_arg0( F12, F12_H );
reg_class dbl_arg1( F14, F14_H );
421 %}
423 //----------DEFINITION BLOCK---------------------------------------------------
424 // Define name --> value mappings to inform the ADLC of an integer valued name
425 // Current support includes integer values in the range [0, 0x7FFFFFFF]
426 // Format:
427 // int_def <name> ( <int_value>, <expression>);
428 // Generated Code in ad_<arch>.hpp
429 // #define <name> (<expression>)
430 // // value == <int_value>
431 // Generated code in ad_<arch>.cpp adlc_verification()
432 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
433 //
definitions %{
  // Instruction costs used by the matcher, relative to DEFAULT_COST.
  int_def DEFAULT_COST ( 100, 100);
  int_def HUGE_COST (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
  // We use the jr instruction to construct calls, so calls are more expensive.
  int_def CALL_COST ( 500, DEFAULT_COST * 5);
  /*
  int_def EQUAL ( 1, 1 );
  int_def NOT_EQUAL ( 2, 2 );
  int_def GREATER ( 3, 3 );
  int_def GREATER_EQUAL ( 4, 4 );
  int_def LESS ( 5, 5 );
  int_def LESS_EQUAL ( 6, 6 );
  */
%}
457 //----------SOURCE BLOCK-------------------------------------------------------
458 // This is a block of C++ code which provides values, functions, and
459 // definitions necessary in the rest of the architecture description
461 source_hpp %{
462 // Header information of the source block.
463 // Method declarations/definitions which are used outside
464 // the ad-scope can conveniently be defined here.
465 //
466 // To keep related declarations/definitions/uses close together,
467 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Support for Compile::shorten_branches: this platform emits no call
// trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emitters and size bounds for the exception and deoptimization handler
// stubs; the emitters are defined in the source %{ }% block below.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    int size = NativeCall::instruction_size;
    return round_to(size, 16);  // handlers are 16-byte aligned
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
521 %} // end source_hpp
523 source %{
525 #define NO_INDEX 0
526 #define RELOC_IMM64 Assembler::imm_operand
527 #define RELOC_DISP32 Assembler::disp32_operand
530 #define __ _masm.
// Emit exception handler code.
// Emits a patchable jump to the shared exception blob and returns the
// offset of the handler within the stub section (0 on failure).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }

  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // Patchable jump to the shared exception blob entry point.
  __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
  __ align(16);
  // The emitted sequence must fit within the advertised handler size.
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Emits a patchable call to the deopt blob's unpack entry and returns the
// offset of the handler within the stub section (0 on failure).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }

  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // A call (not a jump): the deopt machinery needs the return address.
  __ patchable_call(SharedRuntime::deopt_blob()->unpack());
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
583 const bool Matcher::match_rule_supported(int opcode) {
584 if (!has_match_rule(opcode))
585 return false;
587 switch (opcode) {
588 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
589 case Op_CountLeadingZerosI:
590 case Op_CountLeadingZerosL:
591 if (!UseCountLeadingZerosInstructionMIPS64)
592 return false;
593 break;
594 case Op_CountTrailingZerosI:
595 case Op_CountTrailingZerosL:
596 if (!UseCountTrailingZerosInstructionMIPS64)
597 return false;
598 break;
599 }
601 return true; // Per default match rules are supported.
602 }
604 //FIXME
605 // emit call stub, compiled java to interpreter
606 void emit_java_to_interp(CodeBuffer &cbuf ) {
607 // Stub is fixed up when the corresponding call is converted from calling
608 // compiled code to calling interpreted code.
609 // mov rbx,0
610 // jmp -1
612 address mark = cbuf.insts_mark(); // get mark within main instrs section
614 // Note that the code buffer's insts_mark is always relative to insts.
615 // That's why we must use the macroassembler to generate a stub.
616 MacroAssembler _masm(&cbuf);
618 address base = __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) { // CodeBuffer::expand failed
620 ciEnv::current()->record_failure("CodeCache is full");
621 }
623 // static stub relocation stores the instruction address of the call
625 __ relocate(static_stub_Relocation::spec(mark), 0);
627 // static stub relocation also tags the methodOop in the code-stream.
628 __ patchable_set48(S3, (long)0);
629 // This is recognized as unresolved by relocs/nativeInst/ic code
631 __ relocate(relocInfo::runtime_call_type);
633 cbuf.set_insts_mark();
634 address call_pc = (address)-1;
635 __ patchable_jump(call_pc);
636 __ align(16);
637 __ end_a_stub();
638 // Update current stubs pointer and restore code_end.
639 }
// Size of the call stub, compiled java to interpreter.
uint size_java_to_interp() {
  // Four 4-byte instructions to materialize the 48-bit methodOop constant
  // (li48), plus the patchable call itself.
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}
// Number of relocation entries for the call stub: those emitted by
// emit_java_to_interp plus those of the corresponding Java_Static_Call.
uint reloc_java_to_interp() {
  return 16; // in emit_java_to_interp + in Java_Static_Call
}
// Return true when a branch with byte displacement `offset` fits in the
// signed 16-bit word displacement field of a MIPS conditional branch.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // Displacement is measured from the instruction after the branch.
  int offs = offset - br_size + 4;
  // To be conservative on MIPS
  // branch node should be end with:
  //   branch inst
  //   delay slot
  const int safety_zone = 3 * BytesPerInstWord;
  // Word-sized (>> 2) displacement, widened by the safety zone in the
  // direction of the branch, must still be a simm16.
  return Assembler::is_simm16((offs<0 ? offs-safety_zone : offs+safety_zone) >> 2);
}
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// Returning ConditionalMoveLimit effectively disables conditional float
// moves.  (The "SSE2" wording below is an x86 leftover.)
// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Narrow-oop decode does not use complex addressing modes on this port.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Max vector size in bytes. 0 if not supported.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  if (MaxVectorSize == 0)
    return 0;
  // Only 8-byte (64-bit) vectors are supported on this port.
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: only Op_VecD (8-byte vectors) exists here.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// (x86 wording retained; vector shifts are simply unsupported here.)
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// Register for DIVI projection of divmodI.
// Presumably fused div/mod nodes are never formed on this port, so these
// projection masks must never be requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
// Map an allocator register number to its FPU offset: the FP registers
// follow the 32 GPR slots in the register file numbering.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer arguments: T0 plus A0..A7 (both halves of each pair).
  if ( reg == T0_num || reg == T0_H_num
    || reg == A0_num || reg == A0_H_num
    || reg == A1_num || reg == A1_H_num
    || reg == A2_num || reg == A2_H_num
    || reg == A3_num || reg == A3_H_num
    || reg == A4_num || reg == A4_H_num
    || reg == A5_num || reg == A5_H_num
    || reg == A6_num || reg == A6_H_num
    || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point arguments: F12..F19 (both halves of each pair).
  if ( reg == F12_num || reg == F12_H_num
    || reg == F13_num || reg == F13_H_num
    || reg == F14_num || reg == F14_H_num
    || reg == F15_num || reg == F15_H_num
    || reg == F16_num || reg == F16_H_num
    || reg == F17_num || reg == F17_H_num
    || reg == F18_num || reg == F18_H_num
    || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}
// Any register that can carry a Java argument may be used for spilling.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use hand-expanded assembler for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL (never requested on this port).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Mask of the register that preserves SP across a method-handle invoke
// (the frame pointer on this port).
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// Padding inserted ahead of direct-call nodes so the fixed-length patchable
// call sequence starts at the required alignment.  The sequence is:
//   lui
//   ori
//   dsll
//   ori
//   jalr
//   nop (delay slot)
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same alignment rule; see the sequence listed on CallLeafNoFPDirectNode
// in this same group of compute_padding methods.
//   lui / ori / dsll / ori / jalr / nop
int CallLeafDirectNode::compute_padding(int current_offset) const {
  return round_to(current_offset, alignment_required()) - current_offset;
}

//   lui / ori / dsll / ori / jalr / nop
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// The count for the ClearArray node is not in bytes.
// (The "ecx / rep stos" wording of the original comment is an x86 leftover.)
const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the original comment argued MIPS needs it (no absolute
// addressing), yet the function returns false — confirm which is intended.
bool SafePointNode::needs_polling_address_input() {
  return false;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  // Six 4-byte instructions precede the return point:
  //   lui
  //   ori
  //   nop
  //   nop
  //   jalr
  //   nop (delay slot)
  return 24;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  // Four instructions to materialize the inline-cache klass, then a
  // six-instruction call sequence: (4 + 6) * 4 bytes.
  //   lui  IC_Klass,
  //   ori  IC_Klass,
  //   dsll IC_Klass
  //   ori  IC_Klass
  //
  //   lui  T9
  //   ori  T9
  //   nop
  //   nop
  //   jalr T9
  //   nop (delay slot)
  return 4 * 4 + 4 * 6;
}
914 //=============================================================================
// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Classify an allocator register name for spill-copy generation.
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg) ) return rc_bad;   // e.g. missing second half
  if (OptoReg::is_stack(reg)) return rc_stack;   // stack slot
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;           // general-purpose register
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}
// Shared worker for MachSpillCopyNode::format/emit/size.  Moves a value
// between any combination of {stack slot, gpr, fpr}, handling both 64-bit
// (aligned even/odd register pair) and 32-bit cases.  With a CodeBuffer it
// emits code; with an outputStream (and !do_size) it prints the assembly;
// in every mode it returns the byte size of the code (0 for a self copy).
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through scratch register AT.
      assert(src_second != dst_first, "overlap");
      // A properly aligned even/odd pair is treated as one 64-bit slot.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;   // ld + sd, 4 bytes each
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // lw sign-extends for Op_RegI; other 32-bit kinds use lwu
          // (zero-extend) so the upper 32 bits stay clean.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI keeps only the low 32 bits; other kinds copy all 64
          // bits via daddu dst, src, R0.
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Every reachable class combination returns above; hitting this means an
  // unhandled src/dst register-class pairing.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Printing variant: run the shared worker with no code buffer.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill/fill code into the code buffer via the shared worker.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Byte size of the spill copy; the worker computes it without emitting.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1357 //=============================================================================
1358 #
#ifndef PRODUCT
// Debug listing for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Let the generic path compute the emitted size.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1376 //=============================================================================
1377 #ifndef PRODUCT
1378 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1379 Compile *C = ra_->C;
1380 int framesize = C->frame_size_in_bytes();
1382 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1384 st->print_cr("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode", framesize);
1385 st->print("\t");
1386 if (UseLoongsonISA) {
1387 st->print_cr("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1388 } else {
1389 st->print_cr("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1390 st->print("\t");
1391 st->print_cr("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1392 }
1394 if( do_polling() && C->is_method_compilation() ) {
1395 st->print("\t");
1396 st->print_cr("Poll Safepoint # MachEpilogNode");
1397 }
1398 }
1399 #endif
// Emit the method epilogue: pop the frame, then reload the saved RA and FP,
// which sit just above the (now released) stack pointer.  For method
// compilations a return safepoint poll is appended.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson gslq loads the RA/FP pair with one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Safepoint poll: load from the polling page; the relocation marks the
    // lw as a poll_return site so the VM can recognize the trap.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Variable-length epilogue (Loongson vs. plain ISA, optional poll); let the
// generic path compute the emitted size the hard way.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1428 int MachEpilogNode::reloc() const {
1429 return 0; // a large enough number
1430 }
// The epilogue uses the default pipeline description.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1436 int MachEpilogNode::safepoint_offset() const { return 0; }
1438 //=============================================================================
#ifndef PRODUCT
// Debug listing: the box-lock node computes the stack address of the
// monitor slot into its output register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One addi instruction (see emit()), i.e. 4 bytes.
// NOTE(review): assumes the monitor-slot offset always fits the 16-bit addi
// immediate -- confirm against the maximum allowed frame size.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the address of the monitor stack slot: reg = SP + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi takes a 16-bit signed immediate and traps on signed
  // overflow; presumably offset always fits -- confirm for large frames.
  __ addi(as_Register(reg), SP, offset);
}
1462 //static int sizeof_FFree_Float_Stack_All = -1;
1464 int MachCallRuntimeNode::ret_addr_offset() {
1465 //lui
1466 //ori
1467 //dsll
1468 //ori
1469 //jalr
1470 //nop
1471 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1472 return NativeCall::instruction_size;
1473 }
1476 //=============================================================================
#ifndef PRODUCT
// Debug listing for alignment/padding nops.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1483 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1484 MacroAssembler _masm(&cbuf);
1485 int i = 0;
1486 for(i = 0; i < _count; i++)
1487 __ nop();
1488 }
1490 uint MachNopNode::size(PhaseRegAlloc *) const {
1491 return 4 * _count;
1492 }
// Nops use the default pipeline description.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1497 //=============================================================================
1499 //=============================================================================
#ifndef PRODUCT
// Debug listing of the unverified entry point: compare the receiver's klass
// against the inline cache, fall through to L on a hit, jump to the IC miss
// stub otherwise.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified entry point: check that the receiver's klass (loaded into T9)
// matches the inline-cache register; on mismatch, jump to the IC miss stub.
// The alignment at the end is required by NativeJump::patch_verified_entry.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;       // receiver arrives in T0
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ delayed()->nop();          // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size varies with the emitted alignment padding; compute it generically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1541 //=============================================================================
1543 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
1545 int Compile::ConstantTable::calculate_table_base_offset() const {
1546 return 0; // absolute addressing, no offset
1547 }
1549 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1550 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1551 ShouldNotReachHere();
1552 }
// Materialize the absolute address of the constant-table base into the
// node's output register with a patchable 48-bit set, tagged with an
// internal_word relocation.  Nothing is emitted when the table is empty.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_word_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
1573 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1574 // patchable_set48 (4 insts)
1575 return 4 * 4;
1576 }
#ifndef PRODUCT
// Debug listing for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1586 //=============================================================================
#ifndef PRODUCT
// Debug listing of the prologue: optional stack bang, save RA/FP above the
// new frame, set up FP, then allocate the frame.  Mirrors emit() below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson gssq stores the RA/FP pair in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prologue: optional stack-overflow bang, save RA and FP in
// the two words just below the caller's SP, establish the new FP, allocate
// the frame, and pad with two nops so patch_verified_entry() has room.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (UseLoongsonISA) {
    // Loongson gssq stores the RA/FP pair with one instruction.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); // Make enough room for patch_verified_entry()
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Variable-length prologue (bang, Loongson vs. plain ISA); compute the
// emitted size generically.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
1654 int MachPrologNode::reloc() const {
1655 return 0; // a large enough number
1656 }
1658 %}
1660 //----------ENCODING BLOCK-----------------------------------------------------
1661 // This block specifies the encoding classes used by the compiler to output
1662 // byte streams. Encoding classes generate functions which are called by
1663 // Machine Instruction Nodes in order to generate the bit encoding of the
1664 // instruction. Operands specify their base encoding interface with the
1665 // interface keyword. There are currently supported four interfaces,
1666 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1667 // operand to generate a function which returns its register number when
1668 // queried. CONST_INTER causes an operand to generate a function which
1669 // returns the value of the constant when queried. MEMORY_INTER causes an
1670 // operand to generate four functions which return the Base Register, the
1671 // Index Register, the Scale Value, and the Offset Value of the operand when
1672 // queried. COND_INTER causes an operand to generate six functions which
1673 // return the encoding code (ie - encoding bits for the instruction)
1674 // associated with each basic boolean condition for a conditional instruction.
1675 // Instructions specify two basic values for encoding. They use the
1676 // ins_encode keyword to specify their encoding class (which must be one of
1677 // the class names specified in the encoding block), and they use the
1678 // opcode keyword to specify, in order, their primary, secondary, and
1679 // tertiary opcode. Only the opcode sections which a particular instruction
1680 // needs for encoding need to be specified.
1681 encode %{
1683 //Load byte signed
1684 enc_class load_B_enc (mRegI dst, memory mem) %{
1685 MacroAssembler _masm(&cbuf);
1686 int dst = $dst$$reg;
1687 int base = $mem$$base;
1688 int index = $mem$$index;
1689 int scale = $mem$$scale;
1690 int disp = $mem$$disp;
1692 if( index != 0 ) {
1693 if( Assembler::is_simm16(disp) ) {
1694 if( UseLoongsonISA ) {
1695 if (scale == 0) {
1696 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1697 } else {
1698 __ dsll(AT, as_Register(index), scale);
1699 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1700 }
1701 } else {
1702 if (scale == 0) {
1703 __ addu(AT, as_Register(base), as_Register(index));
1704 } else {
1705 __ dsll(AT, as_Register(index), scale);
1706 __ addu(AT, as_Register(base), AT);
1707 }
1708 __ lb(as_Register(dst), AT, disp);
1709 }
1710 } else {
1711 if (scale == 0) {
1712 __ addu(AT, as_Register(base), as_Register(index));
1713 } else {
1714 __ dsll(AT, as_Register(index), scale);
1715 __ addu(AT, as_Register(base), AT);
1716 }
1717 __ move(T9, disp);
1718 if( UseLoongsonISA ) {
1719 __ gslbx(as_Register(dst), AT, T9, 0);
1720 } else {
1721 __ addu(AT, AT, T9);
1722 __ lb(as_Register(dst), AT, 0);
1723 }
1724 }
1725 } else {
1726 if( Assembler::is_simm16(disp) ) {
1727 __ lb(as_Register(dst), as_Register(base), disp);
1728 } else {
1729 __ move(T9, disp);
1730 if( UseLoongsonISA ) {
1731 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1732 } else {
1733 __ addu(AT, as_Register(base), T9);
1734 __ lb(as_Register(dst), AT, 0);
1735 }
1736 }
1737 }
1738 %}
1740 //Load byte unsigned
  //Load byte unsigned
  // Emit a zero-extending byte load from [base + (index << scale) + disp].
  // Address arithmetic uses 64-bit daddu; AT is the scratch address
  // register, T9 scratches an out-of-range displacement.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
1774 enc_class store_B_reg_enc (memory mem, mRegI src) %{
1775 MacroAssembler _masm(&cbuf);
1776 int src = $src$$reg;
1777 int base = $mem$$base;
1778 int index = $mem$$index;
1779 int scale = $mem$$scale;
1780 int disp = $mem$$disp;
1782 if( index != 0 ) {
1783 if (scale == 0) {
1784 if( Assembler::is_simm(disp, 8) ) {
1785 if (UseLoongsonISA) {
1786 __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
1787 } else {
1788 __ addu(AT, as_Register(base), as_Register(index));
1789 __ sb(as_Register(src), AT, disp);
1790 }
1791 } else if( Assembler::is_simm16(disp) ) {
1792 __ addu(AT, as_Register(base), as_Register(index));
1793 __ sb(as_Register(src), AT, disp);
1794 } else {
1795 __ addu(AT, as_Register(base), as_Register(index));
1796 __ move(T9, disp);
1797 if (UseLoongsonISA) {
1798 __ gssbx(as_Register(src), AT, T9, 0);
1799 } else {
1800 __ addu(AT, AT, T9);
1801 __ sb(as_Register(src), AT, 0);
1802 }
1803 }
1804 } else {
1805 __ dsll(AT, as_Register(index), scale);
1806 if( Assembler::is_simm(disp, 8) ) {
1807 if (UseLoongsonISA) {
1808 __ gssbx(as_Register(src), AT, as_Register(base), disp);
1809 } else {
1810 __ addu(AT, as_Register(base), AT);
1811 __ sb(as_Register(src), AT, disp);
1812 }
1813 } else if( Assembler::is_simm16(disp) ) {
1814 __ addu(AT, as_Register(base), AT);
1815 __ sb(as_Register(src), AT, disp);
1816 } else {
1817 __ addu(AT, as_Register(base), AT);
1818 __ move(T9, disp);
1819 if (UseLoongsonISA) {
1820 __ gssbx(as_Register(src), AT, T9, 0);
1821 } else {
1822 __ addu(AT, AT, T9);
1823 __ sb(as_Register(src), AT, 0);
1824 }
1825 }
1826 }
1827 } else {
1828 if( Assembler::is_simm16(disp) ) {
1829 __ sb(as_Register(src), as_Register(base), disp);
1830 } else {
1831 __ move(T9, disp);
1832 if (UseLoongsonISA) {
1833 __ gssbx(as_Register(src), as_Register(base), T9, 0);
1834 } else {
1835 __ addu(AT, as_Register(base), T9);
1836 __ sb(as_Register(src), AT, 0);
1837 }
1838 }
1839 }
1840 %}
  // Store an 8-bit immediate to [base + (index << scale) + disp].  A zero
  // immediate is stored directly from R0; otherwise the value is first
  // materialized into a scratch register (AT or T9).  The Loongson path
  // prefers the fused gssbx store (8-bit immediate offset only).
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // AT = base + (index << scale)
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {

        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            // disp fits the 8-bit gssbx offset: fused store.
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp; the remaining index becomes the gssbx
              // index operand (scale is 0 here).
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp, then store at base + AT.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
// Encoding: store a byte immediate `src` to `mem`, then emit a full memory
// barrier (sync) — the "store byte" encoding used where ordering is required
// (volatile byte stores).
// Address-mode selection: with an index register and UseLoongsonISA, prefer the
// Loongson indexed store-byte gssbx (its offset must fit simm8); otherwise form
// the effective address in AT and use plain MIPS sb (offset must fit simm16);
// displacements beyond simm16 are materialized into a scratch register first.
// A zero immediate is stored straight from the hardwired zero register R0,
// saving the `move` of the constant. AT and T9 are the only scratch registers.
1975 enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
1976 MacroAssembler _masm(&cbuf);
1977 int base = $mem$$base;
1978 int index = $mem$$index;
1979 int scale = $mem$$scale;
1980 int disp = $mem$$disp;
1981 int value = $src$$constant;
// Case 1: base + (index << scale) + disp addressing.
1983 if( index != 0 ) {
1984 if ( UseLoongsonISA ) {
// gssbx reaches only an 8-bit signed offset.
1985 if ( Assembler::is_simm(disp,8) ) {
1986 if ( scale == 0 ) {
1987 if ( value == 0 ) {
1988 __ gssbx(R0, as_Register(base), as_Register(index), disp);
1989 } else {
1990 __ move(AT, value);
1991 __ gssbx(AT, as_Register(base), as_Register(index), disp);
1992 }
1993 } else {
1994 __ dsll(AT, as_Register(index), scale);
1995 if ( value == 0 ) {
1996 __ gssbx(R0, as_Register(base), AT, disp);
1997 } else {
1998 __ move(T9, value);
1999 __ gssbx(T9, as_Register(base), AT, disp);
2000 }
2001 }
// disp fits sb's 16-bit offset: compute base+scaled index, then sb.
2002 } else if ( Assembler::is_simm16(disp) ) {
2003 if ( scale == 0 ) {
2004 __ daddu(AT, as_Register(base), as_Register(index));
2005 if ( value == 0 ){
2006 __ sb(R0, AT, disp);
2007 } else {
2008 __ move(T9, value);
2009 __ sb(T9, AT, disp);
2010 }
2011 } else {
2012 __ dsll(AT, as_Register(index), scale);
2013 __ daddu(AT, as_Register(base), AT);
2014 if ( value == 0 ) {
2015 __ sb(R0, AT, disp);
2016 } else {
2017 __ move(T9, value);
2018 __ sb(T9, AT, disp);
2019 }
2020 }
// disp too large for any immediate field: fold it into the index side,
// then use gssbx with a zero offset.
2021 } else {
2022 if ( scale == 0 ) {
2023 __ move(AT, disp);
2024 __ daddu(AT, as_Register(index), AT);
2025 if ( value == 0 ) {
2026 __ gssbx(R0, as_Register(base), AT, 0);
2027 } else {
2028 __ move(T9, value);
2029 __ gssbx(T9, as_Register(base), AT, 0);
2030 }
2031 } else {
2032 __ dsll(AT, as_Register(index), scale);
2033 __ move(T9, disp);
2034 __ daddu(AT, AT, T9);
2035 if ( value == 0 ) {
2036 __ gssbx(R0, as_Register(base), AT, 0);
2037 } else {
2038 __ move(T9, value);
2039 __ gssbx(T9, as_Register(base), AT, 0);
2040 }
2041 }
2042 }
2043 } else { //not use loongson isa
2044 if (scale == 0) {
2045 __ daddu(AT, as_Register(base), as_Register(index));
2046 } else {
2047 __ dsll(AT, as_Register(index), scale);
2048 __ daddu(AT, as_Register(base), AT);
2049 }
2050 if( Assembler::is_simm16(disp) ) {
2051 if (value == 0) {
2052 __ sb(R0, AT, disp);
2053 } else {
2054 __ move(T9, value);
2055 __ sb(T9, AT, disp);
2056 }
2057 } else {
2058 if (value == 0) {
2059 __ move(T9, disp);
2060 __ daddu(AT, AT, T9);
2061 __ sb(R0, AT, 0);
2062 } else {
2063 __ move(T9, disp);
2064 __ daddu(AT, AT, T9);
2065 __ move(T9, value);
2066 __ sb(T9, AT, 0);
2067 }
2068 }
2069 }
// Case 2: no index register — base + disp only.
2070 } else {
2071 if ( UseLoongsonISA ){
2072 if ( Assembler::is_simm16(disp) ){
2073 if ( value == 0 ) {
2074 __ sb(R0, as_Register(base), disp);
2075 } else {
2076 __ move(AT, value);
2077 __ sb(AT, as_Register(base), disp);
2078 }
2079 } else {
2080 __ move(AT, disp);
2081 if ( value == 0 ) {
2082 __ gssbx(R0, as_Register(base), AT, 0);
2083 } else {
2084 __ move(T9, value);
2085 __ gssbx(T9, as_Register(base), AT, 0);
2086 }
2087 }
2088 } else {
2089 if( Assembler::is_simm16(disp) ) {
2090 if (value == 0) {
2091 __ sb(R0, as_Register(base), disp);
2092 } else {
2093 __ move(AT, value);
2094 __ sb(AT, as_Register(base), disp);
2095 }
2096 } else {
2097 if (value == 0) {
2098 __ move(T9, disp);
2099 __ daddu(AT, as_Register(base), T9);
2100 __ sb(R0, AT, 0);
2101 } else {
2102 __ move(T9, disp);
2103 __ daddu(AT, as_Register(base), T9);
2104 __ move(T9, value);
2105 __ sb(T9, AT, 0);
2106 }
2107 }
2108 }
2109 }
// Full barrier after the store — this is what distinguishes the _sync
// variant from the plain store_B_immI encoding.
2111 __ sync();
2112 %}
2114 // Load Short (16bit signed)
// Encoding: sign-extending 16-bit load from `mem` into integer register `dst`.
// UseLoongsonISA paths use the indexed gslhx (offset limited to simm8);
// otherwise the effective address is formed in AT and plain MIPS lh is used
// (offset limited to simm16); larger displacements go through T9/AT.
2115 enc_class load_S_enc (mRegI dst, memory mem) %{
2116 MacroAssembler _masm(&cbuf);
2117 int dst = $dst$$reg;
2118 int base = $mem$$base;
2119 int index = $mem$$index;
2120 int scale = $mem$$scale;
2121 int disp = $mem$$disp;
2123 if( index != 0 ) {
2124 if ( UseLoongsonISA ) {
2125 if ( Assembler::is_simm(disp, 8) ) {
2126 if (scale == 0) {
2127 __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
2128 } else {
2129 __ dsll(AT, as_Register(index), scale);
2130 __ gslhx(as_Register(dst), as_Register(base), AT, disp);
2131 }
2132 } else if ( Assembler::is_simm16(disp) ) {
2133 if (scale == 0) {
2134 __ daddu(AT, as_Register(base), as_Register(index));
2135 __ lh(as_Register(dst), AT, disp);
2136 } else {
2137 __ dsll(AT, as_Register(index), scale);
2138 __ daddu(AT, as_Register(base), AT);
2139 __ lh(as_Register(dst), AT, disp);
2140 }
2141 } else {
// disp exceeds simm16: fold it into the index operand of gslhx.
2142 if (scale == 0) {
2143 __ move(AT, disp);
2144 __ daddu(AT, as_Register(index), AT);
2145 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2146 } else {
2147 __ dsll(AT, as_Register(index), scale);
2148 __ move(T9, disp);
2149 __ daddu(AT, AT, T9);
2150 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2151 }
2152 }
2153 } else { // not use loongson isa
2154 if (scale == 0) {
2155 __ daddu(AT, as_Register(base), as_Register(index));
2156 } else {
2157 __ dsll(AT, as_Register(index), scale);
2158 __ daddu(AT, as_Register(base), AT);
2159 }
2160 if( Assembler::is_simm16(disp) ) {
2161 __ lh(as_Register(dst), AT, disp);
2162 } else {
2163 __ move(T9, disp);
2164 __ daddu(AT, AT, T9);
2165 __ lh(as_Register(dst), AT, 0);
2166 }
2167 }
2168 } else { // index is 0
2169 if ( UseLoongsonISA ) {
2170 if ( Assembler::is_simm16(disp) ) {
2171 __ lh(as_Register(dst), as_Register(base), disp);
2172 } else {
2173 __ move(T9, disp);
2174 __ gslhx(as_Register(dst), as_Register(base), T9, 0);
2175 }
2176 } else { //not use loongson isa
2177 if( Assembler::is_simm16(disp) ) {
2178 __ lh(as_Register(dst), as_Register(base), disp);
2179 } else {
2180 __ move(T9, disp);
2181 __ daddu(AT, as_Register(base), T9);
2182 __ lh(as_Register(dst), AT, 0);
2183 }
2184 }
2185 }
2186 %}
2188 // Load Char (16bit unsigned)
// Encoding: zero-extending 16-bit load (lhu) from `mem` into `dst`.
// Unlike load_S_enc this variant has no Loongson-indexed fast path; the
// effective address is always formed in AT when an index is present.
2189 enc_class load_C_enc (mRegI dst, memory mem) %{
2190 MacroAssembler _masm(&cbuf);
2191 int dst = $dst$$reg;
2192 int base = $mem$$base;
2193 int index = $mem$$index;
2194 int scale = $mem$$scale;
2195 int disp = $mem$$disp;
2197 if( index != 0 ) {
2198 if (scale == 0) {
2199 __ daddu(AT, as_Register(base), as_Register(index));
2200 } else {
2201 __ dsll(AT, as_Register(index), scale);
2202 __ daddu(AT, as_Register(base), AT);
2203 }
2204 if( Assembler::is_simm16(disp) ) {
2205 __ lhu(as_Register(dst), AT, disp);
2206 } else {
// NOTE(review): 32-bit addu (sign-extended sum) for a 64-bit address,
// while the simm16 path above and sibling encodings use daddu —
// confirm this large-disp path is either unreachable or intentional.
2207 __ move(T9, disp);
2208 __ addu(AT, AT, T9);
2209 __ lhu(as_Register(dst), AT, 0);
2210 }
2211 } else {
2212 if( Assembler::is_simm16(disp) ) {
2213 __ lhu(as_Register(dst), as_Register(base), disp);
2214 } else {
2215 __ move(T9, disp);
2216 __ daddu(AT, as_Register(base), T9);
2217 __ lhu(as_Register(dst), AT, 0);
2218 }
2219 }
2220 %}
2222 // Store Char (16bit unsigned)
// Encoding: 16-bit store (sh / Loongson gsshx) of register `src` to `mem`.
// NOTE(review): this encoding uses 32-bit addu for address arithmetic where
// the byte/long encodings use daddu — presumably relies on sums staying in
// the sign-extended 32-bit range; confirm against the other store encodings.
2223 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2224 MacroAssembler _masm(&cbuf);
2225 int src = $src$$reg;
2226 int base = $mem$$base;
2227 int index = $mem$$index;
2228 int scale = $mem$$scale;
2229 int disp = $mem$$disp;
2231 if( index != 0 ) {
2232 if( Assembler::is_simm16(disp) ) {
// gsshx needs disp to fit simm8; otherwise compute the address and sh.
2233 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2234 if (scale == 0) {
2235 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2236 } else {
2237 __ dsll(AT, as_Register(index), scale);
2238 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2239 }
2240 } else {
2241 if (scale == 0) {
2242 __ addu(AT, as_Register(base), as_Register(index));
2243 } else {
2244 __ dsll(AT, as_Register(index), scale);
2245 __ addu(AT, as_Register(base), AT);
2246 }
2247 __ sh(as_Register(src), AT, disp);
2248 }
2249 } else {
2250 if (scale == 0) {
2251 __ addu(AT, as_Register(base), as_Register(index));
2252 } else {
2253 __ dsll(AT, as_Register(index), scale);
2254 __ addu(AT, as_Register(base), AT);
2255 }
2256 __ move(T9, disp);
2257 if( UseLoongsonISA ) {
2258 __ gsshx(as_Register(src), AT, T9, 0);
2259 } else {
2260 __ addu(AT, AT, T9);
2261 __ sh(as_Register(src), AT, 0);
2262 }
2263 }
2264 } else {
2265 if( Assembler::is_simm16(disp) ) {
2266 __ sh(as_Register(src), as_Register(base), disp);
2267 } else {
2268 __ move(T9, disp);
2269 if( UseLoongsonISA ) {
2270 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2271 } else {
2272 __ addu(AT, as_Register(base), T9);
2273 __ sh(as_Register(src), AT, 0);
2274 }
2275 }
2276 }
2277 %}
// Encoding: store a zero 16-bit value to `mem`, using the hardwired zero
// register R0 as the source (avoids materializing the constant).
// Mirrors store_C_reg_enc's addressing strategy, including its use of
// 32-bit addu for address arithmetic (see review note on that encoding).
2279 enc_class store_C0_enc (memory mem) %{
2280 MacroAssembler _masm(&cbuf);
2281 int base = $mem$$base;
2282 int index = $mem$$index;
2283 int scale = $mem$$scale;
2284 int disp = $mem$$disp;
2286 if( index != 0 ) {
2287 if( Assembler::is_simm16(disp) ) {
2288 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2289 if (scale == 0) {
2290 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2291 } else {
2292 __ dsll(AT, as_Register(index), scale);
2293 __ gsshx(R0, as_Register(base), AT, disp);
2294 }
2295 } else {
2296 if (scale == 0) {
2297 __ addu(AT, as_Register(base), as_Register(index));
2298 } else {
2299 __ dsll(AT, as_Register(index), scale);
2300 __ addu(AT, as_Register(base), AT);
2301 }
2302 __ sh(R0, AT, disp);
2303 }
2304 } else {
2305 if (scale == 0) {
2306 __ addu(AT, as_Register(base), as_Register(index));
2307 } else {
2308 __ dsll(AT, as_Register(index), scale);
2309 __ addu(AT, as_Register(base), AT);
2310 }
2311 __ move(T9, disp);
2312 if( UseLoongsonISA ) {
2313 __ gsshx(R0, AT, T9, 0);
2314 } else {
2315 __ addu(AT, AT, T9);
2316 __ sh(R0, AT, 0);
2317 }
2318 }
2319 } else {
2320 if( Assembler::is_simm16(disp) ) {
2321 __ sh(R0, as_Register(base), disp);
2322 } else {
2323 __ move(T9, disp);
2324 if( UseLoongsonISA ) {
2325 __ gsshx(R0, as_Register(base), T9, 0);
2326 } else {
2327 __ addu(AT, as_Register(base), T9);
2328 __ sh(R0, AT, 0);
2329 }
2330 }
2331 }
2332 %}
// Encoding: 32-bit signed load (lw / Loongson gslwx) from `mem` into `dst`.
// gslwx is used when UseLoongsonISA and the offset fits simm8; otherwise the
// effective address is formed in AT and lw is used.
// NOTE(review): address arithmetic here uses 32-bit addu (vs daddu in the
// long/pointer encodings) — confirm this is intentional.
2334 enc_class load_I_enc (mRegI dst, memory mem) %{
2335 MacroAssembler _masm(&cbuf);
2336 int dst = $dst$$reg;
2337 int base = $mem$$base;
2338 int index = $mem$$index;
2339 int scale = $mem$$scale;
2340 int disp = $mem$$disp;
2342 if( index != 0 ) {
2343 if( Assembler::is_simm16(disp) ) {
2344 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2345 if (scale == 0) {
2346 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2347 } else {
2348 __ dsll(AT, as_Register(index), scale);
2349 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2350 }
2351 } else {
2352 if (scale == 0) {
2353 __ addu(AT, as_Register(base), as_Register(index));
2354 } else {
2355 __ dsll(AT, as_Register(index), scale);
2356 __ addu(AT, as_Register(base), AT);
2357 }
2358 __ lw(as_Register(dst), AT, disp);
2359 }
2360 } else {
2361 if (scale == 0) {
2362 __ addu(AT, as_Register(base), as_Register(index));
2363 } else {
2364 __ dsll(AT, as_Register(index), scale);
2365 __ addu(AT, as_Register(base), AT);
2366 }
2367 __ move(T9, disp);
2368 if( UseLoongsonISA ) {
2369 __ gslwx(as_Register(dst), AT, T9, 0);
2370 } else {
2371 __ addu(AT, AT, T9);
2372 __ lw(as_Register(dst), AT, 0);
2373 }
2374 }
2375 } else {
2376 if( Assembler::is_simm16(disp) ) {
2377 __ lw(as_Register(dst), as_Register(base), disp);
2378 } else {
2379 __ move(T9, disp);
2380 if( UseLoongsonISA ) {
2381 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2382 } else {
2383 __ addu(AT, as_Register(base), T9);
2384 __ lw(as_Register(dst), AT, 0);
2385 }
2386 }
2387 }
2388 %}
// Encoding: 32-bit store (sw / Loongson gsswx) of register `src` to `mem`.
// Structure mirrors load_I_enc exactly (same addressing-mode selection and
// the same 32-bit addu address arithmetic — see review note there).
2390 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2391 MacroAssembler _masm(&cbuf);
2392 int src = $src$$reg;
2393 int base = $mem$$base;
2394 int index = $mem$$index;
2395 int scale = $mem$$scale;
2396 int disp = $mem$$disp;
2398 if( index != 0 ) {
2399 if( Assembler::is_simm16(disp) ) {
2400 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2401 if (scale == 0) {
2402 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2403 } else {
2404 __ dsll(AT, as_Register(index), scale);
2405 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2406 }
2407 } else {
2408 if (scale == 0) {
2409 __ addu(AT, as_Register(base), as_Register(index));
2410 } else {
2411 __ dsll(AT, as_Register(index), scale);
2412 __ addu(AT, as_Register(base), AT);
2413 }
2414 __ sw(as_Register(src), AT, disp);
2415 }
2416 } else {
2417 if (scale == 0) {
2418 __ addu(AT, as_Register(base), as_Register(index));
2419 } else {
2420 __ dsll(AT, as_Register(index), scale);
2421 __ addu(AT, as_Register(base), AT);
2422 }
2423 __ move(T9, disp);
2424 if( UseLoongsonISA ) {
2425 __ gsswx(as_Register(src), AT, T9, 0);
2426 } else {
2427 __ addu(AT, AT, T9);
2428 __ sw(as_Register(src), AT, 0);
2429 }
2430 }
2431 } else {
2432 if( Assembler::is_simm16(disp) ) {
2433 __ sw(as_Register(src), as_Register(base), disp);
2434 } else {
2435 __ move(T9, disp);
2436 if( UseLoongsonISA ) {
2437 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2438 } else {
2439 __ addu(AT, as_Register(base), T9);
2440 __ sw(as_Register(src), AT, 0);
2441 }
2442 }
2443 }
2444 %}
// Encoding: store a 32-bit immediate `src` to `mem`.
// A zero immediate is stored directly from R0; otherwise the constant is
// materialized into a scratch register (T9 or AT) first. Addressing-mode
// selection follows the usual pattern: Loongson gsswx for simm8 offsets,
// sw for simm16, and scratch-register address formation beyond that.
2446 enc_class store_I_immI_enc (memory mem, immI src) %{
2447 MacroAssembler _masm(&cbuf);
2448 int base = $mem$$base;
2449 int index = $mem$$index;
2450 int scale = $mem$$scale;
2451 int disp = $mem$$disp;
2452 int value = $src$$constant;
2454 if( index != 0 ) {
2455 if ( UseLoongsonISA ) {
2456 if ( Assembler::is_simm(disp, 8) ) {
2457 if ( scale == 0 ) {
2458 if ( value == 0 ) {
2459 __ gsswx(R0, as_Register(base), as_Register(index), disp);
2460 } else {
2461 __ move(T9, value);
2462 __ gsswx(T9, as_Register(base), as_Register(index), disp);
2463 }
2464 } else {
2465 __ dsll(AT, as_Register(index), scale);
2466 if ( value == 0 ) {
2467 __ gsswx(R0, as_Register(base), AT, disp);
2468 } else {
2469 __ move(T9, value);
2470 __ gsswx(T9, as_Register(base), AT, disp);
2471 }
2472 }
2473 } else if ( Assembler::is_simm16(disp) ) {
2474 if ( scale == 0 ) {
2475 __ daddu(AT, as_Register(base), as_Register(index));
2476 if ( value == 0 ) {
2477 __ sw(R0, AT, disp);
2478 } else {
2479 __ move(T9, value);
2480 __ sw(T9, AT, disp);
2481 }
2482 } else {
2483 __ dsll(AT, as_Register(index), scale);
2484 __ daddu(AT, as_Register(base), AT);
2485 if ( value == 0 ) {
2486 __ sw(R0, AT, disp);
2487 } else {
2488 __ move(T9, value);
2489 __ sw(T9, AT, disp);
2490 }
2491 }
2492 } else {
// disp exceeds simm16: fold it into the index operand of gsswx.
2493 if ( scale == 0 ) {
2494 __ move(T9, disp);
2495 __ daddu(AT, as_Register(index), T9);
2496 if ( value ==0 ) {
2497 __ gsswx(R0, as_Register(base), AT, 0);
2498 } else {
2499 __ move(T9, value);
2500 __ gsswx(T9, as_Register(base), AT, 0);
2501 }
2502 } else {
2503 __ dsll(AT, as_Register(index), scale);
2504 __ move(T9, disp);
2505 __ daddu(AT, AT, T9);
2506 if ( value == 0 ) {
2507 __ gsswx(R0, as_Register(base), AT, 0);
2508 } else {
2509 __ move(T9, value);
2510 __ gsswx(T9, as_Register(base), AT, 0);
2511 }
2512 }
2513 }
2514 } else { //not use loongson isa
2515 if (scale == 0) {
2516 __ daddu(AT, as_Register(base), as_Register(index));
2517 } else {
2518 __ dsll(AT, as_Register(index), scale);
2519 __ daddu(AT, as_Register(base), AT);
2520 }
2521 if( Assembler::is_simm16(disp) ) {
2522 if (value == 0) {
2523 __ sw(R0, AT, disp);
2524 } else {
2525 __ move(T9, value);
2526 __ sw(T9, AT, disp);
2527 }
2528 } else {
2529 if (value == 0) {
2530 __ move(T9, disp);
2531 __ daddu(AT, AT, T9);
2532 __ sw(R0, AT, 0);
2533 } else {
2534 __ move(T9, disp);
2535 __ daddu(AT, AT, T9);
2536 __ move(T9, value);
2537 __ sw(T9, AT, 0);
2538 }
2539 }
2540 }
2541 } else {
2542 if ( UseLoongsonISA ) {
2543 if ( Assembler::is_simm16(disp) ) {
2544 if ( value == 0 ) {
2545 __ sw(R0, as_Register(base), disp);
2546 } else {
2547 __ move(AT, value);
2548 __ sw(AT, as_Register(base), disp);
2549 }
2550 } else {
2551 __ move(T9, disp);
2552 if ( value == 0 ) {
2553 __ gsswx(R0, as_Register(base), T9, 0);
2554 } else {
2555 __ move(AT, value);
2556 __ gsswx(AT, as_Register(base), T9, 0);
2557 }
2558 }
2559 } else {
2560 if( Assembler::is_simm16(disp) ) {
2561 if (value == 0) {
2562 __ sw(R0, as_Register(base), disp);
2563 } else {
2564 __ move(AT, value);
2565 __ sw(AT, as_Register(base), disp);
2566 }
2567 } else {
2568 if (value == 0) {
2569 __ move(T9, disp);
2570 __ daddu(AT, as_Register(base), T9);
2571 __ sw(R0, AT, 0);
2572 } else {
2573 __ move(T9, disp);
2574 __ daddu(AT, as_Register(base), T9);
2575 __ move(T9, value);
2576 __ sw(T9, AT, 0);
2577 }
2578 }
2579 }
2580 }
2581 %}
// Encoding: load a narrow (compressed) oop — a zero-extending 32-bit load
// (lwu) from `mem` into `dst`. The operand must not carry a relocated
// displacement (asserted below); large displacements use set64 to build the
// full 64-bit constant in T9. Address arithmetic is 64-bit (daddu).
2583 enc_class load_N_enc (mRegN dst, memory mem) %{
2584 MacroAssembler _masm(&cbuf);
2585 int dst = $dst$$reg;
2586 int base = $mem$$base;
2587 int index = $mem$$index;
2588 int scale = $mem$$scale;
2589 int disp = $mem$$disp;
2590 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2591 assert(disp_reloc == relocInfo::none, "cannot have disp");
2593 if( index != 0 ) {
2594 if (scale == 0) {
2595 __ daddu(AT, as_Register(base), as_Register(index));
2596 } else {
2597 __ dsll(AT, as_Register(index), scale);
2598 __ daddu(AT, as_Register(base), AT);
2599 }
2600 if( Assembler::is_simm16(disp) ) {
2601 __ lwu(as_Register(dst), AT, disp);
2602 } else {
2603 __ set64(T9, disp);
2604 __ daddu(AT, AT, T9);
2605 __ lwu(as_Register(dst), AT, 0);
2606 }
2607 } else {
2608 if( Assembler::is_simm16(disp) ) {
2609 __ lwu(as_Register(dst), as_Register(base), disp);
2610 } else {
2611 __ set64(T9, disp);
2612 __ daddu(AT, as_Register(base), T9);
2613 __ lwu(as_Register(dst), AT, 0);
2614 }
2615 }
2616 %}
// Encoding: load a 64-bit pointer (ld / Loongson gsldx) from `mem` into
// `dst`. The operand must not carry a relocated displacement (asserted).
// Loongson paths prefer gsldx (indexed, simm8 offset); otherwise the address
// is formed in AT and ld is used; simm16-exceeding displacements are built
// with move/set64 into a scratch register.
2619 enc_class load_P_enc (mRegP dst, memory mem) %{
2620 MacroAssembler _masm(&cbuf);
2621 int dst = $dst$$reg;
2622 int base = $mem$$base;
2623 int index = $mem$$index;
2624 int scale = $mem$$scale;
2625 int disp = $mem$$disp;
2626 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2627 assert(disp_reloc == relocInfo::none, "cannot have disp");
2629 if( index != 0 ) {
2630 if ( UseLoongsonISA ) {
2631 if ( Assembler::is_simm(disp, 8) ) {
2632 if ( scale != 0 ) {
2633 __ dsll(AT, as_Register(index), scale);
2634 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2635 } else {
2636 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2637 }
2638 } else if ( Assembler::is_simm16(disp) ){
2639 if ( scale != 0 ) {
2640 __ dsll(AT, as_Register(index), scale);
2641 __ daddu(AT, AT, as_Register(base));
2642 } else {
2643 __ daddu(AT, as_Register(index), as_Register(base));
2644 }
2645 __ ld(as_Register(dst), AT, disp);
2646 } else {
// disp exceeds simm16: fold it into the index operand of gsldx.
2647 if ( scale != 0 ) {
2648 __ dsll(AT, as_Register(index), scale);
2649 __ move(T9, disp);
2650 __ daddu(AT, AT, T9);
2651 } else {
2652 __ move(T9, disp);
2653 __ daddu(AT, as_Register(index), T9);
2654 }
2655 __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2656 }
2657 } else { //not use loongson isa
2658 if (scale == 0) {
2659 __ daddu(AT, as_Register(base), as_Register(index));
2660 } else {
2661 __ dsll(AT, as_Register(index), scale);
2662 __ daddu(AT, as_Register(base), AT);
2663 }
2664 if( Assembler::is_simm16(disp) ) {
2665 __ ld(as_Register(dst), AT, disp);
2666 } else {
2667 __ set64(T9, disp);
2668 __ daddu(AT, AT, T9);
2669 __ ld(as_Register(dst), AT, 0);
2670 }
2671 }
2672 } else {
2673 if ( UseLoongsonISA ) {
2674 if ( Assembler::is_simm16(disp) ){
2675 __ ld(as_Register(dst), as_Register(base), disp);
2676 } else {
2677 __ set64(T9, disp);
2678 __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2679 }
2680 } else { //not use loongson isa
2681 if( Assembler::is_simm16(disp) ) {
2682 __ ld(as_Register(dst), as_Register(base), disp);
2683 } else {
2684 __ set64(T9, disp);
2685 __ daddu(AT, as_Register(base), T9);
2686 __ ld(as_Register(dst), AT, 0);
2687 }
2688 }
2689 }
2690 %}
2692 // Load acquire.
2693 // load_P_enc + sync
// Encoding: acquire-ordered 64-bit pointer load. The emitted instruction
// sequence is identical to load_P_enc, followed by a trailing full barrier
// (sync) to provide the acquire semantics. Keep the body in lockstep with
// load_P_enc when modifying either.
2694 enc_class load_P_enc_ac (mRegP dst, memory mem) %{
2695 MacroAssembler _masm(&cbuf);
2696 int dst = $dst$$reg;
2697 int base = $mem$$base;
2698 int index = $mem$$index;
2699 int scale = $mem$$scale;
2700 int disp = $mem$$disp;
2701 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2702 assert(disp_reloc == relocInfo::none, "cannot have disp");
2704 if( index != 0 ) {
2705 if ( UseLoongsonISA ) {
2706 if ( Assembler::is_simm(disp, 8) ) {
2707 if ( scale != 0 ) {
2708 __ dsll(AT, as_Register(index), scale);
2709 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2710 } else {
2711 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2712 }
2713 } else if ( Assembler::is_simm16(disp) ){
2714 if ( scale != 0 ) {
2715 __ dsll(AT, as_Register(index), scale);
2716 __ daddu(AT, AT, as_Register(base));
2717 } else {
2718 __ daddu(AT, as_Register(index), as_Register(base));
2719 }
2720 __ ld(as_Register(dst), AT, disp);
2721 } else {
2722 if ( scale != 0 ) {
2723 __ dsll(AT, as_Register(index), scale);
2724 __ move(T9, disp);
2725 __ daddu(AT, AT, T9);
2726 } else {
2727 __ move(T9, disp);
2728 __ daddu(AT, as_Register(index), T9);
2729 }
2730 __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2731 }
2732 } else { //not use loongson isa
2733 if (scale == 0) {
2734 __ daddu(AT, as_Register(base), as_Register(index));
2735 } else {
2736 __ dsll(AT, as_Register(index), scale);
2737 __ daddu(AT, as_Register(base), AT);
2738 }
2739 if( Assembler::is_simm16(disp) ) {
2740 __ ld(as_Register(dst), AT, disp);
2741 } else {
2742 __ set64(T9, disp);
2743 __ daddu(AT, AT, T9);
2744 __ ld(as_Register(dst), AT, 0);
2745 }
2746 }
2747 } else {
2748 if ( UseLoongsonISA ) {
2749 if ( Assembler::is_simm16(disp) ){
2750 __ ld(as_Register(dst), as_Register(base), disp);
2751 } else {
2752 __ set64(T9, disp);
2753 __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2754 }
2755 } else { //not use loongson isa
2756 if( Assembler::is_simm16(disp) ) {
2757 __ ld(as_Register(dst), as_Register(base), disp);
2758 } else {
2759 __ set64(T9, disp);
2760 __ daddu(AT, as_Register(base), T9);
2761 __ ld(as_Register(dst), AT, 0);
2762 }
2763 }
2764 }
// Acquire barrier: order the load above before subsequent memory accesses.
2765 __ sync();
2766 %}
// Encoding: 64-bit pointer store (sd / Loongson gssdx) of register `src`
// to `mem`. Addressing mirrors load_P_enc: gssdx for simm8 offsets under
// UseLoongsonISA, sd for simm16, scratch-register address formation beyond.
// All address arithmetic is 64-bit (daddu).
2768 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2769 MacroAssembler _masm(&cbuf);
2770 int src = $src$$reg;
2771 int base = $mem$$base;
2772 int index = $mem$$index;
2773 int scale = $mem$$scale;
2774 int disp = $mem$$disp;
2776 if( index != 0 ) {
2777 if ( UseLoongsonISA ){
2778 if ( Assembler::is_simm(disp, 8) ) {
2779 if ( scale == 0 ) {
2780 __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
2781 } else {
2782 __ dsll(AT, as_Register(index), scale);
2783 __ gssdx(as_Register(src), as_Register(base), AT, disp);
2784 }
2785 } else if ( Assembler::is_simm16(disp) ) {
2786 if ( scale == 0 ) {
2787 __ daddu(AT, as_Register(base), as_Register(index));
2788 } else {
2789 __ dsll(AT, as_Register(index), scale);
2790 __ daddu(AT, as_Register(base), AT);
2791 }
2792 __ sd(as_Register(src), AT, disp);
2793 } else {
2794 if ( scale == 0 ) {
2795 __ move(T9, disp);
2796 __ daddu(AT, as_Register(index), T9);
2797 } else {
2798 __ dsll(AT, as_Register(index), scale);
2799 __ move(T9, disp);
2800 __ daddu(AT, AT, T9);
2801 }
2802 __ gssdx(as_Register(src), as_Register(base), AT, 0);
2803 }
2804 } else { //not use loongson isa
2805 if (scale == 0) {
2806 __ daddu(AT, as_Register(base), as_Register(index));
2807 } else {
2808 __ dsll(AT, as_Register(index), scale);
2809 __ daddu(AT, as_Register(base), AT);
2810 }
2811 if( Assembler::is_simm16(disp) ) {
2812 __ sd(as_Register(src), AT, disp);
2813 } else {
2814 __ move(T9, disp);
2815 __ daddu(AT, AT, T9);
2816 __ sd(as_Register(src), AT, 0);
2817 }
2818 }
2819 } else {
2820 if ( UseLoongsonISA ) {
2821 if ( Assembler::is_simm16(disp) ) {
2822 __ sd(as_Register(src), as_Register(base), disp);
2823 } else {
2824 __ move(T9, disp);
2825 __ gssdx(as_Register(src), as_Register(base), T9, 0);
2826 }
2827 } else {
2828 if( Assembler::is_simm16(disp) ) {
2829 __ sd(as_Register(src), as_Register(base), disp);
2830 } else {
2831 __ move(T9, disp);
2832 __ daddu(AT, as_Register(base), T9);
2833 __ sd(as_Register(src), AT, 0);
2834 }
2835 }
2836 }
2837 %}
// Encoding: store a narrow (compressed) oop — a 32-bit store (sw / Loongson
// gsswx) of register `src` to `mem`. Structure is identical to
// store_P_reg_enc with sd/gssdx replaced by sw/gsswx; address arithmetic
// remains 64-bit (daddu).
2839 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2840 MacroAssembler _masm(&cbuf);
2841 int src = $src$$reg;
2842 int base = $mem$$base;
2843 int index = $mem$$index;
2844 int scale = $mem$$scale;
2845 int disp = $mem$$disp;
2847 if( index != 0 ) {
2848 if ( UseLoongsonISA ){
2849 if ( Assembler::is_simm(disp, 8) ) {
2850 if ( scale == 0 ) {
2851 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2852 } else {
2853 __ dsll(AT, as_Register(index), scale);
2854 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2855 }
2856 } else if ( Assembler::is_simm16(disp) ) {
2857 if ( scale == 0 ) {
2858 __ daddu(AT, as_Register(base), as_Register(index));
2859 } else {
2860 __ dsll(AT, as_Register(index), scale);
2861 __ daddu(AT, as_Register(base), AT);
2862 }
2863 __ sw(as_Register(src), AT, disp);
2864 } else {
2865 if ( scale == 0 ) {
2866 __ move(T9, disp);
2867 __ daddu(AT, as_Register(index), T9);
2868 } else {
2869 __ dsll(AT, as_Register(index), scale);
2870 __ move(T9, disp);
2871 __ daddu(AT, AT, T9);
2872 }
2873 __ gsswx(as_Register(src), as_Register(base), AT, 0);
2874 }
2875 } else { //not use loongson isa
2876 if (scale == 0) {
2877 __ daddu(AT, as_Register(base), as_Register(index));
2878 } else {
2879 __ dsll(AT, as_Register(index), scale);
2880 __ daddu(AT, as_Register(base), AT);
2881 }
2882 if( Assembler::is_simm16(disp) ) {
2883 __ sw(as_Register(src), AT, disp);
2884 } else {
2885 __ move(T9, disp);
2886 __ daddu(AT, AT, T9);
2887 __ sw(as_Register(src), AT, 0);
2888 }
2889 }
2890 } else {
2891 if ( UseLoongsonISA ) {
2892 if ( Assembler::is_simm16(disp) ) {
2893 __ sw(as_Register(src), as_Register(base), disp);
2894 } else {
2895 __ move(T9, disp);
2896 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2897 }
2898 } else {
2899 if( Assembler::is_simm16(disp) ) {
2900 __ sw(as_Register(src), as_Register(base), disp);
2901 } else {
2902 __ move(T9, disp);
2903 __ daddu(AT, as_Register(base), T9);
2904 __ sw(as_Register(src), AT, 0);
2905 }
2906 }
2907 }
2908 %}
// Encoding: store a NULL pointer — a 64-bit store of the hardwired zero
// register R0 (sd/gssdx) to `mem`. Branching is organized by scale first,
// then displacement reach, unlike the sibling store encodings which branch
// on displacement first; the emitted sequences are equivalent in spirit.
2910 enc_class store_P_immP0_enc (memory mem) %{
2911 MacroAssembler _masm(&cbuf);
2912 int base = $mem$$base;
2913 int index = $mem$$index;
2914 int scale = $mem$$scale;
2915 int disp = $mem$$disp;
2917 if( index != 0 ) {
2918 if (scale == 0) {
2919 if( Assembler::is_simm16(disp) ) {
2920 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2921 __ gssdx(R0, as_Register(base), as_Register(index), disp);
2922 } else {
2923 __ daddu(AT, as_Register(base), as_Register(index));
2924 __ sd(R0, AT, disp);
2925 }
2926 } else {
2927 __ daddu(AT, as_Register(base), as_Register(index));
2928 __ move(T9, disp);
2929 if(UseLoongsonISA) {
2930 __ gssdx(R0, AT, T9, 0);
2931 } else {
2932 __ daddu(AT, AT, T9);
2933 __ sd(R0, AT, 0);
2934 }
2935 }
2936 } else {
2937 __ dsll(AT, as_Register(index), scale);
2938 if( Assembler::is_simm16(disp) ) {
2939 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2940 __ gssdx(R0, as_Register(base), AT, disp);
2941 } else {
2942 __ daddu(AT, as_Register(base), AT);
2943 __ sd(R0, AT, disp);
2944 }
2945 } else {
2946 __ daddu(AT, as_Register(base), AT);
2947 __ move(T9, disp);
2948 if (UseLoongsonISA) {
2949 __ gssdx(R0, AT, T9, 0);
2950 } else {
2951 __ daddu(AT, AT, T9);
2952 __ sd(R0, AT, 0);
2953 }
2954 }
2955 }
2956 } else {
2957 if( Assembler::is_simm16(disp) ) {
2958 __ sd(R0, as_Register(base), disp);
2959 } else {
2960 __ move(T9, disp);
2961 if (UseLoongsonISA) {
2962 __ gssdx(R0, as_Register(base), T9, 0);
2963 } else {
2964 __ daddu(AT, as_Register(base), T9);
2965 __ sd(R0, AT, 0);
2966 }
2967 }
2968 }
2969 %}
// Encoding: store a zero narrow (compressed) oop — a 32-bit store of R0 (sw)
// to `mem`. No Loongson-indexed fast path; the effective address is always
// formed in AT when an index is present. Address arithmetic is 64-bit (daddu).
2971 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2972 MacroAssembler _masm(&cbuf);
2973 int base = $mem$$base;
2974 int index = $mem$$index;
2975 int scale = $mem$$scale;
2976 int disp = $mem$$disp;
2978 if(index!=0){
2979 if (scale == 0) {
2980 __ daddu(AT, as_Register(base), as_Register(index));
2981 } else {
2982 __ dsll(AT, as_Register(index), scale);
2983 __ daddu(AT, as_Register(base), AT);
2984 }
2986 if( Assembler::is_simm16(disp) ) {
2987 __ sw(R0, AT, disp);
2988 } else {
2989 __ move(T9, disp);
2990 __ daddu(AT, AT, T9);
2991 __ sw(R0, AT, 0);
2992 }
2993 } else {
2994 if( Assembler::is_simm16(disp) ) {
2995 __ sw(R0, as_Register(base), disp);
2996 } else {
2997 __ move(T9, disp);
2998 __ daddu(AT, as_Register(base), T9);
2999 __ sw(R0, AT, 0);
3000 }
3001 }
3002 %}
// Encoding: 64-bit long load (ld) from `mem` into `dst`. Plain-MIPS only —
// no Loongson gsldx fast path here (contrast with load_P_enc). Address
// arithmetic is 64-bit (daddu) throughout.
3004 enc_class load_L_enc (mRegL dst, memory mem) %{
3005 MacroAssembler _masm(&cbuf);
3006 int base = $mem$$base;
3007 int index = $mem$$index;
3008 int scale = $mem$$scale;
3009 int disp = $mem$$disp;
3010 Register dst_reg = as_Register($dst$$reg);
3012 if( index != 0 ) {
3013 if (scale == 0) {
3014 __ daddu(AT, as_Register(base), as_Register(index));
3015 } else {
3016 __ dsll(AT, as_Register(index), scale);
3017 __ daddu(AT, as_Register(base), AT);
3018 }
3019 if( Assembler::is_simm16(disp) ) {
3020 __ ld(dst_reg, AT, disp);
3021 } else {
3022 __ move(T9, disp);
3023 __ daddu(AT, AT, T9);
3024 __ ld(dst_reg, AT, 0);
3025 }
3026 } else {
3027 if( Assembler::is_simm16(disp) ) {
3028 __ ld(dst_reg, as_Register(base), disp);
3029 } else {
3030 __ move(T9, disp);
3031 __ daddu(AT, as_Register(base), T9);
3032 __ ld(dst_reg, AT, 0);
3033 }
3034 }
3035 %}
// Encoding: 64-bit long store (sd) of register `src` to `mem`. Plain-MIPS
// only; mirrors load_L_enc with the load replaced by sd. Address arithmetic
// is 64-bit (daddu) throughout.
3037 enc_class store_L_reg_enc (memory mem, mRegL src) %{
3038 MacroAssembler _masm(&cbuf);
3039 int base = $mem$$base;
3040 int index = $mem$$index;
3041 int scale = $mem$$scale;
3042 int disp = $mem$$disp;
3043 Register src_reg = as_Register($src$$reg);
3045 if( index != 0 ) {
3046 if (scale == 0) {
3047 __ daddu(AT, as_Register(base), as_Register(index));
3048 } else {
3049 __ dsll(AT, as_Register(index), scale);
3050 __ daddu(AT, as_Register(base), AT);
3051 }
3052 if( Assembler::is_simm16(disp) ) {
3053 __ sd(src_reg, AT, disp);
3054 } else {
3055 __ move(T9, disp);
3056 __ daddu(AT, AT, T9);
3057 __ sd(src_reg, AT, 0);
3058 }
3059 } else {
3060 if( Assembler::is_simm16(disp) ) {
3061 __ sd(src_reg, as_Register(base), disp);
3062 } else {
3063 __ move(T9, disp);
3064 __ daddu(AT, as_Register(base), T9);
3065 __ sd(src_reg, AT, 0);
3066 }
3067 }
3068 %}
// Encoding: store a zero 64-bit long — sd of the hardwired zero register R0
// to `mem`.
// NOTE(review): the large-disp paths below use 32-bit addu for address
// arithmetic while the otherwise-identical store_L_reg_enc uses daddu —
// looks like an inconsistency; confirm whether daddu is intended here.
3070 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3071 MacroAssembler _masm(&cbuf);
3072 int base = $mem$$base;
3073 int index = $mem$$index;
3074 int scale = $mem$$scale;
3075 int disp = $mem$$disp;
3077 if( index != 0 ) {
3078 if (scale == 0) {
3079 __ daddu(AT, as_Register(base), as_Register(index));
3080 } else {
3081 __ dsll(AT, as_Register(index), scale);
3082 __ daddu(AT, as_Register(base), AT);
3083 }
3084 if( Assembler::is_simm16(disp) ) {
3085 __ sd(R0, AT, disp);
3086 } else {
3087 __ move(T9, disp);
3088 __ addu(AT, AT, T9);
3089 __ sd(R0, AT, 0);
3090 }
3091 } else {
3092 if( Assembler::is_simm16(disp) ) {
3093 __ sd(R0, as_Register(base), disp);
3094 } else {
3095 __ move(T9, disp);
3096 __ addu(AT, as_Register(base), T9);
3097 __ sd(R0, AT, 0);
3098 }
3099 }
3100 %}
// Encoding: store an arbitrary 64-bit long immediate to `mem`. The constant
// is built in T9 via set64 (after T9's use for disp is done) and stored
// with sd.
// NOTE(review): the large-disp paths use 32-bit addu for address arithmetic
// (cf. daddu in store_L_reg_enc) — confirm intent.
3102 enc_class store_L_immL_enc (memory mem, immL src) %{
3103 MacroAssembler _masm(&cbuf);
3104 int base = $mem$$base;
3105 int index = $mem$$index;
3106 int scale = $mem$$scale;
3107 int disp = $mem$$disp;
3108 long imm = $src$$constant;
3110 if( index != 0 ) {
3111 if (scale == 0) {
3112 __ daddu(AT, as_Register(base), as_Register(index));
3113 } else {
3114 __ dsll(AT, as_Register(index), scale);
3115 __ daddu(AT, as_Register(base), AT);
3116 }
3117 if( Assembler::is_simm16(disp) ) {
3118 __ set64(T9, imm);
3119 __ sd(T9, AT, disp);
3120 } else {
3121 __ move(T9, disp);
3122 __ addu(AT, AT, T9);
3123 __ set64(T9, imm);
3124 __ sd(T9, AT, 0);
3125 }
3126 } else {
3127 if( Assembler::is_simm16(disp) ) {
// NOTE(review): the move of base into AT looks redundant — sd could
// address off base directly, as the sibling encodings do.
3128 __ move(AT, as_Register(base));
3129 __ set64(T9, imm);
3130 __ sd(T9, AT, disp);
3131 } else {
3132 __ move(T9, disp);
3133 __ addu(AT, as_Register(base), T9);
3134 __ set64(T9, imm);
3135 __ sd(T9, AT, 0);
3136 }
3137 }
3138 %}
// Encoding: single-precision float load (lwc1 / Loongson gslwxc1) from `mem`
// into FPU register `dst`. Loongson indexed form gslwxc1 requires the offset
// to fit simm8; otherwise the effective address is formed in AT (daddu) and
// lwc1 is used.
3140 enc_class load_F_enc (regF dst, memory mem) %{
3141 MacroAssembler _masm(&cbuf);
3142 int base = $mem$$base;
3143 int index = $mem$$index;
3144 int scale = $mem$$scale;
3145 int disp = $mem$$disp;
3146 FloatRegister dst = $dst$$FloatRegister;
3148 if( index != 0 ) {
3149 if( Assembler::is_simm16(disp) ) {
3150 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3151 if (scale == 0) {
3152 __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
3153 } else {
3154 __ dsll(AT, as_Register(index), scale);
3155 __ gslwxc1(dst, as_Register(base), AT, disp);
3156 }
3157 } else {
3158 if (scale == 0) {
3159 __ daddu(AT, as_Register(base), as_Register(index));
3160 } else {
3161 __ dsll(AT, as_Register(index), scale);
3162 __ daddu(AT, as_Register(base), AT);
3163 }
3164 __ lwc1(dst, AT, disp);
3165 }
3166 } else {
3167 if (scale == 0) {
3168 __ daddu(AT, as_Register(base), as_Register(index));
3169 } else {
3170 __ dsll(AT, as_Register(index), scale);
3171 __ daddu(AT, as_Register(base), AT);
3172 }
3173 __ move(T9, disp);
3174 if( UseLoongsonISA ) {
3175 __ gslwxc1(dst, AT, T9, 0);
3176 } else {
3177 __ daddu(AT, AT, T9);
3178 __ lwc1(dst, AT, 0);
3179 }
3180 }
3181 } else {
3182 if( Assembler::is_simm16(disp) ) {
3183 __ lwc1(dst, as_Register(base), disp);
3184 } else {
3185 __ move(T9, disp);
3186 if( UseLoongsonISA ) {
3187 __ gslwxc1(dst, as_Register(base), T9, 0);
3188 } else {
3189 __ daddu(AT, as_Register(base), T9);
3190 __ lwc1(dst, AT, 0);
3191 }
3192 }
3193 }
3194 %}
// Store a 32-bit float register to memory.
// Mirrors load_F_enc: prefer the Loongson fused indexed store (gsswxc1,
// base+index+simm8 disp) when available, otherwise compute the effective
// address into AT and emit swc1.  AT and T9 are scratch registers.
enc_class store_F_reg_enc (memory mem, regF src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src = $src$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      // gsswxc1 only takes an 8-bit displacement; fall back to swc1 otherwise.
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsswxc1(src, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswxc1(src, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale), then store with 16-bit displacement.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ swc1(src, AT, disp);
      }
    } else {
      // Displacement too large for any immediate field: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswxc1(src, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ swc1(src, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ swc1(src, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswxc1(src, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ swc1(src, AT, 0);
      }
    }
  }
%}
3252 enc_class load_D_enc (regD dst, memory mem) %{
3253 MacroAssembler _masm(&cbuf);
3254 int base = $mem$$base;
3255 int index = $mem$$index;
3256 int scale = $mem$$scale;
3257 int disp = $mem$$disp;
3258 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3260 if( index != 0 ) {
3261 if( Assembler::is_simm16(disp) ) {
3262 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3263 if (scale == 0) {
3264 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3265 } else {
3266 __ dsll(AT, as_Register(index), scale);
3267 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3268 }
3269 } else {
3270 if (scale == 0) {
3271 __ daddu(AT, as_Register(base), as_Register(index));
3272 } else {
3273 __ dsll(AT, as_Register(index), scale);
3274 __ daddu(AT, as_Register(base), AT);
3275 }
3276 __ ldc1(dst_reg, AT, disp);
3277 }
3278 } else {
3279 if (scale == 0) {
3280 __ daddu(AT, as_Register(base), as_Register(index));
3281 } else {
3282 __ dsll(AT, as_Register(index), scale);
3283 __ daddu(AT, as_Register(base), AT);
3284 }
3285 __ move(T9, disp);
3286 if( UseLoongsonISA ) {
3287 __ gsldxc1(dst_reg, AT, T9, 0);
3288 } else {
3289 __ addu(AT, AT, T9);
3290 __ ldc1(dst_reg, AT, 0);
3291 }
3292 }
3293 } else {
3294 if( Assembler::is_simm16(disp) ) {
3295 __ ldc1(dst_reg, as_Register(base), disp);
3296 } else {
3297 __ move(T9, disp);
3298 if( UseLoongsonISA ) {
3299 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3300 } else {
3301 __ addu(AT, as_Register(base), T9);
3302 __ ldc1(dst_reg, AT, 0);
3303 }
3304 }
3305 }
3306 %}
3308 enc_class store_D_reg_enc (memory mem, regD src) %{
3309 MacroAssembler _masm(&cbuf);
3310 int base = $mem$$base;
3311 int index = $mem$$index;
3312 int scale = $mem$$scale;
3313 int disp = $mem$$disp;
3314 FloatRegister src_reg = as_FloatRegister($src$$reg);
3316 if( index != 0 ) {
3317 if( Assembler::is_simm16(disp) ) {
3318 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3319 if (scale == 0) {
3320 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3321 } else {
3322 __ dsll(AT, as_Register(index), scale);
3323 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3324 }
3325 } else {
3326 if (scale == 0) {
3327 __ daddu(AT, as_Register(base), as_Register(index));
3328 } else {
3329 __ dsll(AT, as_Register(index), scale);
3330 __ daddu(AT, as_Register(base), AT);
3331 }
3332 __ sdc1(src_reg, AT, disp);
3333 }
3334 } else {
3335 if (scale == 0) {
3336 __ daddu(AT, as_Register(base), as_Register(index));
3337 } else {
3338 __ dsll(AT, as_Register(index), scale);
3339 __ daddu(AT, as_Register(base), AT);
3340 }
3341 __ move(T9, disp);
3342 if( UseLoongsonISA ) {
3343 __ gssdxc1(src_reg, AT, T9, 0);
3344 } else {
3345 __ addu(AT, AT, T9);
3346 __ sdc1(src_reg, AT, 0);
3347 }
3348 }
3349 } else {
3350 if( Assembler::is_simm16(disp) ) {
3351 __ sdc1(src_reg, as_Register(base), disp);
3352 } else {
3353 __ move(T9, disp);
3354 if( UseLoongsonISA ) {
3355 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3356 } else {
3357 __ addu(AT, as_Register(base), T9);
3358 __ sdc1(src_reg, AT, 0);
3359 }
3360 }
3361 }
3362 %}
// Emit a call from compiled Java code into the runtime (leaf or non-leaf).
// Marks the instruction start for relocation, records a runtime_call
// relocation, then emits a patchable call to the runtime entry point.
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  MacroAssembler _masm(&cbuf);
  // This is the instruction starting address for relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_call((address)$meth$$method);
%}
// Emit a Java static call.  Relocation type is chosen from the call-site
// flags: no resolved _method -> runtime call; optimized virtual -> opt
// virtual; otherwise a plain static call.  When a real method is targeted,
// a Java-to-interpreter stub is also emitted so the call can be re-bound.
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
  // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  if ( !_method ) {
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  __ patchable_call((address)($meth$$method));
  if( _method ) { // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
3395 /*
3396 * [Ref: LIR_Assembler::ic_call() ]
3397 */
// Emit a Java dynamic (inline-cache) call; the IC dispatch is handled by
// MacroAssembler::ic_call.  See LIR_Assembler::ic_call() for the C1 analog.
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  __ ic_call((address)$meth$$method);
%}
// Materialize a flags value from AT after a fast lock/unlock sequence:
// flags = (AT == 0) ? 0 : 0xFFFFFFFF.
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);        // flags = 0
  __ beq(AT, R0, L);             // AT == 0 -> keep flags = 0
  __ delayed()->nop();           // branch delay slot
  __ move(flags, 0xFFFFFFFF);    // AT != 0 -> flags = all ones
  __ bind(L);
%}
// Slow-path partial subtype check: result = 0 if 'sub' is a subtype of
// 'super', 1 otherwise.  Delegates to check_klass_subtype_slow_path with
// 'length' and T9 as temps.  Note: result may alias sub.
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  // result may be the same as sub
  // 47c B40: # B21 B41 <- B20 Freq: 0.155379
  // 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
  // 4bc mov S2, NULL #@loadConP
  // 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
  //
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  // Refer to X86_64's RDI
  __ move(result, 0);    // hit: result = 0 (matches x86_64 convention)
  __ b(done);
  __ delayed()->nop();   // branch delay slot

  __ bind(miss);
  __ move(result, 1);    // miss: result = 1
  __ bind(done);
%}
3447 %}
3450 //---------MIPS FRAME--------------------------------------------------------------
3451 // Definition of frame structure and management information.
3452 //
3453 // S T A C K L A Y O U T Allocators stack-slot number
3454 // | (to get allocators register number
3455 // G Owned by | | v add SharedInfo::stack0)
3456 // r CALLER | |
3457 // o | +--------+ pad to even-align allocators stack-slot
3458 // w V | pad0 | numbers; owned by CALLER
3459 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3460 // h ^ | in | 5
3461 // | | args | 4 Holes in incoming args owned by SELF
3462 // | | old | | 3
3463 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3464 // v | | ret | 3 return address
3465 // Owned by +--------+
3466 // Self | pad2 | 2 pad to align old SP
3467 // | +--------+ 1
3468 // | | locks | 0
3469 // | +--------+----> SharedInfo::stack0, even aligned
3470 // | | pad1 | 11 pad to align new SP
3471 // | +--------+
3472 // | | | 10
3473 // | | spills | 9 spills
3474 // V | | 8 (pad0 slot for callee)
3475 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3476 // ^ | out | 7
3477 // | | args | 6 Holes in outgoing args owned by CALLEE
3478 // Owned by new | |
3479 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3480 // | |
3481 //
3482 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3483 // known from SELF's arguments and the Java calling convention.
3484 // Region 6-7 is determined per call site.
3485 // Note 2: If the calling convention leaves holes in the incoming argument
3486 // area, those holes are owned by SELF. Holes in the outgoing area
3487 // are owned by the CALLEE. Holes should not be nessecary in the
3488 // incoming area, as the Java calling convention is completely under
3489 // the control of the AD file. Doubles can be sorted and packed to
3490 // avoid holes. Holes in the outgoing arguments may be nessecary for
3491 // varargs C calling conventions.
3492 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3493 // even aligned with pad0 as needed.
3494 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3495 // region 6-11 is even aligned; it may be padded out more so that
3496 // the region from SP to FP meets the minimum stack alignment.
3497 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3498 // alignment. Region 11, pad1, may be dynamically extended so that
3499 // SP meets the minimum alignment.
// Frame description: stack growth direction, calling conventions, return
// address location and return-value register assignments for this port.
frame %{

  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information.

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.  Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this.
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information.
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* --     , --     , Op_RegN      , Op_RegI      , Op_RegP , Op_RegF      , Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* --     , --     , Op_RegN      , Op_RegI      , Op_RegP , Op_RegF      , Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3613 //----------ATTRIBUTES---------------------------------------------------------
3614 //----------Operand Attributes-------------------------------------------------
// Default operand/instruction attributes used by ADLC-generated matcher code.
op_attrib op_cost(0);        // Required cost attribute

ins_attrib ins_cost(100);       // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);  // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must be a power of 2)
                                // specifies the alignment that some part of the instruction (not
                                // necessarily the start) requires.  If > 1, a compute_padding()
                                // function must be provided for the instruction
3629 //----------OPERANDS-----------------------------------------------------------
3630 // Operand definitions must precede instruction definitions for correct parsing
3631 // in the ADLC because operands constitute user defined types which are used in
3632 // instruction definitions.
3634 // Vectors
// 64-bit vector operand, allocated from the double FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}
3643 // Flags register, used as output of compare instructions
3644 operand FlagsReg() %{
3645 constraint(ALLOC_IN_RC(mips_flags));
3646 match(RegFlags);
3648 format %{ "AT" %}
3649 interface(REG_INTER);
3650 %}
3652 //----------Simple Operands----------------------------------------------------
3653 //TODO: Should we need to define some more special immediate number ?
3654 // Immediate Operands
3655 // Integer Immediate
// Integer immediate operands.  Each predicate narrows ConI to a value range
// that a particular instruction form can encode directly.
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest 32-bit signed value (Integer.MAX_VALUE)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed immediate (fits MIPS I-type immediate field)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Maximum 64-bit shift amount
operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32-bit shift amounts
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negtive integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Upper-half 64-bit shift amounts
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Negatable 16-bit immediate: -n fits in 16 bits (used to turn sub into add)
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 16-bit signed immediate
operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit unsigned immediate
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for half-word-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
3908 // Pointer Immediate
// Pointer immediate operands.  The _set/_load/_cheap variants steer the
// matcher between materialization strategies based on oop-ness and the
// number of instructions set64 would need.
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Oop pointers, or non-oops expensive to synthesize (> 3 insts): load from constant table.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Non-oop pointers cheap to synthesize inline (<= 3 insts).
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}
3969 // Pointer Immediate
// Narrow (compressed) pointer immediate operands.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass pointer immediate.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
3996 // Long Immediate
// Long immediate operands.  Each predicate narrows ConL to a value (or value
// range) a particular instruction form can encode or exploit.
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negtive long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit unsigned long immediate
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate (fits MIPS I-type immediate field)
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Negatable 16-bit long immediate: -n fits in 16 bits
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
4173 //single-precision floating-point zero
//single-precision floating-point zero
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point zero
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4211 // Register Operands
4212 // Integer Register
// Register Operands
// Integer Register: any allocatable integer register.  The single-register
// variants below (mS0RegI .. mV1RegI) pin an operand to one specific
// register via its one-register register class.
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the Ax registers.
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}


operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow (compressed) oop register: any allocatable integer register.  The
// single-register variants below (t0_RegN .. a5_RegN) pin the operand to one
// specific register via its one-register register class.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
4563 operand a6_RegN() %{
4564 constraint(ALLOC_IN_RC(a6_reg));
4565 match(RegN);
4566 match(mRegN);
4568 format %{ %}
4569 interface(REG_INTER);
4570 %}
4572 operand a7_RegN() %{
4573 constraint(ALLOC_IN_RC(a7_reg));
4574 match(RegN);
4575 match(mRegN);
4577 format %{ %}
4578 interface(REG_INTER);
4579 %}
4581 operand s0_RegN() %{
4582 constraint(ALLOC_IN_RC(s0_reg));
4583 match(RegN);
4584 match(mRegN);
4586 format %{ %}
4587 interface(REG_INTER);
4588 %}
4590 operand s1_RegN() %{
4591 constraint(ALLOC_IN_RC(s1_reg));
4592 match(RegN);
4593 match(mRegN);
4595 format %{ %}
4596 interface(REG_INTER);
4597 %}
4599 operand s2_RegN() %{
4600 constraint(ALLOC_IN_RC(s2_reg));
4601 match(RegN);
4602 match(mRegN);
4604 format %{ %}
4605 interface(REG_INTER);
4606 %}
4608 operand s3_RegN() %{
4609 constraint(ALLOC_IN_RC(s3_reg));
4610 match(RegN);
4611 match(mRegN);
4613 format %{ %}
4614 interface(REG_INTER);
4615 %}
4617 operand s4_RegN() %{
4618 constraint(ALLOC_IN_RC(s4_reg));
4619 match(RegN);
4620 match(mRegN);
4622 format %{ %}
4623 interface(REG_INTER);
4624 %}
4626 operand s5_RegN() %{
4627 constraint(ALLOC_IN_RC(s5_reg));
4628 match(RegN);
4629 match(mRegN);
4631 format %{ %}
4632 interface(REG_INTER);
4633 %}
4635 operand s6_RegN() %{
4636 constraint(ALLOC_IN_RC(s6_reg));
4637 match(RegN);
4638 match(mRegN);
4640 format %{ %}
4641 interface(REG_INTER);
4642 %}
4644 operand s7_RegN() %{
4645 constraint(ALLOC_IN_RC(s7_reg));
4646 match(RegN);
4647 match(mRegN);
4649 format %{ %}
4650 interface(REG_INTER);
4651 %}
4653 operand v0_RegN() %{
4654 constraint(ALLOC_IN_RC(v0_reg));
4655 match(RegN);
4656 match(mRegN);
4658 format %{ %}
4659 interface(REG_INTER);
4660 %}
4662 operand v1_RegN() %{
4663 constraint(ALLOC_IN_RC(v1_reg));
4664 match(RegN);
4665 match(mRegN);
4667 format %{ %}
4668 interface(REG_INTER);
4669 %}
4671 // Pointer Register
// mRegP is the generic pointer-register operand; no_T8_mRegP is the same
// but drawn from a class that excludes T8 (for instructs that clobber or
// reserve T8). The per-register *_RegP operands pin a pointer to one
// specific register and chain-match both generic forms.
4672 operand mRegP() %{
4673 constraint(ALLOC_IN_RC(p_reg));
4674 match(RegP);
4675 match(a0_RegP);
4677 format %{ %}
4678 interface(REG_INTER);
4679 %}
4681 operand no_T8_mRegP() %{
4682 constraint(ALLOC_IN_RC(no_T8_p_reg));
4683 match(RegP);
4684 match(mRegP);
4686 format %{ %}
4687 interface(REG_INTER);
4688 %}
4690 operand s0_RegP()
4691 %{
4692 constraint(ALLOC_IN_RC(s0_long_reg));
4693 match(RegP);
4694 match(mRegP);
4695 match(no_T8_mRegP);
4697 format %{ %}
4698 interface(REG_INTER);
4699 %}
4701 operand s1_RegP()
4702 %{
4703 constraint(ALLOC_IN_RC(s1_long_reg));
4704 match(RegP);
4705 match(mRegP);
4706 match(no_T8_mRegP);
4708 format %{ %}
4709 interface(REG_INTER);
4710 %}
4712 operand s2_RegP()
4713 %{
4714 constraint(ALLOC_IN_RC(s2_long_reg));
4715 match(RegP);
4716 match(mRegP);
4717 match(no_T8_mRegP);
4719 format %{ %}
4720 interface(REG_INTER);
4721 %}
4723 operand s3_RegP()
4724 %{
4725 constraint(ALLOC_IN_RC(s3_long_reg));
4726 match(RegP);
4727 match(mRegP);
4728 match(no_T8_mRegP);
4730 format %{ %}
4731 interface(REG_INTER);
4732 %}
4734 operand s4_RegP()
4735 %{
4736 constraint(ALLOC_IN_RC(s4_long_reg));
4737 match(RegP);
4738 match(mRegP);
4739 match(no_T8_mRegP);
4741 format %{ %}
4742 interface(REG_INTER);
4743 %}
4745 operand s5_RegP()
4746 %{
4747 constraint(ALLOC_IN_RC(s5_long_reg));
4748 match(RegP);
4749 match(mRegP);
4750 match(no_T8_mRegP);
4752 format %{ %}
4753 interface(REG_INTER);
4754 %}
4756 operand s6_RegP()
4757 %{
4758 constraint(ALLOC_IN_RC(s6_long_reg));
4759 match(RegP);
4760 match(mRegP);
4761 match(no_T8_mRegP);
4763 format %{ %}
4764 interface(REG_INTER);
4765 %}
4767 operand s7_RegP()
4768 %{
4769 constraint(ALLOC_IN_RC(s7_long_reg));
4770 match(RegP);
4771 match(mRegP);
4772 match(no_T8_mRegP);
4774 format %{ %}
4775 interface(REG_INTER);
4776 %}
4778 operand t0_RegP()
4779 %{
4780 constraint(ALLOC_IN_RC(t0_long_reg));
4781 match(RegP);
4782 match(mRegP);
4783 match(no_T8_mRegP);
4785 format %{ %}
4786 interface(REG_INTER);
4787 %}
4789 operand t1_RegP()
4790 %{
4791 constraint(ALLOC_IN_RC(t1_long_reg));
4792 match(RegP);
4793 match(mRegP);
4794 match(no_T8_mRegP);
4796 format %{ %}
4797 interface(REG_INTER);
4798 %}
4800 operand t2_RegP()
4801 %{
4802 constraint(ALLOC_IN_RC(t2_long_reg));
4803 match(RegP);
4804 match(mRegP);
4805 match(no_T8_mRegP);
4807 format %{ %}
4808 interface(REG_INTER);
4809 %}
4811 operand t3_RegP()
4812 %{
4813 constraint(ALLOC_IN_RC(t3_long_reg));
4814 match(RegP);
4815 match(mRegP);
4816 match(no_T8_mRegP);
4818 format %{ %}
4819 interface(REG_INTER);
4820 %}
4822 operand t8_RegP()
4823 %{
4824 constraint(ALLOC_IN_RC(t8_long_reg));
4825 match(RegP);
4826 match(mRegP);
// Deliberately no match(no_T8_mRegP): T8 is excluded from that class.
4828 format %{ %}
4829 interface(REG_INTER);
4830 %}
4832 operand t9_RegP()
4833 %{
4834 constraint(ALLOC_IN_RC(t9_long_reg));
4835 match(RegP);
4836 match(mRegP);
4837 match(no_T8_mRegP);
4839 format %{ %}
4840 interface(REG_INTER);
4841 %}
4843 operand a0_RegP()
4844 %{
4845 constraint(ALLOC_IN_RC(a0_long_reg));
4846 match(RegP);
4847 match(mRegP);
4848 match(no_T8_mRegP);
4850 format %{ %}
4851 interface(REG_INTER);
4852 %}
4854 operand a1_RegP()
4855 %{
4856 constraint(ALLOC_IN_RC(a1_long_reg));
4857 match(RegP);
4858 match(mRegP);
4859 match(no_T8_mRegP);
4861 format %{ %}
4862 interface(REG_INTER);
4863 %}
4865 operand a2_RegP()
4866 %{
4867 constraint(ALLOC_IN_RC(a2_long_reg));
4868 match(RegP);
4869 match(mRegP);
4870 match(no_T8_mRegP);
4872 format %{ %}
4873 interface(REG_INTER);
4874 %}
4876 operand a3_RegP()
4877 %{
4878 constraint(ALLOC_IN_RC(a3_long_reg));
4879 match(RegP);
4880 match(mRegP);
4881 match(no_T8_mRegP);
4883 format %{ %}
4884 interface(REG_INTER);
4885 %}
4887 operand a4_RegP()
4888 %{
4889 constraint(ALLOC_IN_RC(a4_long_reg));
4890 match(RegP);
4891 match(mRegP);
4892 match(no_T8_mRegP);
4894 format %{ %}
4895 interface(REG_INTER);
4896 %}
4899 operand a5_RegP()
4900 %{
4901 constraint(ALLOC_IN_RC(a5_long_reg));
4902 match(RegP);
4903 match(mRegP);
4904 match(no_T8_mRegP);
4906 format %{ %}
4907 interface(REG_INTER);
4908 %}
4910 operand a6_RegP()
4911 %{
4912 constraint(ALLOC_IN_RC(a6_long_reg));
4913 match(RegP);
4914 match(mRegP);
4915 match(no_T8_mRegP);
4917 format %{ %}
4918 interface(REG_INTER);
4919 %}
4921 operand a7_RegP()
4922 %{
4923 constraint(ALLOC_IN_RC(a7_long_reg));
4924 match(RegP);
4925 match(mRegP);
4926 match(no_T8_mRegP);
4928 format %{ %}
4929 interface(REG_INTER);
4930 %}
4932 operand v0_RegP()
4933 %{
4934 constraint(ALLOC_IN_RC(v0_long_reg));
4935 match(RegP);
4936 match(mRegP);
4937 match(no_T8_mRegP);
4939 format %{ %}
4940 interface(REG_INTER);
4941 %}
4943 operand v1_RegP()
4944 %{
4945 constraint(ALLOC_IN_RC(v1_long_reg));
4946 match(RegP);
4947 match(mRegP);
4948 match(no_T8_mRegP);
4950 format %{ %}
4951 interface(REG_INTER);
4952 %}
4954 /*
4955 operand mSPRegP(mRegP reg) %{
4956 constraint(ALLOC_IN_RC(sp_reg));
4957 match(reg);
4959 format %{ "SP" %}
4960 interface(REG_INTER);
4961 %}
4963 operand mFPRegP(mRegP reg) %{
4964 constraint(ALLOC_IN_RC(fp_reg));
4965 match(reg);
4967 format %{ "FP" %}
4968 interface(REG_INTER);
4969 %}
4970 */
// Long (64-bit integer, RegL) operands: generic mRegL plus per-register
// pinned variants, same pattern as the RegI/RegN/RegP groups above.
// NOTE(review): no s5RegL/s6RegL are defined in this group — presumably
// unneeded by any instruct rule; confirm before relying on the full set.
4972 operand mRegL() %{
4973 constraint(ALLOC_IN_RC(long_reg));
4974 match(RegL);
4976 format %{ %}
4977 interface(REG_INTER);
4978 %}
4980 operand v0RegL() %{
4981 constraint(ALLOC_IN_RC(v0_long_reg));
4982 match(RegL);
4983 match(mRegL);
4985 format %{ %}
4986 interface(REG_INTER);
4987 %}
4989 operand v1RegL() %{
4990 constraint(ALLOC_IN_RC(v1_long_reg));
4991 match(RegL);
4992 match(mRegL);
4994 format %{ %}
4995 interface(REG_INTER);
4996 %}
4998 operand a0RegL() %{
4999 constraint(ALLOC_IN_RC(a0_long_reg));
5000 match(RegL);
5001 match(mRegL);
// Only operand in this group with a symbolic format; siblings use default.
5003 format %{ "A0" %}
5004 interface(REG_INTER);
5005 %}
5007 operand a1RegL() %{
5008 constraint(ALLOC_IN_RC(a1_long_reg));
5009 match(RegL);
5010 match(mRegL);
5012 format %{ %}
5013 interface(REG_INTER);
5014 %}
5016 operand a2RegL() %{
5017 constraint(ALLOC_IN_RC(a2_long_reg));
5018 match(RegL);
5019 match(mRegL);
5021 format %{ %}
5022 interface(REG_INTER);
5023 %}
5025 operand a3RegL() %{
5026 constraint(ALLOC_IN_RC(a3_long_reg));
5027 match(RegL);
5028 match(mRegL);
5030 format %{ %}
5031 interface(REG_INTER);
5032 %}
5034 operand t0RegL() %{
5035 constraint(ALLOC_IN_RC(t0_long_reg));
5036 match(RegL);
5037 match(mRegL);
5039 format %{ %}
5040 interface(REG_INTER);
5041 %}
5043 operand t1RegL() %{
5044 constraint(ALLOC_IN_RC(t1_long_reg));
5045 match(RegL);
5046 match(mRegL);
5048 format %{ %}
5049 interface(REG_INTER);
5050 %}
5052 operand t2RegL() %{
5053 constraint(ALLOC_IN_RC(t2_long_reg));
5054 match(RegL);
5055 match(mRegL);
5057 format %{ %}
5058 interface(REG_INTER);
5059 %}
5061 operand t3RegL() %{
5062 constraint(ALLOC_IN_RC(t3_long_reg));
5063 match(RegL);
5064 match(mRegL);
5066 format %{ %}
5067 interface(REG_INTER);
5068 %}
5070 operand t8RegL() %{
5071 constraint(ALLOC_IN_RC(t8_long_reg));
5072 match(RegL);
5073 match(mRegL);
5075 format %{ %}
5076 interface(REG_INTER);
5077 %}
5079 operand a4RegL() %{
5080 constraint(ALLOC_IN_RC(a4_long_reg));
5081 match(RegL);
5082 match(mRegL);
5084 format %{ %}
5085 interface(REG_INTER);
5086 %}
5088 operand a5RegL() %{
5089 constraint(ALLOC_IN_RC(a5_long_reg));
5090 match(RegL);
5091 match(mRegL);
5093 format %{ %}
5094 interface(REG_INTER);
5095 %}
5097 operand a6RegL() %{
5098 constraint(ALLOC_IN_RC(a6_long_reg));
5099 match(RegL);
5100 match(mRegL);
5102 format %{ %}
5103 interface(REG_INTER);
5104 %}
5106 operand a7RegL() %{
5107 constraint(ALLOC_IN_RC(a7_long_reg));
5108 match(RegL);
5109 match(mRegL);
5111 format %{ %}
5112 interface(REG_INTER);
5113 %}
5115 operand s0RegL() %{
5116 constraint(ALLOC_IN_RC(s0_long_reg));
5117 match(RegL);
5118 match(mRegL);
5120 format %{ %}
5121 interface(REG_INTER);
5122 %}
5124 operand s1RegL() %{
5125 constraint(ALLOC_IN_RC(s1_long_reg));
5126 match(RegL);
5127 match(mRegL);
5129 format %{ %}
5130 interface(REG_INTER);
5131 %}
5133 operand s2RegL() %{
5134 constraint(ALLOC_IN_RC(s2_long_reg));
5135 match(RegL);
5136 match(mRegL);
5138 format %{ %}
5139 interface(REG_INTER);
5140 %}
5142 operand s3RegL() %{
5143 constraint(ALLOC_IN_RC(s3_long_reg));
5144 match(RegL);
5145 match(mRegL);
5147 format %{ %}
5148 interface(REG_INTER);
5149 %}
5151 operand s4RegL() %{
5152 constraint(ALLOC_IN_RC(s4_long_reg));
5153 match(RegL);
5154 match(mRegL);
5156 format %{ %}
5157 interface(REG_INTER);
5158 %}
5160 operand s7RegL() %{
5161 constraint(ALLOC_IN_RC(s7_long_reg));
5162 match(RegL);
5163 match(mRegL);
5165 format %{ %}
5166 interface(REG_INTER);
5167 %}
5169 // Floating register operands
// Single-precision FP value in any register of the float class.
5170 operand regF() %{
5171 constraint(ALLOC_IN_RC(flt_reg));
5172 match(RegF);
5174 format %{ %}
5175 interface(REG_INTER);
5176 %}
5178 //Double Precision Floating register operands
5179 operand regD() %{
5180 constraint(ALLOC_IN_RC(dbl_reg));
5181 match(RegD);
5183 format %{ %}
5184 interface(REG_INTER);
5185 %}
5187 //----------Memory Operands----------------------------------------------------
// Each memory operand matches one ideal address shape (base, base+offset,
// base+index<<scale, ...) and exposes it through MEMORY_INTER as
// base/index/scale/disp. The *Narrow / *NarrowKlass variants fold a
// DecodeN/DecodeNKlass of a compressed oop/klass into the address; they
// are only legal when the corresponding shift is 0 (see predicates),
// since no shift can be applied by the addressing mode itself.
5188 // Indirect Memory Operand
5189 operand indirect(mRegP reg) %{
5190 constraint(ALLOC_IN_RC(p_reg));
5191 match(reg);
5193 format %{ "[$reg] @ indirect" %}
5194 interface(MEMORY_INTER) %{
5195 base($reg);
5196 index(0x0); /* NO_INDEX */
5197 scale(0x0);
5198 disp(0x0);
5199 %}
5200 %}
5202 // Indirect Memory Plus Short Offset Operand
5203 operand indOffset8(mRegP reg, immL8 off)
5204 %{
5205 constraint(ALLOC_IN_RC(p_reg));
5206 match(AddP reg off);
5208 op_cost(10);
5209 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5210 interface(MEMORY_INTER) %{
5211 base($reg);
5212 index(0x0); /* NO_INDEX */
5213 scale(0x0);
5214 disp($off);
5215 %}
5216 %}
5218 // Indirect Memory Times Scale Plus Index Register
5219 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5220 %{
5221 constraint(ALLOC_IN_RC(p_reg));
5222 match(AddP reg (LShiftL lreg scale));
5224 op_cost(10);
5225 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5226 interface(MEMORY_INTER) %{
5227 base($reg);
5228 index($lreg);
5229 scale($scale);
5230 disp(0x0);
5231 %}
5232 %}
5235 // [base + index + offset]
5236 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5237 %{
5238 constraint(ALLOC_IN_RC(p_reg));
5239 op_cost(5);
5240 match(AddP (AddP base index) off);
5242 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5243 interface(MEMORY_INTER) %{
5244 base($base);
5245 index($index);
5246 scale(0x0);
5247 disp($off);
5248 %}
5249 %}
5251 // [base + index + offset]
5252 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5253 %{
5254 constraint(ALLOC_IN_RC(p_reg));
5255 op_cost(5);
5256 match(AddP (AddP base (ConvI2L index)) off);
5258 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5259 interface(MEMORY_INTER) %{
5260 base($base);
5261 index($index);
5262 scale(0x0);
5263 disp($off);
5264 %}
5265 %}
5267 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5268 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5269 %{
5270 constraint(ALLOC_IN_RC(p_reg));
5271 match(AddP (AddP reg (LShiftL lreg scale)) off);
5273 op_cost(10);
5274 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5275 interface(MEMORY_INTER) %{
5276 base($reg);
5277 index($lreg);
5278 scale($scale);
5279 disp($off);
5280 %}
5281 %}
5283 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5284 %{
5285 constraint(ALLOC_IN_RC(p_reg));
5286 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5288 op_cost(10);
5289 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5290 interface(MEMORY_INTER) %{
5291 base($reg);
5292 index($ireg);
5293 scale($scale);
5294 disp($off);
5295 %}
5296 %}
5298 // [base + index<<scale + offset]
5299 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5300 %{
5301 constraint(ALLOC_IN_RC(p_reg));
5302 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5303 op_cost(10);
5304 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5306 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5307 interface(MEMORY_INTER) %{
5308 base($base);
5309 index($index);
5310 scale($scale);
5311 disp($off);
5312 %}
5313 %}
5315 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5316 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5317 %{
5318 predicate(Universe::narrow_oop_shift() == 0);
5319 constraint(ALLOC_IN_RC(p_reg));
5320 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5322 op_cost(10);
5323 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5324 interface(MEMORY_INTER) %{
5325 base($reg);
5326 index($lreg);
5327 scale($scale);
5328 disp($off);
5329 %}
5330 %}
5332 // [base + index<<scale + offset] for compressed Oops
5333 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5334 %{
5335 constraint(ALLOC_IN_RC(p_reg));
5336 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5337 predicate(Universe::narrow_oop_shift() == 0);
5338 op_cost(10);
5339 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5341 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5342 interface(MEMORY_INTER) %{
5343 base($base);
5344 index($index);
5345 scale($scale);
5346 disp($off);
5347 %}
5348 %}
5350 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5351 // Indirect Memory Plus Long Offset Operand
5352 operand indOffset32(mRegP reg, immL32 off) %{
5353 constraint(ALLOC_IN_RC(p_reg));
5354 op_cost(20);
5355 match(AddP reg off);
5357 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5358 interface(MEMORY_INTER) %{
5359 base($reg);
5360 index(0x0); /* NO_INDEX */
5361 scale(0x0);
5362 disp($off);
5363 %}
5364 %}
5366 // Indirect Memory Plus Index Register
5367 operand indIndex(mRegP addr, mRegL index) %{
5368 constraint(ALLOC_IN_RC(p_reg));
5369 match(AddP addr index);
5371 op_cost(20);
5372 format %{"[$addr + $index] @ indIndex" %}
5373 interface(MEMORY_INTER) %{
5374 base($addr);
5375 index($index);
5376 scale(0x0);
5377 disp(0x0);
5378 %}
5379 %}
5381 operand indirectNarrowKlass(mRegN reg)
5382 %{
5383 predicate(Universe::narrow_klass_shift() == 0);
5384 constraint(ALLOC_IN_RC(p_reg));
5385 op_cost(10);
5386 match(DecodeNKlass reg);
5388 format %{ "[$reg] @ indirectNarrowKlass" %}
5389 interface(MEMORY_INTER) %{
5390 base($reg);
5391 index(0x0);
5392 scale(0x0);
5393 disp(0x0);
5394 %}
5395 %}
5397 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5398 %{
5399 predicate(Universe::narrow_klass_shift() == 0);
5400 constraint(ALLOC_IN_RC(p_reg));
5401 op_cost(10);
5402 match(AddP (DecodeNKlass reg) off);
5404 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5405 interface(MEMORY_INTER) %{
5406 base($reg);
5407 index(0x0);
5408 scale(0x0);
5409 disp($off);
5410 %}
5411 %}
5413 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5414 %{
5415 predicate(Universe::narrow_klass_shift() == 0);
5416 constraint(ALLOC_IN_RC(p_reg));
5417 op_cost(10);
5418 match(AddP (DecodeNKlass reg) off);
5420 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5421 interface(MEMORY_INTER) %{
5422 base($reg);
5423 index(0x0);
5424 scale(0x0);
5425 disp($off);
5426 %}
5427 %}
5429 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5430 %{
5431 predicate(Universe::narrow_klass_shift() == 0);
5432 constraint(ALLOC_IN_RC(p_reg));
5433 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5435 op_cost(10);
5436 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5437 interface(MEMORY_INTER) %{
5438 base($reg);
5439 index($lreg);
5440 scale(0x0);
5441 disp($off);
5442 %}
5443 %}
5445 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5446 %{
5447 predicate(Universe::narrow_klass_shift() == 0);
5448 constraint(ALLOC_IN_RC(p_reg));
5449 match(AddP (DecodeNKlass reg) lreg);
5451 op_cost(10);
5452 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5453 interface(MEMORY_INTER) %{
5454 base($reg);
5455 index($lreg);
5456 scale(0x0);
5457 disp(0x0);
5458 %}
5459 %}
5461 // Indirect Memory Operand
5462 operand indirectNarrow(mRegN reg)
5463 %{
5464 predicate(Universe::narrow_oop_shift() == 0);
5465 constraint(ALLOC_IN_RC(p_reg));
5466 op_cost(10);
5467 match(DecodeN reg);
5469 format %{ "[$reg] @ indirectNarrow" %}
5470 interface(MEMORY_INTER) %{
5471 base($reg);
5472 index(0x0);
5473 scale(0x0);
5474 disp(0x0);
5475 %}
5476 %}
5478 // Indirect Memory Plus Short Offset Operand
5479 operand indOffset8Narrow(mRegN reg, immL8 off)
5480 %{
5481 predicate(Universe::narrow_oop_shift() == 0);
5482 constraint(ALLOC_IN_RC(p_reg));
5483 op_cost(10);
5484 match(AddP (DecodeN reg) off);
5486 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5487 interface(MEMORY_INTER) %{
5488 base($reg);
5489 index(0x0);
5490 scale(0x0);
5491 disp($off);
5492 %}
5493 %}
5495 // Indirect Memory Plus Index Register Plus Offset Operand
5496 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5497 %{
5498 predicate(Universe::narrow_oop_shift() == 0);
5499 constraint(ALLOC_IN_RC(p_reg));
5500 match(AddP (AddP (DecodeN reg) lreg) off);
5502 op_cost(10);
5503 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5504 interface(MEMORY_INTER) %{
5505 base($reg);
5506 index($lreg);
5507 scale(0x0);
5508 disp($off);
5509 %}
5510 %}
5512 //----------Load Long Memory Operands------------------------------------------
5513 // The load-long idiom will use its address expression again after loading
5514 // the first word of the long. If the load-long destination overlaps with
5515 // registers used in the addressing expression, the 2nd half will be loaded
5516 // from a clobbered address. Fix this by requiring that load-long use
5517 // address registers that do not overlap with the load-long target.
5519 // load-long support
// High op_cost discourages the allocator from picking this form unless
// a load-long pattern actually requires it.
5520 operand load_long_RegP() %{
5521 constraint(ALLOC_IN_RC(p_reg));
5522 match(RegP);
5523 match(mRegP);
5524 op_cost(100);
5525 format %{ %}
5526 interface(REG_INTER);
5527 %}
5529 // Indirect Memory Operand Long
5530 operand load_long_indirect(load_long_RegP reg) %{
5531 constraint(ALLOC_IN_RC(p_reg));
5532 match(reg);
5534 format %{ "[$reg]" %}
5535 interface(MEMORY_INTER) %{
5536 base($reg);
5537 index(0x0);
5538 scale(0x0);
5539 disp(0x0);
5540 %}
5541 %}
5543 // Indirect Memory Plus Long Offset Operand
5544 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5545 match(AddP reg off);
5547 format %{ "[$reg + $off]" %}
5548 interface(MEMORY_INTER) %{
5549 base($reg);
5550 index(0x0);
5551 scale(0x0);
5552 disp($off);
5553 %}
5554 %}
5556 //----------Conditional Branch Operands----------------------------------------
5557 // Comparison Op - This is the operation of the comparison, and is limited to
5558 // the following set of codes:
5559 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5560 //
5561 // Other attributes of the comparison, such as unsignedness, are specified
5562 // by the comparison instruction that sets a condition code flags register.
5563 // That result is represented by a flags operand whose subtype is appropriate
5564 // to the unsignedness (etc.) of the comparison.
5565 //
5566 // Later, the instruction which matches both the Comparison Op (a Bool) and
5567 // the flags (produced by the Cmp) specifies the coding of the comparison op
5568 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5570 // Comparison Code
5571 operand cmpOp() %{
5572 match(Bool);
5574 format %{ "" %}
5575 interface(COND_INTER) %{
5576 equal(0x01);
5577 not_equal(0x02);
5578 greater(0x03);
5579 greater_equal(0x04);
5580 less(0x05);
5581 less_equal(0x06);
5582 overflow(0x7);
5583 no_overflow(0x8);
5584 %}
5585 %}
5588 // Comparison Code
5589 // Comparison Code, unsigned compare. Used by FP also, with
5590 // C2 (unordered) turned into GT or LT already. The other bits
5591 // C0 and C3 are turned into Carry & Zero flags.
// NOTE(review): cmpOpU uses the same condition encodings as cmpOp here;
// the signed/unsigned distinction is carried by the instruct that matches
// it, not by the encoding — confirm against the instruct rules.
5592 operand cmpOpU() %{
5593 match(Bool);
5595 format %{ "" %}
5596 interface(COND_INTER) %{
5597 equal(0x01);
5598 not_equal(0x02);
5599 greater(0x03);
5600 greater_equal(0x04);
5601 less(0x05);
5602 less_equal(0x06);
5603 overflow(0x7);
5604 no_overflow(0x8);
5605 %}
5606 %}
5609 //----------Special Memory Operands--------------------------------------------
5610 // Stack Slot Operand - This operand is used for loading and storing temporary
5611 // values on the stack where a match requires a value to
5612 // flow through memory.
// All stack-slot operands address [SP + slot offset]; 0x1d is the SP
// register encoding (see the // SP comments below).
5613 operand stackSlotP(sRegP reg) %{
5614 constraint(ALLOC_IN_RC(stack_slots));
5615 // No match rule because this operand is only generated in matching
5616 op_cost(50);
5617 format %{ "[$reg]" %}
5618 interface(MEMORY_INTER) %{
5619 base(0x1d); // SP
5620 index(0x0); // No Index
5621 scale(0x0); // No Scale
5622 disp($reg); // Stack Offset
5623 %}
5624 %}
5626 operand stackSlotI(sRegI reg) %{
5627 constraint(ALLOC_IN_RC(stack_slots));
5628 // No match rule because this operand is only generated in matching
5629 op_cost(50);
5630 format %{ "[$reg]" %}
5631 interface(MEMORY_INTER) %{
5632 base(0x1d); // SP
5633 index(0x0); // No Index
5634 scale(0x0); // No Scale
5635 disp($reg); // Stack Offset
5636 %}
5637 %}
5639 operand stackSlotF(sRegF reg) %{
5640 constraint(ALLOC_IN_RC(stack_slots));
5641 // No match rule because this operand is only generated in matching
5642 op_cost(50);
5643 format %{ "[$reg]" %}
5644 interface(MEMORY_INTER) %{
5645 base(0x1d); // SP
5646 index(0x0); // No Index
5647 scale(0x0); // No Scale
5648 disp($reg); // Stack Offset
5649 %}
5650 %}
5652 operand stackSlotD(sRegD reg) %{
5653 constraint(ALLOC_IN_RC(stack_slots));
5654 // No match rule because this operand is only generated in matching
5655 op_cost(50);
5656 format %{ "[$reg]" %}
5657 interface(MEMORY_INTER) %{
5658 base(0x1d); // SP
5659 index(0x0); // No Index
5660 scale(0x0); // No Scale
5661 disp($reg); // Stack Offset
5662 %}
5663 %}
5665 operand stackSlotL(sRegL reg) %{
5666 constraint(ALLOC_IN_RC(stack_slots));
5667 // No match rule because this operand is only generated in matching
5668 op_cost(50);
5669 format %{ "[$reg]" %}
5670 interface(MEMORY_INTER) %{
5671 base(0x1d); // SP
5672 index(0x0); // No Index
5673 scale(0x0); // No Scale
5674 disp($reg); // Stack Offset
5675 %}
5676 %}
5679 //------------------------OPERAND CLASSES--------------------------------------
5680 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// "memory" groups every addressing-mode operand defined above so instruct
// rules can accept any of them through a single operand class.
5681 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5684 //----------PIPELINE-----------------------------------------------------------
5685 // Rules which define the behavior of the target architectures pipeline.
5687 pipeline %{
5689 //----------ATTRIBUTES---------------------------------------------------------
// Global facts about the instruction encoding and fetch unit that the
// instruction scheduler relies on.
5690 attributes %{
5691 fixed_size_instructions; // Fixed size instructions
5692 branch_has_delay_slot; // branches have a delay slot on gs2
5693 max_instructions_per_bundle = 1; // 1 instruction per bundle
5694 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5695 bundle_unit_size=4;
5696 instruction_unit_size = 4; // An instruction is 4 bytes long
5697 instruction_fetch_unit_size = 16; // The processor fetches one line
5698 instruction_fetch_units = 1; // of 16 bytes
5700 // List of nop instructions
5701 nops( MachNop );
5702 %}
5704 //----------RESOURCES----------------------------------------------------------
5705 // Resources are the functional units available to the machine
// Four decode slots, two integer ALUs, two FPUs, one memory port and one
// branch unit; DECODE/ALU/FPU are the "any of" composite resources.
5707 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5709 //----------PIPELINE DESCRIPTION-----------------------------------------------
5710 // Pipeline Description specifies the stages in the machine's pipeline
5712 // IF: fetch
5713 // ID: decode
5714 // RD: read
5715 // CA: calculate
5716 // WB: write back
5717 // CM: commit
5719 pipe_desc(IF, ID, RD, CA, WB, CM);
5722 //----------PIPELINE CLASSES---------------------------------------------------
5723 // Pipeline Classes describe the stages in which input and output are
5724 // referenced by the hardware pipeline.
5726 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5727 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5728 single_instruction;
5729 src1 : RD(read);
5730 src2 : RD(read);
5731 dst : WB(write)+1;
5732 DECODE : ID;
5733 ALU : CA;
5734 %}
5736 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5737 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5738 src1 : RD(read);
5739 src2 : RD(read);
5740 dst : WB(write)+5;
5741 DECODE : ID;
5742 ALU2 : CA;
5743 %}
5745 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5746 src1 : RD(read);
5747 src2 : RD(read);
5748 dst : WB(write)+10;
5749 DECODE : ID;
5750 ALU2 : CA;
5751 %}
5753 //No.19 Integer div operation : dst <-- reg1 div reg2
5754 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5755 src1 : RD(read);
5756 src2 : RD(read);
5757 dst : WB(write)+10;
5758 DECODE : ID;
5759 ALU2 : CA;
5760 %}
5762 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5763 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5764 instruction_count(2);
5765 src1 : RD(read);
5766 src2 : RD(read);
5767 dst : WB(write)+10;
5768 DECODE : ID;
5769 ALU2 : CA;
5770 %}
5772 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5773 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5774 instruction_count(2);
5775 src1 : RD(read);
5776 src2 : RD(read);
5777 dst : WB(write);
5778 DECODE : ID;
5779 ALU : CA;
5780 %}
5782 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5783 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5784 instruction_count(2);
5785 src : RD(read);
5786 dst : WB(write);
5787 DECODE : ID;
5788 ALU : CA;
5789 %}
5791 //no.16 load Long from memory :
5792 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5793 instruction_count(2);
5794 mem : RD(read);
5795 dst : WB(write)+5;
5796 DECODE : ID;
5797 MEM : RD;
5798 %}
5800 //No.17 Store Long to Memory :
5801 pipe_class ialu_storeL(mRegL src, memory mem) %{
5802 instruction_count(2);
5803 mem : RD(read);
5804 src : RD(read);
5805 DECODE : ID;
5806 MEM : RD;
5807 %}
5809 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5810 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5811 single_instruction;
5812 src : RD(read);
5813 dst : WB(write);
5814 DECODE : ID;
5815 ALU : CA;
5816 %}
5818 //No.3 Integer move operation : dst <-- reg
5819 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5820 src : RD(read);
5821 dst : WB(write);
5822 DECODE : ID;
5823 ALU : CA;
5824 %}
5826 //No.4 No instructions : do nothing
5827 pipe_class empty( ) %{
5828 instruction_count(0);
5829 %}
5831 //No.5 UnConditional branch :
5832 pipe_class pipe_jump( label labl ) %{
5833 multiple_bundles;
5834 DECODE : ID;
5835 BR : RD;
5836 %}
5838 //No.6 ALU Conditional branch :
5839 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5840 multiple_bundles;
5841 src1 : RD(read);
5842 src2 : RD(read);
5843 DECODE : ID;
5844 BR : RD;
5845 %}
5847 //no.7 load integer from memory :
5848 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5849 mem : RD(read);
5850 dst : WB(write)+3;
5851 DECODE : ID;
5852 MEM : RD;
5853 %}
5855 //No.8 Store Integer to Memory :
5856 pipe_class ialu_storeI(mRegI src, memory mem) %{
5857 mem : RD(read);
5858 src : RD(read);
5859 DECODE : ID;
5860 MEM : RD;
5861 %}
// ----- Pipeline classes: floating-point and FP<->GPR transfer -----
5864 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5865 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5866 src1 : RD(read);
5867 src2 : RD(read);
5868 dst : WB(write);
5869 DECODE : ID;
5870 FPU : CA;
5871 %}
5873 //No.22 Floating div operation : dst <-- reg1 div reg2 (uses FPU2 unit)
5874 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5875 src1 : RD(read);
5876 src2 : RD(read);
5877 dst : WB(write);
5878 DECODE : ID;
5879 FPU2 : CA;
5880 %}
// Int -> double conversion (uses FPU1 unit).
5882 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5883 src : RD(read);
5884 dst : WB(write);
5885 DECODE : ID;
5886 FPU1 : CA;
5887 %}
// Double -> int conversion (uses FPU1 unit).
5889 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5890 src : RD(read);
5891 dst : WB(write);
5892 DECODE : ID;
5893 FPU1 : CA;
5894 %}
// Move FPR -> GPR (mfc1-style transfer; modeled on the MEM unit).
5896 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5897 src : RD(read);
5898 dst : WB(write);
5899 DECODE : ID;
5900 MEM : RD;
5901 %}
// Move GPR -> FPR (mtc1-style transfer; occupies MEM for 5 cycles).
5903 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5904 src : RD(read);
5905 dst : WB(write);
5906 DECODE : ID;
5907 MEM : RD(5);
5908 %}
5910 //No.23 Floating sqrt operation : dst <-- sqrt(src)
5911 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5912 multiple_bundles;
5913 src1 : RD(read);
5914 src2 : RD(read);
5915 dst : WB(write);
5916 DECODE : ID;
5917 FPU2 : CA;
5918 %}
5920 //No.11 Load Floating from Memory : result written back late (WB+3)
5921 pipe_class fpu_loadF(regF dst, memory mem) %{
5922 instruction_count(1);
5923 mem : RD(read);
5924 dst : WB(write)+3;
5925 DECODE : ID;
5926 MEM : RD;
5927 %}
5929 //No.12 Store Floating to Memory :
5930 pipe_class fpu_storeF(regF src, memory mem) %{
5931 instruction_count(1);
5932 mem : RD(read);
5933 src : RD(read);
5934 DECODE : ID;
5935 MEM : RD;
5936 %}
5938 //No.13 FPU Conditional branch :
5939 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5940 multiple_bundles;
5941 src1 : RD(read);
5942 src2 : RD(read);
5943 DECODE : ID;
5944 BR : RD;
5945 %}
5947 //No.14 Floating FPU unary reg operation : dst <-- op reg
5948 pipe_class fpu1_regF(regF dst, regF src) %{
5949 src : RD(read);
5950 dst : WB(write);
5951 DECODE : ID;
5952 FPU : CA;
5953 %}
// ----- Pipeline classes: multi-instruction / serializing sequences -----
// These use force_serialization + fixed_latency as a coarse cost model
// for code sequences too complex to describe stage-by-stage.
5955 pipe_class long_memory_op() %{
5956 instruction_count(10); multiple_bundles; force_serialization;
5957 fixed_latency(30);
5958 %}
// Call that stays on a simple path (e.g. stub call through BR unit).
5960 pipe_class simple_call() %{
5961 instruction_count(10); multiple_bundles; force_serialization;
5962 fixed_latency(200);
5963 BR : RD;
5964 %}
// Generic call; like simple_call but without claiming the BR unit.
5966 pipe_class call() %{
5967 instruction_count(10); multiple_bundles; force_serialization;
5968 fixed_latency(200);
5969 %}
5971 //FIXME:
5972 //No.9 Pipe slow : catch-all for multi-instruction expansions
5973 pipe_class pipe_slow( ) %{
5974 instruction_count(20);
5975 force_serialization;
5976 multiple_bundles;
5977 fixed_latency(50);
5978 %}
5980 %}
5984 //----------INSTRUCTIONS-------------------------------------------------------
5985 //
5986 // match -- States which machine-independent subtree may be replaced
5987 // by this instruction.
5988 // ins_cost -- The estimated cost of this instruction is used by instruction
5989 // selection to identify a minimum cost tree of machine
5990 // instructions that matches a tree of machine-independent
5991 // instructions.
5992 // format -- A string providing the disassembly for this instruction.
5993 // The value of an instruction's operand may be inserted
5994 // by referring to it with a '$' prefix.
5995 // opcode -- Three instruction opcodes may be provided. These are referred
5996 // to within an encode class as $primary, $secondary, and $tertiary
5997 // respectively. The primary opcode is commonly used to
5998 // indicate the type of machine instruction, while secondary
5999 // and tertiary are often used for prefix options or addressing
6000 // modes.
6001 // ins_encode -- A list of encode classes with parameters. The encode class
6002 // name must have been defined in an 'enc_class' specification
6003 // in the encode section of the architecture description.
6006 // Load Integer
6007 instruct loadI(mRegI dst, memory mem) %{
6008 match(Set dst (LoadI mem));
6010 ins_cost(125);
6011 format %{ "lw $dst, $mem #@loadI" %}
6012 ins_encode (load_I_enc(dst, mem));
6013 ins_pipe( ialu_loadI );
6014 %}
// Load Integer with ConvI2L folded into the load
// (lw sign-extends to 64 bits on MIPS64, so one instruction suffices).
6016 instruct loadI_convI2L(mRegL dst, memory mem) %{
6017 match(Set dst (ConvI2L (LoadI mem)));
6019 ins_cost(125);
6020 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6021 ins_encode (load_I_enc(dst, mem));
6022 ins_pipe( ialu_loadI );
6023 %}
6025 // Load Integer (32 bit signed) to Byte (8 bit signed):
6025 // (x << 24) >> 24 narrowing folded into a single lb.
6026 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6027 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6029 ins_cost(125);
6030 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6031 ins_encode(load_B_enc(dst, mem));
6032 ins_pipe(ialu_loadI);
6033 %}
6035 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned):
6035 // (x & 0xFF) masking folded into a single lbu.
6036 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6037 match(Set dst (AndI (LoadI mem) mask));
6039 ins_cost(125);
6040 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6041 ins_encode(load_UB_enc(dst, mem));
6042 ins_pipe(ialu_loadI);
6043 %}
6045 // Load Integer (32 bit signed) to Short (16 bit signed):
6045 // (x << 16) >> 16 narrowing folded into a single lh.
6046 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6047 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6049 ins_cost(125);
6050 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6051 ins_encode(load_S_enc(dst, mem));
6052 ins_pipe(ialu_loadI);
6053 %}
6055 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned):
6055 // (x & 0xFFFF) masking folded into a single lhu.
6056 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6057 match(Set dst (AndI (LoadI mem) mask));
6059 ins_cost(125);
6060 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6061 ins_encode(load_C_enc(dst, mem));
6062 ins_pipe(ialu_loadI);
6063 %}
6065 // Load Long.
6066 instruct loadL(mRegL dst, memory mem) %{
6067 // predicate(!((LoadLNode*)n)->require_atomic_access());
6068 match(Set dst (LoadL mem));
6070 ins_cost(250);
6071 format %{ "ld $dst, $mem #@loadL" %}
6072 ins_encode(load_L_enc(dst, mem));
6073 ins_pipe( ialu_loadL );
6074 %}
6076 // Load Long - UNaligned.
6076 // Currently encoded as a plain ld; see FIXME below.
6077 instruct loadL_unaligned(mRegL dst, memory mem) %{
6078 match(Set dst (LoadL_unaligned mem));
6080 // FIXME: Need more effective ldl/ldr
6081 ins_cost(450);
6082 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6083 ins_encode(load_L_enc(dst, mem));
6084 ins_pipe( ialu_loadL );
6085 %}
6087 // Store Long (register source)
6088 instruct storeL_reg(memory mem, mRegL src) %{
6089 match(Set mem (StoreL mem src));
6091 ins_cost(200);
6092 format %{ "sd $mem, $src #@storeL_reg\n" %}
6093 ins_encode(store_L_reg_enc(mem, src));
6094 ins_pipe( ialu_storeL );
6095 %}
// Store long zero: cheaper than storeL_imm because the zero register
// can be stored directly (lower ins_cost wins during selection).
6097 instruct storeL_immL0(memory mem, immL0 zero) %{
6098 match(Set mem (StoreL mem zero));
6100 ins_cost(180);
6101 format %{ "sd zero, $mem #@storeL_immL0" %}
6102 ins_encode(store_L_immL0_enc(mem, zero));
6103 ins_pipe( ialu_storeL );
6104 %}
// Store long immediate (general, non-zero constant).
6106 instruct storeL_imm(memory mem, immL src) %{
6107 match(Set mem (StoreL mem src));
6109 ins_cost(200);
6110 format %{ "sd $src, $mem #@storeL_imm" %}
6111 ins_encode(store_L_immL_enc(mem, src));
6112 ins_pipe( ialu_storeL );
6113 %}
6115 // Load Compressed Pointer (32-bit narrow oop, zero-extended by lwu)
6116 instruct loadN(mRegN dst, memory mem)
6117 %{
6118 match(Set dst (LoadN mem));
6120 ins_cost(125); // XXX
6121 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6122 ins_encode (load_N_enc(dst, mem));
6123 ins_pipe( ialu_loadI ); // XXX
6124 %}
// Load + DecodeN folded into one lwu. Only legal when decoding is the
// identity transform (no heap base, no shift), enforced by the predicate.
6126 instruct loadN2P(mRegP dst, memory mem)
6127 %{
6128 match(Set dst (DecodeN (LoadN mem)));
6129 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6131 ins_cost(125); // XXX
6132 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6133 ins_encode (load_N_enc(dst, mem));
6134 ins_pipe( ialu_loadI ); // XXX
6135 %}
6137 // Load Pointer (full 64-bit)
6138 instruct loadP(mRegP dst, memory mem) %{
6139 match(Set dst (LoadP mem));
6141 ins_cost(125);
6142 format %{ "ld $dst, $mem #@loadP" %}
6143 ins_encode (load_P_enc(dst, mem));
6144 ins_pipe( ialu_loadI );
6145 %}
6147 // Load Klass Pointer (full 64-bit); encoding is the same ld as loadP.
6148 instruct loadKlass(mRegP dst, memory mem) %{
6149 match(Set dst (LoadKlass mem));
6151 ins_cost(125);
// Format string fixed: load_P_enc emits ld (see loadP), not a "MOV".
6152 format %{ "ld $dst, $mem @ loadKlass" %}
6153 ins_encode (load_P_enc(dst, mem));
6154 ins_pipe( ialu_loadI );
6155 %}
6157 // Load narrow Klass Pointer (32-bit, zero-extended by lwu)
6158 instruct loadNKlass(mRegN dst, memory mem)
6159 %{
6160 match(Set dst (LoadNKlass mem));
6162 ins_cost(125); // XXX
6163 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6164 ins_encode (load_N_enc(dst, mem));
6165 ins_pipe( ialu_loadI ); // XXX
6166 %}
// Load + DecodeNKlass folded into one lwu; only legal when klass
// decoding is the identity transform (predicate below).
6168 instruct loadN2PKlass(mRegP dst, memory mem)
6169 %{
6170 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6171 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6173 ins_cost(125); // XXX
6174 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6175 ins_encode (load_N_enc(dst, mem));
6176 ins_pipe( ialu_loadI ); // XXX
6177 %}
6179 // Load integer constant into a register.
6180 instruct loadConI(mRegI dst, immI src) %{
6181 match(Set dst src);
6183 ins_cost(150);
6184 format %{ "mov $dst, $src #@loadConI" %}
6185 ins_encode %{
6186 Register dst = $dst$$Register;
6187 int value = $src$$constant;
6188 __ move(dst, value);
6189 %}
6190 ins_pipe( ialu_regI_regI );
6191 %}
// Load arbitrary 64-bit constant via the set64 macro (multi-insn sequence).
// Cheaper forms below (loadConL16/loadConL0) win for small constants.
6194 instruct loadConL_set64(mRegL dst, immL src) %{
6195 match(Set dst src);
6196 ins_cost(120);
6197 format %{ "li $dst, $src @ loadConL_set64" %}
6198 ins_encode %{
6199 __ set64($dst$$Register, $src$$constant);
6200 %}
6201 ins_pipe(ialu_regL_regL);
6202 %}
6204 /*
6205 // Load long value from constant table (predicated by immL_expensive).
6206 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6207 match(Set dst src);
6208 ins_cost(150);
6209 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6210 ins_encode %{
6211 int con_offset = $constantoffset($src);
6213 if (Assembler::is_simm16(con_offset)) {
6214 __ ld($dst$$Register, $constanttablebase, con_offset);
6215 } else {
6216 __ set64(AT, con_offset);
6217 if (UseLoongsonISA) {
6218 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6219 } else {
6220 __ daddu(AT, $constanttablebase, AT);
6221 __ ld($dst$$Register, AT, 0);
6222 }
6223 }
6224 %}
6225 ins_pipe(ialu_loadI);
6226 %}
6227 */
// Load long constant that fits in a signed 16-bit immediate:
// a single daddiu from the zero register.
6229 instruct loadConL16(mRegL dst, immL16 src) %{
6230 match(Set dst src);
6231 ins_cost(105);
6232 format %{ "mov $dst, $src #@loadConL16" %}
6233 ins_encode %{
6234 Register dst_reg = as_Register($dst$$reg);
6235 int value = $src$$constant;
6236 __ daddiu(dst_reg, R0, value);
6237 %}
6238 ins_pipe( ialu_regL_regL );
6239 %}
// Load long constant zero: daddu dst, R0, R0 (cheapest form).
6242 instruct loadConL0(mRegL dst, immL0 src) %{
6243 match(Set dst src);
6244 ins_cost(100);
6245 format %{ "mov $dst, zero #@loadConL0" %}
6246 ins_encode %{
6247 Register dst_reg = as_Register($dst$$reg);
6248 __ daddu(dst_reg, R0, R0);
6249 %}
6250 ins_pipe( ialu_regL_regL );
6251 %}
6253 // Load array length (Range); encoding is the same lw as loadI.
6254 instruct loadRange(mRegI dst, memory mem) %{
6255 match(Set dst (LoadRange mem));
6257 ins_cost(125);
// Format string fixed: load_I_enc emits lw (see loadI), not a "MOV".
6258 format %{ "lw $dst, $mem @ loadRange" %}
6259 ins_encode(load_I_enc(dst, mem));
6260 ins_pipe( ialu_loadI );
6261 %}
// Store full-width pointer.
6264 instruct storeP(memory mem, mRegP src ) %{
6265 match(Set mem (StoreP mem src));
6267 ins_cost(125);
6268 format %{ "sd $src, $mem #@storeP" %}
6269 ins_encode(store_P_reg_enc(mem, src));
6270 ins_pipe( ialu_storeI );
6271 %}
6273 // Store NULL Pointer, mark word, or other simple pointer constant:
6273 // stores the zero register directly, no constant materialization.
6274 instruct storeImmP0(memory mem, immP0 zero) %{
6275 match(Set mem (StoreP mem zero));
6277 ins_cost(125);
6278 format %{ "mov $mem, $zero #@storeImmP0" %}
6279 ins_encode(store_P_immP0_enc(mem));
6280 ins_pipe( ialu_storeI );
6281 %}
6283 // Store Byte Immediate
6284 instruct storeImmB(memory mem, immI8 src) %{
6285 match(Set mem (StoreB mem src));
6287 ins_cost(150);
6288 format %{ "movb $mem, $src #@storeImmB" %}
6289 ins_encode(store_B_immI_enc(mem, src));
6290 ins_pipe( ialu_storeI );
6291 %}
6293 // Store Compressed Pointer (32-bit narrow oop)
6294 instruct storeN(memory mem, mRegN src)
6295 %{
6296 match(Set mem (StoreN mem src));
6298 ins_cost(125); // XXX
6299 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6300 ins_encode(store_N_reg_enc(mem, src));
6301 ins_pipe( ialu_storeI );
6302 %}
// Store + EncodeP folded into one sw; only legal when encoding is the
// identity transform (no heap base, no shift), enforced by the predicate.
6304 instruct storeP2N(memory mem, mRegP src)
6305 %{
6306 match(Set mem (StoreN mem (EncodeP src)));
6307 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6309 ins_cost(125); // XXX
6310 format %{ "sw $mem, $src\t# @ storeP2N" %}
6311 ins_encode(store_N_reg_enc(mem, src));
6312 ins_pipe( ialu_storeI );
6313 %}
// Store narrow klass pointer.
6315 instruct storeNKlass(memory mem, mRegN src)
6316 %{
6317 match(Set mem (StoreNKlass mem src));
6319 ins_cost(125); // XXX
6320 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6321 ins_encode(store_N_reg_enc(mem, src));
6322 ins_pipe( ialu_storeI );
6323 %}
// Store + EncodePKlass folded into one sw; identity-transform-only,
// mirroring storeP2N but for klass compression (predicate below).
6325 instruct storeP2NKlass(memory mem, mRegP src)
6326 %{
6327 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6328 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6330 ins_cost(125); // XXX
6331 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6332 ins_encode(store_N_reg_enc(mem, src));
6333 ins_pipe( ialu_storeI );
6334 %}
// Store narrow-oop NULL: stores zero directly.
6336 instruct storeImmN0(memory mem, immN0 zero)
6337 %{
6338 match(Set mem (StoreN mem zero));
6340 ins_cost(125); // XXX
6341 format %{ "storeN0 zero, $mem\t# compressed ptr" %}
6342 ins_encode(storeImmN0_enc(mem, zero));
6343 ins_pipe( ialu_storeI );
6344 %}
6346 // Store Byte
6347 instruct storeB(memory mem, mRegI src) %{
6348 match(Set mem (StoreB mem src));
6350 ins_cost(125);
6351 format %{ "sb $src, $mem #@storeB" %}
6352 ins_encode(store_B_reg_enc(mem, src));
6353 ins_pipe( ialu_storeI );
6354 %}
// Store byte with ConvL2I folded away: sb only reads the low 8 bits,
// so the long source register can be stored directly.
6356 instruct storeB_convL2I(memory mem, mRegL src) %{
6357 match(Set mem (StoreB mem (ConvL2I src)));
6359 ins_cost(125);
6360 format %{ "sb $src, $mem #@storeB_convL2I" %}
6361 ins_encode(store_B_reg_enc(mem, src));
6362 ins_pipe( ialu_storeI );
6363 %}
6365 // Load Byte (8bit signed)
6366 instruct loadB(mRegI dst, memory mem) %{
6367 match(Set dst (LoadB mem));
6369 ins_cost(125);
6370 format %{ "lb $dst, $mem #@loadB" %}
6371 ins_encode(load_B_enc(dst, mem));
6372 ins_pipe( ialu_loadI );
6373 %}
// Load byte + ConvI2L folded: lb already sign-extends to 64 bits.
6375 instruct loadB_convI2L(mRegL dst, memory mem) %{
6376 match(Set dst (ConvI2L (LoadB mem)));
6378 ins_cost(125);
6379 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6380 ins_encode(load_B_enc(dst, mem));
6381 ins_pipe( ialu_loadI );
6382 %}
6384 // Load Byte (8bit UNsigned)
6385 instruct loadUB(mRegI dst, memory mem) %{
6386 match(Set dst (LoadUB mem));
6388 ins_cost(125);
6389 format %{ "lbu $dst, $mem #@loadUB" %}
6390 ins_encode(load_UB_enc(dst, mem));
6391 ins_pipe( ialu_loadI );
6392 %}
// Load unsigned byte + ConvI2L folded: lbu zero-extends to 64 bits.
6394 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6395 match(Set dst (ConvI2L (LoadUB mem)));
6397 ins_cost(125);
6398 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6399 ins_encode(load_UB_enc(dst, mem));
6400 ins_pipe( ialu_loadI );
6401 %}
6403 // Load Short (16bit signed)
6404 instruct loadS(mRegI dst, memory mem) %{
6405 match(Set dst (LoadS mem));
6407 ins_cost(125);
6408 format %{ "lh $dst, $mem #@loadS" %}
6409 ins_encode(load_S_enc(dst, mem));
6410 ins_pipe( ialu_loadI );
6411 %}
6413 // Load Short (16 bit signed) to Byte (8 bit signed):
6413 // (x << 24) >> 24 narrowing folded into a single lb.
6414 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6415 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6417 ins_cost(125);
6418 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6419 ins_encode(load_B_enc(dst, mem));
6420 ins_pipe(ialu_loadI);
6421 %}
// Load short + ConvI2L folded: lh already sign-extends to 64 bits.
6423 instruct loadS_convI2L(mRegL dst, memory mem) %{
6424 match(Set dst (ConvI2L (LoadS mem)));
6426 ins_cost(125);
6427 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6428 ins_encode(load_S_enc(dst, mem));
6429 ins_pipe( ialu_loadI );
6430 %}
6432 // Store Integer Immediate
6433 instruct storeImmI(memory mem, immI src) %{
6434 match(Set mem (StoreI mem src));
6436 ins_cost(150);
6437 format %{ "mov $mem, $src #@storeImmI" %}
6438 ins_encode(store_I_immI_enc(mem, src));
6439 ins_pipe( ialu_storeI );
6440 %}
6442 // Store Integer
6443 instruct storeI(memory mem, mRegI src) %{
6444 match(Set mem (StoreI mem src));
6446 ins_cost(125);
6447 format %{ "sw $mem, $src #@storeI" %}
6448 ins_encode(store_I_reg_enc(mem, src));
6449 ins_pipe( ialu_storeI );
6450 %}
// Store int with ConvL2I folded away: sw only writes the low 32 bits,
// so the long source register can be stored directly.
6452 instruct storeI_convL2I(memory mem, mRegL src) %{
6453 match(Set mem (StoreI mem (ConvL2I src)));
6455 ins_cost(125);
6456 format %{ "sw $mem, $src #@storeI_convL2I" %}
6457 ins_encode(store_I_reg_enc(mem, src));
6458 ins_pipe( ialu_storeI );
6459 %}
6461 // Load Float
6462 instruct loadF(regF dst, memory mem) %{
6463 match(Set dst (LoadF mem));
6465 ins_cost(150);
6466 format %{ "loadF $dst, $mem #@loadF" %}
6467 ins_encode(load_F_enc(dst, mem));
6468 ins_pipe( ialu_loadI );
6469 %}
// Load a pointer constant, attaching relocation info when the constant
// is an oop or metadata so the GC / class unloading can patch it.
6471 instruct loadConP_general(mRegP dst, immP src) %{
6472 match(Set dst src);
6474 ins_cost(120);
6475 format %{ "li $dst, $src #@loadConP_general" %}
6477 ins_encode %{
6478 Register dst = $dst$$Register;
6479 long* value = (long*)$src$$constant;
// Metadata constant: record in the oop recorder and emit a patchable
// 48-bit immediate sequence carrying a metadata relocation.
6481 if($src->constant_reloc() == relocInfo::metadata_type){
6482 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6483 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6485 __ relocate(rspec);
6486 __ patchable_set48(dst, (long)value);
6487 }else if($src->constant_reloc() == relocInfo::oop_type){
6488 int oop_index = __ oop_recorder()->find_index((jobject)value);
6489 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6491 __ relocate(rspec);
6492 __ patchable_set48(dst, (long)value);
6493 } else if ($src->constant_reloc() == relocInfo::none) {
// Plain (non-relocatable) pointer constant: no patchability needed.
6494 __ set64(dst, (long)value);
6495 }
// NOTE(review): any other reloc type falls through and emits nothing —
// presumably unreachable for immP; confirm against the matcher rules.
6496 %}
6498 ins_pipe( ialu_regI_regI );
6499 %}
6501 /*
6502 instruct loadConP_load(mRegP dst, immP_load src) %{
6503 match(Set dst src);
6505 ins_cost(100);
6506 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6508 ins_encode %{
6510 int con_offset = $constantoffset($src);
6512 if (Assembler::is_simm16(con_offset)) {
6513 __ ld($dst$$Register, $constanttablebase, con_offset);
6514 } else {
6515 __ set64(AT, con_offset);
6516 if (UseLoongsonISA) {
6517 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6518 } else {
6519 __ daddu(AT, $constanttablebase, AT);
6520 __ ld($dst$$Register, AT, 0);
6521 }
6522 }
6523 %}
6525 ins_pipe(ialu_loadI);
6526 %}
6527 */
// Load a cheap (non-oop, non-relocatable) pointer constant via set64.
6529 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6530 match(Set dst src);
6532 ins_cost(80);
6533 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6535 ins_encode %{
6536 __ set64($dst$$Register, $src$$constant);
6537 %}
6539 ins_pipe(ialu_regI_regI);
6540 %}
// Load the safepoint polling page address.
6543 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6544 match(Set dst src);
6546 ins_cost(50);
6547 format %{ "li $dst, $src #@loadConP_poll" %}
6549 ins_encode %{
6550 Register dst = $dst$$Register;
6551 intptr_t value = (intptr_t)$src$$constant;
6553 __ set64(dst, (jlong)value);
6554 %}
6556 ins_pipe( ialu_regI_regI );
6557 %}
// Load pointer NULL: materialized from the zero register.
6559 instruct loadConP0(mRegP dst, immP0 src)
6560 %{
6561 match(Set dst src);
6563 ins_cost(50);
6564 format %{ "mov $dst, R0\t# ptr" %}
6565 ins_encode %{
6566 Register dst_reg = $dst$$Register;
6567 __ daddu(dst_reg, R0, R0);
6568 %}
6569 ins_pipe( ialu_regI_regI );
6570 %}
// Load compressed-oop NULL.
6572 instruct loadConN0(mRegN dst, immN0 src) %{
6573 match(Set dst src);
6574 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6575 ins_encode %{
6576 __ move($dst$$Register, R0);
6577 %}
6578 ins_pipe( ialu_regI_regI );
6579 %}
// Load non-null narrow-oop constant (emits oop relocation internally).
6581 instruct loadConN(mRegN dst, immN src) %{
6582 match(Set dst src);
6584 ins_cost(125);
6585 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6586 ins_encode %{
6587 Register dst = $dst$$Register;
6588 __ set_narrow_oop(dst, (jobject)$src$$constant);
6589 %}
6590 ins_pipe( ialu_regI_regI ); // XXX
6591 %}
// Load narrow-klass constant.
6593 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6594 match(Set dst src);
6596 ins_cost(125);
6597 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6598 ins_encode %{
6599 Register dst = $dst$$Register;
6600 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6601 %}
6602 ins_pipe( ialu_regI_regI ); // XXX
6603 %}
6605 //FIXME
6606 // Tail Call; Jump from runtime stub to Java code.
6607 // Also known as an 'interprocedural jump'.
6608 // Target of jump will eventually return to caller.
6609 // TailJump below removes the return address.
6610 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6611 match(TailCall jump_target method_oop );
6612 ins_cost(300);
6613 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6615 ins_encode %{
6616 Register target = $jump_target$$Register;
6617 Register oop = $method_oop$$Register;
6619 // RA will be used in generate_forward_exception()
6620 __ push(RA);
// Method oop is passed to the callee in S3; then an indirect jump
// (with the mandatory branch-delay-slot nop) transfers control.
6622 __ move(S3, oop);
6623 __ jr(target);
6624 __ delayed()->nop();
6625 %}
6627 ins_pipe( pipe_jump );
6628 %}
6630 // Create exception oop: created by stack-crawling runtime code.
6631 // Created exception is now available to this handler, and is setup
6632 // just prior to jumping to this handler. No code emitted.
6633 instruct CreateException( a0_RegP ex_oop )
6634 %{
6635 match(Set ex_oop (CreateEx));
6637 // use the following format syntax
6638 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6639 ins_encode %{
6640 // Intentionally empty: the runtime has already placed the oop in A0.
6640 // X86 leaves this function empty as well.
6641 __ block_comment("CreateException is empty in MIPS");
6642 %}
6643 ins_pipe( empty );
6644 // ins_pipe( pipe_jump );
6645 %}
6648 /* The mechanism of exception handling is clear now.
6650 - Common try/catch:
6651 [stubGenerator_mips.cpp] generate_forward_exception()
6652 |- V0, V1 are created
6653 |- T9 <= SharedRuntime::exception_handler_for_return_address
6654 `- jr T9
6655 `- the caller's exception_handler
6656 `- jr OptoRuntime::exception_blob
6657 `- here
6658 - Rethrow(e.g. 'unwind'):
6659 * The callee:
6660 |- an exception is triggered during execution
6661 `- exits the callee method through RethrowException node
6662 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6663 `- The callee jumps to OptoRuntime::rethrow_stub()
6664 * In OptoRuntime::rethrow_stub:
6665 |- The VM calls _rethrow_Java to determine the return address in the caller method
6666 `- exits the stub with tailjmpInd
6667 |- pops exception_oop(V0) and exception_pc(V1)
6668 `- jumps to the return address(usually an exception_handler)
6669 * The caller:
6670 `- continues processing the exception_blob with V0/V1
6671 */
6673 /*
6674 Disassembling OptoRuntime::rethrow_stub()
6676 ; locals
6677 0x2d3bf320: addiu sp, sp, 0xfffffff8
6678 0x2d3bf324: sw ra, 0x4(sp)
6679 0x2d3bf328: sw fp, 0x0(sp)
6680 0x2d3bf32c: addu fp, sp, zero
6681 0x2d3bf330: addiu sp, sp, 0xfffffff0
6682 0x2d3bf334: sw ra, 0x8(sp)
6683 0x2d3bf338: sw t0, 0x4(sp)
6684 0x2d3bf33c: sw sp, 0x0(sp)
6686 ; get_thread(S2)
6687 0x2d3bf340: addu s2, sp, zero
6688 0x2d3bf344: srl s2, s2, 12
6689 0x2d3bf348: sll s2, s2, 2
6690 0x2d3bf34c: lui at, 0x2c85
6691 0x2d3bf350: addu at, at, s2
6692 0x2d3bf354: lw s2, 0xffffcc80(at)
6694 0x2d3bf358: lw s0, 0x0(sp)
6695 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6696 0x2d3bf360: sw s2, 0xc(sp)
6698 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6699 0x2d3bf364: lw a0, 0x4(sp)
6700 0x2d3bf368: lw a1, 0xc(sp)
6701 0x2d3bf36c: lw a2, 0x8(sp)
6702 ;; Java_To_Runtime
6703 0x2d3bf370: lui t9, 0x2c34
6704 0x2d3bf374: addiu t9, t9, 0xffff8a48
6705 0x2d3bf378: jalr t9
6706 0x2d3bf37c: nop
6708 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6710 0x2d3bf384: lw s0, 0xc(sp)
6711 0x2d3bf388: sw zero, 0x118(s0)
6712 0x2d3bf38c: sw zero, 0x11c(s0)
6713 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6714 0x2d3bf394: addu s2, s0, zero
6715 0x2d3bf398: sw zero, 0x144(s2)
6716 0x2d3bf39c: lw s0, 0x4(s2)
6717 0x2d3bf3a0: addiu s4, zero, 0x0
6718 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6719 0x2d3bf3a8: nop
6720 0x2d3bf3ac: addiu sp, sp, 0x10
6721 0x2d3bf3b0: addiu sp, sp, 0x8
6722 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6723 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6724 0x2d3bf3bc: lui at, 0x2b48
6725 0x2d3bf3c0: lw at, 0x100(at)
6727 ; tailjmpInd: Restores exception_oop & exception_pc
6728 0x2d3bf3c4: addu v1, ra, zero
6729 0x2d3bf3c8: addu v0, s1, zero
6730 0x2d3bf3cc: jr s3
6731 0x2d3bf3d0: nop
6732 ; Exception:
6733 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6734 0x2d3bf3d8: addiu s1, s1, 0x40
6735 0x2d3bf3dc: addiu s2, zero, 0x0
6736 0x2d3bf3e0: addiu sp, sp, 0x10
6737 0x2d3bf3e4: addiu sp, sp, 0x8
6738 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6739 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6740 0x2d3bf3f0: lui at, 0x2b48
6741 0x2d3bf3f4: lw at, 0x100(at)
6742 ; TailCalljmpInd
6743 __ push(RA); ; to be used in generate_forward_exception()
6744 0x2d3bf3f8: addu t7, s2, zero
6745 0x2d3bf3fc: jr s1
6746 0x2d3bf400: nop
6747 */
6748 // Rethrow exception:
6749 // The exception oop will come in the first argument position.
6750 // Then JUMP (not call) to the rethrow stub code.
6751 instruct RethrowException()
6752 %{
6753 match(Rethrow);
6755 // use the following format syntax
6756 format %{ "JMP rethrow_stub #@RethrowException" %}
6757 ins_encode %{
6758 __ block_comment("@ RethrowException");
// Mark and relocate so the runtime-call target can be patched later.
6760 cbuf.set_insts_mark();
6761 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6763 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6764 __ patchable_jump((address)OptoRuntime::rethrow_stub());
6765 %}
6766 ins_pipe( pipe_jump );
6767 %}
6769 // ============================================================================
6770 // Branch Instructions --- long offset versions
6772 // Jump Direct: unconditional goto with a long-range (far) encoding.
6773 instruct jmpDir_long(label labl) %{
6774 match(Goto);
6775 effect(USE labl);
6777 ins_cost(300);
6778 format %{ "JMP $labl #@jmpDir_long" %}
6780 ins_encode %{
6781 Label* L = $labl$$label;
// jmp_far handles targets beyond the short-branch displacement range.
6782 __ jmp_far(*L);
6783 %}
6785 ins_pipe( pipe_jump );
6786 //ins_pc_relative(1);
6787 %}
6789 // Jump Direct Conditional - Label defines a relative address from Jcc+1.
6789 // Counted-loop back-branch on a signed int compare, long-range form.
6790 instruct jmpLoopEnd_long(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
6791 match(CountedLoopEnd cop (CmpI src1 src2));
6792 effect(USE labl);
6794 ins_cost(300);
6795 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_long" %}
6796 ins_encode %{
6797 Register op1 = $src1$$Register;
6798 Register op2 = $src2$$Register;
6799 Label* L = $labl$$label;
6800 int flag = $cop$$cmpcode;
// Signed compares use slt; the unordered cases branch on AT against R0.
6802 switch(flag) {
6803 case 0x01: //equal
6804 __ beq_long(op1, op2, *L);
6805 break;
6806 case 0x02: //not_equal
6807 __ bne_long(op1, op2, *L);
6808 break;
6809 case 0x03: //above
6810 __ slt(AT, op2, op1);
6811 __ bne_long(AT, R0, *L);
6812 break;
6813 case 0x04: //above_equal
6814 __ slt(AT, op1, op2);
6815 __ beq_long(AT, R0, *L);
6816 break;
6817 case 0x05: //below
6818 __ slt(AT, op1, op2);
6819 __ bne_long(AT, R0, *L);
6820 break;
6821 case 0x06: //below_equal
6822 __ slt(AT, op2, op1);
6823 __ beq_long(AT, R0, *L);
6824 break;
6825 default:
6826 Unimplemented();
6827 }
6828 %}
6829 ins_pipe( pipe_jump );
6830 ins_pc_relative(1);
6831 %}
// Counted-loop back-branch against an int immediate: the constant is
// first materialized into AT, then compared as in jmpLoopEnd_long.
6833 instruct jmpLoopEnd_reg_immI_long(cmpOp cop, mRegI src1, immI src2, label labl) %{
6834 match(CountedLoopEnd cop (CmpI src1 src2));
6835 effect(USE labl);
6837 ins_cost(300);
6838 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_immI_long" %}
6839 ins_encode %{
6840 Register op1 = $src1$$Register;
6841 Register op2 = AT;
6842 Label* L = $labl$$label;
6843 int flag = $cop$$cmpcode;
6845 __ move(op2, $src2$$constant);
// Note: in the slt cases AT (== op2) is read as a source and then
// overwritten as the destination of the same instruction, which is safe.
6847 switch(flag) {
6848 case 0x01: //equal
6849 __ beq_long(op1, op2, *L);
6850 break;
6851 case 0x02: //not_equal
6852 __ bne_long(op1, op2, *L);
6853 break;
6854 case 0x03: //above
6855 __ slt(AT, op2, op1);
6856 __ bne_long(AT, R0, *L);
6857 break;
6858 case 0x04: //above_equal
6859 __ slt(AT, op1, op2);
6860 __ beq_long(AT, R0, *L);
6861 break;
6862 case 0x05: //below
6863 __ slt(AT, op1, op2);
6864 __ bne_long(AT, R0, *L);
6865 break;
6866 case 0x06: //below_equal
6867 __ slt(AT, op2, op1);
6868 __ beq_long(AT, R0, *L);
6869 break;
6870 default:
6871 Unimplemented();
6872 }
6873 %}
6874 ins_pipe( pipe_jump );
6875 ins_pc_relative(1);
6876 %}
6879 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
6880 instruct jmpCon_flags_long(cmpOp cop, FlagsReg cr, label labl) %{
6881 match(If cop cr);
6882 effect(USE labl);
6884 ins_cost(300);
6885 format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags_long" %}
6887 ins_encode %{
6888 Label* L = $labl$$label;
// AT holds the "flags" value (nonzero == condition set); the equal case
// therefore branches on AT != 0, and not-equal on AT == 0.
6889 switch($cop$$cmpcode) {
6890 case 0x01: //equal
6891 __ bne_long(AT, R0, *L);
6892 break;
6893 case 0x02: //not equal
6894 __ beq_long(AT, R0, *L);
6895 break;
6896 default:
6897 Unimplemented();
6898 }
6899 %}
6901 ins_pipe( pipe_jump );
6902 ins_pc_relative(1);
6903 %}
6905 // Conditional jumps
6905 // Pointer compare against NULL; only eq/ne are meaningful here.
6906 instruct branchConP_zero_long(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6907 match(If cmp (CmpP op1 zero));
6908 effect(USE labl);
6910 ins_cost(180);
6911 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero_long" %}
6913 ins_encode %{
6914 Register op1 = $op1$$Register;
6915 Register op2 = R0;
6916 Label* L = $labl$$label;
6917 int flag = $cmp$$cmpcode;
6919 switch(flag) {
6920 case 0x01: //equal
6921 __ beq_long(op1, op2, *L);
6922 break;
6923 case 0x02: //not_equal
6924 __ bne_long(op1, op2, *L);
6925 break;
6926 default:
6927 Unimplemented();
6928 }
6929 %}
6931 ins_pc_relative(1);
6932 ins_pipe( pipe_alu_branch );
6933 %}
// Narrow-oop NULL check with the DecodeN folded away: when decoding is
// the identity transform (predicate below), the narrow register can be
// compared against zero directly.
6935 instruct branchConN2P_zero_long(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
6936 match(If cmp (CmpP (DecodeN op1) zero));
6937 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6938 effect(USE labl);
6940 ins_cost(180);
6941 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero_long" %}
6943 ins_encode %{
6944 Register op1 = $op1$$Register;
6945 Register op2 = R0;
6946 Label* L = $labl$$label;
6947 int flag = $cmp$$cmpcode;
6949 switch(flag)
6950 {
6951 case 0x01: //equal
6952 __ beq_long(op1, op2, *L);
6953 break;
6954 case 0x02: //not_equal
6955 __ bne_long(op1, op2, *L);
6956 break;
6957 default:
6958 Unimplemented();
6959 }
6960 %}
6962 ins_pc_relative(1);
6963 ins_pipe( pipe_alu_branch );
6964 %}
// Pointer compare between two registers; pointers compare unsigned,
// hence sltu for the ordered cases.
6967 instruct branchConP_long(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6968 match(If cmp (CmpP op1 op2));
6969 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6970 effect(USE labl);
6972 ins_cost(200);
6973 format %{ "b$cmp $op1, $op2, $labl #@branchConP_long" %}
6975 ins_encode %{
6976 Register op1 = $op1$$Register;
6977 Register op2 = $op2$$Register;
6978 Label* L = $labl$$label;
6979 int flag = $cmp$$cmpcode;
6981 switch(flag) {
6982 case 0x01: //equal
6983 __ beq_long(op1, op2, *L);
6984 break;
6985 case 0x02: //not_equal
6986 __ bne_long(op1, op2, *L);
6987 break;
6988 case 0x03: //above
6989 __ sltu(AT, op2, op1);
6990 __ bne_long(R0, AT, *L);
6991 break;
6992 case 0x04: //above_equal
6993 __ sltu(AT, op1, op2);
6994 __ beq_long(AT, R0, *L);
6995 break;
6996 case 0x05: //below
6997 __ sltu(AT, op1, op2);
6998 __ bne_long(R0, AT, *L);
6999 break;
7000 case 0x06: //below_equal
7001 __ sltu(AT, op2, op1);
7002 __ beq_long(AT, R0, *L);
7003 break;
7004 default:
7005 Unimplemented();
7006 }
7007 %}
7009 ins_pc_relative(1);
7010 ins_pipe( pipe_alu_branch );
7011 %}
// Narrow-oop compare against the compressed NULL; only eq/ne apply.
7013 instruct cmpN_null_branch_long(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
7014 match(If cmp (CmpN op1 null));
7015 effect(USE labl);
7017 ins_cost(180);
7018 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7019 "BP$cmp $labl @ cmpN_null_branch_long" %}
7020 ins_encode %{
7021 Register op1 = $op1$$Register;
7022 Register op2 = R0;
7023 Label* L = $labl$$label;
7024 int flag = $cmp$$cmpcode;
7026 switch(flag) {
7027 case 0x01: //equal
7028 __ beq_long(op1, op2, *L);
7029 break;
7030 case 0x02: //not_equal
7031 __ bne_long(op1, op2, *L);
7032 break;
7033 default:
7034 Unimplemented();
7035 }
7036 %}
7037 //TODO: pipe_branchP or create pipe_branchN LEE
7038 ins_pc_relative(1);
7039 ins_pipe( pipe_alu_branch );
7040 %}
// Narrow-oop compare between two registers; compressed oops compare
// unsigned, hence sltu for the ordered cases.
7042 instruct cmpN_reg_branch_long(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7043 match(If cmp (CmpN op1 op2));
7044 effect(USE labl);
7046 ins_cost(180);
7047 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7048 "BP$cmp $labl @ cmpN_reg_branch_long" %}
7049 ins_encode %{
7050 Register op1_reg = $op1$$Register;
7051 Register op2_reg = $op2$$Register;
7052 Label* L = $labl$$label;
7053 int flag = $cmp$$cmpcode;
7055 switch(flag) {
7056 case 0x01: //equal
7057 __ beq_long(op1_reg, op2_reg, *L);
7058 break;
7059 case 0x02: //not_equal
7060 __ bne_long(op1_reg, op2_reg, *L);
7061 break;
7062 case 0x03: //above
7063 __ sltu(AT, op2_reg, op1_reg);
7064 __ bne_long(R0, AT, *L);
7065 break;
7066 case 0x04: //above_equal
7067 __ sltu(AT, op1_reg, op2_reg);
7068 __ beq_long(AT, R0, *L);
7069 break;
7070 case 0x05: //below
7071 __ sltu(AT, op1_reg, op2_reg);
7072 __ bne_long(R0, AT, *L);
7073 break;
7074 case 0x06: //below_equal
7075 __ sltu(AT, op2_reg, op1_reg);
7076 __ beq_long(AT, R0, *L);
7077 break;
7078 default:
7079 Unimplemented();
7080 }
7081 %}
7082 ins_pc_relative(1);
7083 ins_pipe( pipe_alu_branch );
7084 %}
// Far-branch form: unsigned int compare of two registers. Ordered cases
// clobber AT via sltu.
7086 instruct branchConIU_reg_reg_long(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7087 match( If cmp (CmpU src1 src2) );
7088 effect(USE labl);
7089 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg_long" %}
7091 ins_encode %{
7092 Register op1 = $src1$$Register;
7093 Register op2 = $src2$$Register;
7094 Label* L = $labl$$label;
7095 int flag = $cmp$$cmpcode;
7097 switch(flag) {
7098 case 0x01: //equal
7099 __ beq_long(op1, op2, *L);
7100 break;
7101 case 0x02: //not_equal
7102 __ bne_long(op1, op2, *L);
7103 break;
7104 case 0x03: //above
7105 __ sltu(AT, op2, op1);
7106 __ bne_long(AT, R0, *L);
7107 break;
7108 case 0x04: //above_equal
7109 __ sltu(AT, op1, op2);
7110 __ beq_long(AT, R0, *L);
7111 break;
7112 case 0x05: //below
7113 __ sltu(AT, op1, op2);
7114 __ bne_long(AT, R0, *L);
7115 break;
7116 case 0x06: //below_equal
7117 __ sltu(AT, op2, op1);
7118 __ beq_long(AT, R0, *L);
7119 break;
7120 default:
7121 Unimplemented();
7122 }
7123 %}
7125 ins_pc_relative(1);
7126 ins_pipe( pipe_alu_branch );
7127 %}
// Far-branch form: unsigned int compare of register vs. arbitrary immediate.
// The immediate is first materialized into AT; the ordered cases then reuse AT
// as both compare operand and result register (AT, AT, op1 / AT, op1, AT),
// which is safe because sltu reads its sources before writing its destination.
7130 instruct branchConIU_reg_imm_long(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7131 match( If cmp (CmpU src1 src2) );
7132 effect(USE labl);
7133 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm_long" %}
7135 ins_encode %{
7136 Register op1 = $src1$$Register;
7137 int val = $src2$$constant;
7138 Label* L = $labl$$label;
7139 int flag = $cmp$$cmpcode;
7141 __ move(AT, val);
7142 switch(flag) {
7143 case 0x01: //equal
7144 __ beq_long(op1, AT, *L);
7145 break;
7146 case 0x02: //not_equal
7147 __ bne_long(op1, AT, *L);
7148 break;
7149 case 0x03: //above
7150 __ sltu(AT, AT, op1);
7151 __ bne_long(R0, AT, *L);
7152 break;
7153 case 0x04: //above_equal
7154 __ sltu(AT, op1, AT);
7155 __ beq_long(AT, R0, *L);
7156 break;
7157 case 0x05: //below
7158 __ sltu(AT, op1, AT);
7159 __ bne_long(R0, AT, *L);
7160 break;
7161 case 0x06: //below_equal
7162 __ sltu(AT, AT, op1);
7163 __ beq_long(AT, R0, *L);
7164 break;
7165 default:
7166 Unimplemented();
7167 }
7168 %}
7170 ins_pc_relative(1);
7171 ins_pipe( pipe_alu_branch );
7172 %}
// Far-branch form: SIGNED int compare of two registers (slt, not sltu).
// Case labels corrected: cmpcodes 0x03..0x06 are the signed relations
// greater/greater_equal/less/less_equal (cf. branchConI_reg_imm0_long).
// Ordered cases clobber AT.
7174 instruct branchConI_reg_reg_long(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7175 match( If cmp (CmpI src1 src2) );
7176 effect(USE labl);
7177 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg_long" %}
7179 ins_encode %{
7180 Register op1 = $src1$$Register;
7181 Register op2 = $src2$$Register;
7182 Label* L = $labl$$label;
7183 int flag = $cmp$$cmpcode;
7185 switch(flag) {
7186 case 0x01: //equal
7187 __ beq_long(op1, op2, *L);
7188 break;
7189 case 0x02: //not_equal
7190 __ bne_long(op1, op2, *L);
7191 break;
7192 case 0x03: //greater (signed)
7193 __ slt(AT, op2, op1);
7194 __ bne_long(R0, AT, *L);
7195 break;
7196 case 0x04: //greater_equal (signed)
7197 __ slt(AT, op1, op2);
7198 __ beq_long(AT, R0, *L);
7199 break;
7200 case 0x05: //less (signed)
7201 __ slt(AT, op1, op2);
7202 __ bne_long(R0, AT, *L);
7203 break;
7204 case 0x06: //less_equal (signed)
7205 __ slt(AT, op2, op1);
7206 __ beq_long(AT, R0, *L);
7207 break;
7208 default:
7209 Unimplemented();
7210 }
7211 %}
7213 ins_pc_relative(1);
7214 ins_pipe( pipe_alu_branch );
7215 %}
// Far-branch form: signed int compare against constant zero. Compares directly
// against R0 (hardwired zero), so no immediate needs materializing; lower
// ins_cost (170) makes the matcher prefer this over the generic imm form.
7217 instruct branchConI_reg_imm0_long(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7218 match( If cmp (CmpI src1 src2) );
7219 effect(USE labl);
7220 ins_cost(170);
7221 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0_long" %}
7223 ins_encode %{
7224 Register op1 = $src1$$Register;
7225 Label* L = $labl$$label;
7226 int flag = $cmp$$cmpcode;
7228 switch(flag) {
7229 case 0x01: //equal
7230 __ beq_long(op1, R0, *L);
7231 break;
7232 case 0x02: //not_equal
7233 __ bne_long(op1, R0, *L);
7234 break;
7235 case 0x03: //greater
7236 __ slt(AT, R0, op1);
7237 __ bne_long(R0, AT, *L);
7238 break;
7239 case 0x04: //greater_equal
7240 __ slt(AT, op1, R0);
7241 __ beq_long(AT, R0, *L);
7242 break;
7243 case 0x05: //less
7244 __ slt(AT, op1, R0);
7245 __ bne_long(R0, AT, *L);
7246 break;
7247 case 0x06: //less_equal
7248 __ slt(AT, R0, op1);
7249 __ beq_long(AT, R0, *L);
7250 break;
7251 default:
7252 Unimplemented();
7253 }
7254 %}
7256 ins_pc_relative(1);
7257 ins_pipe( pipe_alu_branch );
7258 %}
// Far-branch form: signed int compare of register vs. arbitrary immediate.
// Immediate is materialized into AT up front; ordered cases then reuse AT as
// both operand and slt destination (sources are read before the write).
7260 instruct branchConI_reg_imm_long(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7261 match( If cmp (CmpI src1 src2) );
7262 effect(USE labl);
7263 ins_cost(200);
7264 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm_long" %}
7266 ins_encode %{
7267 Register op1 = $src1$$Register;
7268 int val = $src2$$constant;
7269 Label* L = $labl$$label;
7270 int flag = $cmp$$cmpcode;
7272 __ move(AT, val);
7273 switch(flag) {
7274 case 0x01: //equal
7275 __ beq_long(op1, AT, *L);
7276 break;
7277 case 0x02: //not_equal
7278 __ bne_long(op1, AT, *L);
7279 break;
7280 case 0x03: //greater
7281 __ slt(AT, AT, op1);
7282 __ bne_long(R0, AT, *L);
7283 break;
7284 case 0x04: //greater_equal
7285 __ slt(AT, op1, AT);
7286 __ beq_long(AT, R0, *L);
7287 break;
7288 case 0x05: //less
7289 __ slt(AT, op1, AT);
7290 __ bne_long(R0, AT, *L);
7291 break;
7292 case 0x06: //less_equal
7293 __ slt(AT, AT, op1);
7294 __ beq_long(AT, R0, *L);
7295 break;
7296 default:
7297 Unimplemented();
7298 }
7299 %}
7301 ins_pc_relative(1);
7302 ins_pipe( pipe_alu_branch );
7303 %}
// Far-branch form: unsigned int compare against zero. Two cmpcodes degenerate:
//   above_equal (0x04): u >= 0 is always true  -> unconditional branch (beq R0,R0).
//   below       (0x05): u <  0 is never true   -> emit nothing (early return).
7305 instruct branchConIU_reg_imm0_long(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7306 match( If cmp (CmpU src1 zero) );
7307 effect(USE labl);
7308 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0_long" %}
7310 ins_encode %{
7311 Register op1 = $src1$$Register;
7312 Label* L = $labl$$label;
7313 int flag = $cmp$$cmpcode;
7315 switch(flag) {
7316 case 0x01: //equal
7317 __ beq_long(op1, R0, *L);
7318 break;
7319 case 0x02: //not_equal
7320 __ bne_long(op1, R0, *L);
7321 break;
7322 case 0x03: //above (u > 0 <=> u != 0)
7323 __ bne_long(R0, op1, *L);
7324 break;
7325 case 0x04: //above_equal (always taken)
7326 __ beq_long(R0, R0, *L);
7327 break;
7328 case 0x05: //below (never taken: emit no code)
7329 return;
7330 break;
7331 case 0x06: //below_equal (u <= 0 <=> u == 0)
7332 __ beq_long(op1, R0, *L);
7333 break;
7334 default:
7335 Unimplemented();
7336 }
7337 %}
7339 ins_pc_relative(1);
7340 ins_pipe( pipe_alu_branch );
7341 %}
// Far-branch form: unsigned int compare vs. 16-bit immediate. above_equal/below
// use sltiu directly (immediate fits the 16-bit field); the remaining cases
// must materialize the constant into AT first. AT is clobbered throughout.
7344 instruct branchConIU_reg_immI16_long(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7345 match( If cmp (CmpU src1 src2) );
7346 effect(USE labl);
7347 ins_cost(180);
7348 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16_long" %}
7350 ins_encode %{
7351 Register op1 = $src1$$Register;
7352 int val = $src2$$constant;
7353 Label* L = $labl$$label;
7354 int flag = $cmp$$cmpcode;
7356 switch(flag) {
7357 case 0x01: //equal
7358 __ move(AT, val);
7359 __ beq_long(op1, AT, *L);
7360 break;
7361 case 0x02: //not_equal
7362 __ move(AT, val);
7363 __ bne_long(op1, AT, *L);
7364 break;
7365 case 0x03: //above
7366 __ move(AT, val);
7367 __ sltu(AT, AT, op1);
7368 __ bne_long(R0, AT, *L);
7369 break;
7370 case 0x04: //above_equal
7371 __ sltiu(AT, op1, val);
7372 __ beq_long(AT, R0, *L);
7373 break;
7374 case 0x05: //below
7375 __ sltiu(AT, op1, val);
7376 __ bne_long(R0, AT, *L);
7377 break;
7378 case 0x06: //below_equal
7379 __ move(AT, val);
7380 __ sltu(AT, AT, op1);
7381 __ beq_long(AT, R0, *L);
7382 break;
7383 default:
7384 Unimplemented();
7385 }
7386 %}
7388 ins_pc_relative(1);
7389 ins_pipe( pipe_alu_branch );
7390 %}
// Far-branch form: signed 64-bit compare of two long registers (64-bit slt on
// MIPS64). Ordered cases clobber AT.
7393 instruct branchConL_regL_regL_long(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7394 match( If cmp (CmpL src1 src2) );
7395 effect(USE labl);
7396 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL_long" %}
7397 ins_cost(250);
7399 ins_encode %{
7400 Register opr1_reg = as_Register($src1$$reg);
7401 Register opr2_reg = as_Register($src2$$reg);
7403 Label* target = $labl$$label;
7404 int flag = $cmp$$cmpcode;
7406 switch(flag) {
7407 case 0x01: //equal
7408 __ beq_long(opr1_reg, opr2_reg, *target);
7409 break;
7411 case 0x02: //not_equal
7412 __ bne_long(opr1_reg, opr2_reg, *target);
7413 break;
7415 case 0x03: //greater
7416 __ slt(AT, opr2_reg, opr1_reg);
7417 __ bne_long(AT, R0, *target);
7418 break;
7420 case 0x04: //greater_equal
7421 __ slt(AT, opr1_reg, opr2_reg);
7422 __ beq_long(AT, R0, *target);
7423 break;
7425 case 0x05: //less
7426 __ slt(AT, opr1_reg, opr2_reg);
7427 __ bne_long(AT, R0, *target);
7428 break;
7430 case 0x06: //less_equal
7431 __ slt(AT, opr2_reg, opr1_reg);
7432 __ beq_long(AT, R0, *target);
7433 break;
7435 default:
7436 Unimplemented();
7437 }
7438 %}
7441 ins_pc_relative(1);
7442 ins_pipe( pipe_alu_branch );
7443 %}
// Far-branch form: signed 64-bit compare of a long register against constant
// zero, using R0 as the second operand. Cheaper than the generic immL form
// (ins_cost 150), so the matcher prefers it for zero comparisons.
7445 instruct branchConL_regL_immL0_long(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7446 match( If cmp (CmpL src1 zero) );
7447 effect(USE labl);
7448 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0_long" %}
7449 ins_cost(150);
7451 ins_encode %{
7452 Register opr1_reg = as_Register($src1$$reg);
7453 Register opr2_reg = R0;
7455 Label* target = $labl$$label;
7456 int flag = $cmp$$cmpcode;
7458 switch(flag) {
7459 case 0x01: //equal
7460 __ beq_long(opr1_reg, opr2_reg, *target);
7461 break;
7463 case 0x02: //not_equal
7464 __ bne_long(opr1_reg, opr2_reg, *target);
7465 break;
7467 case 0x03: //greater
7468 __ slt(AT, opr2_reg, opr1_reg);
7469 __ bne_long(AT, R0, *target);
7470 break;
7472 case 0x04: //greater_equal
7473 __ slt(AT, opr1_reg, opr2_reg);
7474 __ beq_long(AT, R0, *target);
7475 break;
7477 case 0x05: //less
7478 __ slt(AT, opr1_reg, opr2_reg);
7479 __ bne_long(AT, R0, *target);
7480 break;
7482 case 0x06: //less_equal
7483 __ slt(AT, opr2_reg, opr1_reg);
7484 __ beq_long(AT, R0, *target);
7485 break;
7487 default:
7488 Unimplemented();
7489 }
7490 %}
7493 ins_pc_relative(1);
7494 ins_pipe( pipe_alu_branch );
7495 %}
// Far-branch form: signed 64-bit compare of a long register against an
// arbitrary 64-bit immediate. set64 materializes the constant into AT
// (aliased as opr2_reg); the ordered cases then overwrite AT with the slt
// result, which is safe since slt reads both sources before writing.
7497 instruct branchConL_regL_immL_long(cmpOp cmp, mRegL src1, immL src2, label labl) %{
7498 match( If cmp (CmpL src1 src2) );
7499 effect(USE labl);
7500 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL_long" %}
7501 ins_cost(180);
7503 ins_encode %{
7504 Register opr1_reg = as_Register($src1$$reg);
7505 Register opr2_reg = AT;
7507 Label* target = $labl$$label;
7508 int flag = $cmp$$cmpcode;
7510 __ set64(opr2_reg, $src2$$constant);
7512 switch(flag) {
7513 case 0x01: //equal
7514 __ beq_long(opr1_reg, opr2_reg, *target);
7515 break;
7517 case 0x02: //not_equal
7518 __ bne_long(opr1_reg, opr2_reg, *target);
7519 break;
7521 case 0x03: //greater
7522 __ slt(AT, opr2_reg, opr1_reg);
7523 __ bne_long(AT, R0, *target);
7524 break;
7526 case 0x04: //greater_equal
7527 __ slt(AT, opr1_reg, opr2_reg);
7528 __ beq_long(AT, R0, *target);
7529 break;
7531 case 0x05: //less
7532 __ slt(AT, opr1_reg, opr2_reg);
7533 __ bne_long(AT, R0, *target);
7534 break;
7536 case 0x06: //less_equal
7537 __ slt(AT, opr2_reg, opr1_reg);
7538 __ beq_long(AT, R0, *target);
7539 break;
7541 default:
7542 Unimplemented();
7543 }
7544 %}
7547 ins_pc_relative(1);
7548 ins_pipe( pipe_alu_branch );
7549 %}
7552 //FIXME
// Far-branch form: single-float compare via FPU condition flag (c.cond.s sets
// the flag, bc1t/bc1f branches on it). Uses unordered predicates c_ule/c_ult
// so that for gt/ge a NaN operand makes the predicate true and bc1f falls
// through (NaN compares false, as Java requires).
// NOTE(review): for lt/le, c_ult/c_ule are true on NaN and bc1t WOULD branch;
// presumably C2 only selects this pattern when that direction is acceptable
// (cf. the NaN comment in branchConD_reg_reg_long) — TODO confirm.
7553 instruct branchConF_reg_reg_long(cmpOp cmp, regF src1, regF src2, label labl) %{
7554 match( If cmp (CmpF src1 src2) );
7555 effect(USE labl);
7556 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg_long" %}
7558 ins_encode %{
7559 FloatRegister reg_op1 = $src1$$FloatRegister;
7560 FloatRegister reg_op2 = $src2$$FloatRegister;
7561 Label* L = $labl$$label;
7562 int flag = $cmp$$cmpcode;
7564 switch(flag) {
7565 case 0x01: //equal
7566 __ c_eq_s(reg_op1, reg_op2);
7567 __ bc1t_long(*L);
7568 break;
7569 case 0x02: //not_equal
7570 __ c_eq_s(reg_op1, reg_op2);
7571 __ bc1f_long(*L);
7572 break;
7573 case 0x03: //greater
7574 __ c_ule_s(reg_op1, reg_op2);
7575 __ bc1f_long(*L);
7576 break;
7577 case 0x04: //greater_equal
7578 __ c_ult_s(reg_op1, reg_op2);
7579 __ bc1f_long(*L);
7580 break;
7581 case 0x05: //less
7582 __ c_ult_s(reg_op1, reg_op2);
7583 __ bc1t_long(*L);
7584 break;
7585 case 0x06: //less_equal
7586 __ c_ule_s(reg_op1, reg_op2);
7587 __ bc1t_long(*L);
7588 break;
7589 default:
7590 Unimplemented();
7591 }
7592 %}
7594 ins_pc_relative(1);
7595 ins_pipe(pipe_slow);
7596 %}
// Far-branch form: double compare via FPU condition flag, mirroring
// branchConF_reg_reg_long. not_equal deliberately uses c_eq_d + bc1f rather
// than c_ueq_d: the original comment below explains the NaN != NaN semantics
// required by Double.isNaN().
7598 instruct branchConD_reg_reg_long(cmpOp cmp, regD src1, regD src2, label labl) %{
7599 match( If cmp (CmpD src1 src2) );
7600 effect(USE labl);
7601 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg_long" %}
7603 ins_encode %{
7604 FloatRegister reg_op1 = $src1$$FloatRegister;
7605 FloatRegister reg_op2 = $src2$$FloatRegister;
7606 Label* L = $labl$$label;
7607 int flag = $cmp$$cmpcode;
7609 switch(flag) {
7610 case 0x01: //equal
7611 __ c_eq_d(reg_op1, reg_op2);
7612 __ bc1t_long(*L);
7613 break;
7614 case 0x02: //not_equal
7615 // c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7616 __ c_eq_d(reg_op1, reg_op2);
7617 __ bc1f_long(*L);
7618 break;
7619 case 0x03: //greater
7620 __ c_ule_d(reg_op1, reg_op2);
7621 __ bc1f_long(*L);
7622 break;
7623 case 0x04: //greater_equal
7624 __ c_ult_d(reg_op1, reg_op2);
7625 __ bc1f_long(*L);
7626 break;
7627 case 0x05: //less
7628 __ c_ult_d(reg_op1, reg_op2);
7629 __ bc1t_long(*L);
7630 break;
7631 case 0x06: //less_equal
7632 __ c_ule_d(reg_op1, reg_op2);
7633 __ bc1t_long(*L);
7634 break;
7635 default:
7636 Unimplemented();
7637 }
7638 %}
7640 ins_pc_relative(1);
7641 ins_pipe(pipe_slow);
7642 %}
7645 // ============================================================================
7646 // Branch Instructions -- short offset versions
7648 // Jump Direct
// Short-offset unconditional jump (Goto). The '&L' test checks whether a Label
// was supplied: a null label pointer means the 16-bit offset (0) is emitted
// now and back-patched later. (Taking &L of a reference formed from a possibly
// null pointer is technically UB; this is the legacy idiom used file-wide.)
// The branch delay slot is filled with a nop.
7649 instruct jmpDir_short(label labl) %{
7650 match(Goto);
7651 effect(USE labl);
7653 ins_cost(300);
7654 format %{ "JMP $labl #@jmpDir_short" %}
7656 ins_encode %{
7657 Label &L = *($labl$$label);
7658 if(&L)
7659 __ b(L);
7660 else
7661 __ b(int(0));
7662 __ delayed()->nop();
7663 %}
7665 ins_pipe( pipe_jump );
7666 ins_pc_relative(1);
7667 ins_short_branch(1);
7668 %}
7670 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Short-offset CountedLoopEnd branch on a SIGNED int register compare (slt).
// Case labels corrected: cmpcodes 0x03..0x06 are greater/greater_equal/
// less/less_equal for cmpOp. Ordered cases clobber AT; delay slot gets a nop.
7671 instruct jmpLoopEnd_short(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
7672 match(CountedLoopEnd cop (CmpI src1 src2));
7673 effect(USE labl);
7675 ins_cost(300);
7676 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_short" %}
7677 ins_encode %{
7678 Register op1 = $src1$$Register;
7679 Register op2 = $src2$$Register;
7680 Label &L = *($labl$$label);
7681 int flag = $cop$$cmpcode;
7683 switch(flag) {
7684 case 0x01: //equal
7685 if (&L)
7686 __ beq(op1, op2, L);
7687 else
7688 __ beq(op1, op2, (int)0);
7689 break;
7690 case 0x02: //not_equal
7691 if (&L)
7692 __ bne(op1, op2, L);
7693 else
7694 __ bne(op1, op2, (int)0);
7695 break;
7696 case 0x03: //greater (signed)
7697 __ slt(AT, op2, op1);
7698 if(&L)
7699 __ bne(AT, R0, L);
7700 else
7701 __ bne(AT, R0, (int)0);
7702 break;
7703 case 0x04: //greater_equal (signed)
7704 __ slt(AT, op1, op2);
7705 if(&L)
7706 __ beq(AT, R0, L);
7707 else
7708 __ beq(AT, R0, (int)0);
7709 break;
7710 case 0x05: //less (signed)
7711 __ slt(AT, op1, op2);
7712 if(&L)
7713 __ bne(AT, R0, L);
7714 else
7715 __ bne(AT, R0, (int)0);
7716 break;
7717 case 0x06: //less_equal (signed)
7718 __ slt(AT, op2, op1);
7719 if(&L)
7720 __ beq(AT, R0, L);
7721 else
7722 __ beq(AT, R0, (int)0);
7723 break;
7724 default:
7725 Unimplemented();
7726 }
7727 __ delayed()->nop();
7728 %}
7729 ins_pipe( pipe_jump );
7730 ins_pc_relative(1);
7731 ins_short_branch(1);
7732 %}
// Short-offset CountedLoopEnd branch: signed int register vs. immediate. The
// immediate is first moved into AT (aliased as op2), then AT is ALSO reused as
// the slt destination in the ordered cases — safe, as slt reads its sources
// before writing. Case labels corrected to the signed relation names.
7734 instruct jmpLoopEnd_reg_immI_short(cmpOp cop, mRegI src1, immI src2, label labl) %{
7735 match(CountedLoopEnd cop (CmpI src1 src2));
7736 effect(USE labl);
7738 ins_cost(300);
7739 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_immI_short" %}
7740 ins_encode %{
7741 Register op1 = $src1$$Register;
7742 Register op2 = AT;
7743 Label &L = *($labl$$label);
7744 int flag = $cop$$cmpcode;
7746 __ move(op2, $src2$$constant);
7748 switch(flag) {
7749 case 0x01: //equal
7750 if (&L)
7751 __ beq(op1, op2, L);
7752 else
7753 __ beq(op1, op2, (int)0);
7754 break;
7755 case 0x02: //not_equal
7756 if (&L)
7757 __ bne(op1, op2, L);
7758 else
7759 __ bne(op1, op2, (int)0);
7760 break;
7761 case 0x03: //greater (signed)
7762 __ slt(AT, op2, op1);
7763 if(&L)
7764 __ bne(AT, R0, L);
7765 else
7766 __ bne(AT, R0, (int)0);
7767 break;
7768 case 0x04: //greater_equal (signed)
7769 __ slt(AT, op1, op2);
7770 if(&L)
7771 __ beq(AT, R0, L);
7772 else
7773 __ beq(AT, R0, (int)0);
7774 break;
7775 case 0x05: //less (signed)
7776 __ slt(AT, op1, op2);
7777 if(&L)
7778 __ bne(AT, R0, L);
7779 else
7780 __ bne(AT, R0, (int)0);
7781 break;
7782 case 0x06: //less_equal (signed)
7783 __ slt(AT, op2, op1);
7784 if(&L)
7785 __ beq(AT, R0, L);
7786 else
7787 __ beq(AT, R0, (int)0);
7788 break;
7789 default:
7790 Unimplemented();
7791 }
7792 __ delayed()->nop();
7793 %}
7794 ins_pipe( pipe_jump );
7795 ins_pc_relative(1);
7796 ins_short_branch(1);
7797 %}
7800 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Short-offset branch on a flag value held in AT (MIPS has no condition-code
// register, so instructions like StoreIConditional leave their result in AT).
// Note the branch sense is inverted relative to the cmpcode: 'equal' (0x01)
// emits bne AT,R0 — presumably because the producing instruction sets AT
// nonzero on success — TODO confirm against the StoreIConditional encoding.
7801 instruct jmpCon_flags_short(cmpOp cop, FlagsReg cr, label labl) %{
7802 match(If cop cr);
7803 effect(USE labl);
7805 ins_cost(300);
7806 format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags_short" %}
7808 ins_encode %{
7809 Label &L = *($labl$$label);
7810 switch($cop$$cmpcode) {
7811 case 0x01: //equal
7812 if (&L)
7813 __ bne(AT, R0, L);
7814 else
7815 __ bne(AT, R0, (int)0);
7816 break;
7817 case 0x02: //not equal
7818 if (&L)
7819 __ beq(AT, R0, L);
7820 else
7821 __ beq(AT, R0, (int)0);
7822 break;
7823 default:
7824 Unimplemented();
7825 }
7826 __ delayed()->nop();
7827 %}
7829 ins_pipe( pipe_jump );
7830 ins_pc_relative(1);
7831 ins_short_branch(1);
7832 %}
7834 // Conditional jumps
7835 instruct branchConP_zero_short(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
7836 match(If cmp (CmpP op1 zero));
7837 effect(USE labl);
7839 ins_cost(180);
7840 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero_short" %}
7842 ins_encode %{
7843 Register op1 = $op1$$Register;
7844 Register op2 = R0;
7845 Label &L = *($labl$$label);
7846 int flag = $cmp$$cmpcode;
7848 switch(flag) {
7849 case 0x01: //equal
7850 if (&L)
7851 __ beq(op1, op2, L);
7852 else
7853 __ beq(op1, op2, (int)0);
7854 break;
7855 case 0x02: //not_equal
7856 if (&L)
7857 __ bne(op1, op2, L);
7858 else
7859 __ bne(op1, op2, (int)0);
7860 break;
7861 default:
7862 Unimplemented();
7863 }
7864 __ delayed()->nop();
7865 %}
7867 ins_pc_relative(1);
7868 ins_pipe( pipe_alu_branch );
7869 ins_short_branch(1);
7870 %}
// Short-offset branch: decoded narrow oop compared against null. Valid only
// when the heap is unscaled and unbased (predicate: narrow_oop_base == NULL,
// shift == 0), so the compressed value is null iff the decoded pointer is —
// the compare can then be done directly on the narrow register.
7872 instruct branchConN2P_zero_short(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
7873 match(If cmp (CmpP (DecodeN op1) zero));
7874 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
7875 effect(USE labl);
7877 ins_cost(180);
7878 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero_short" %}
7880 ins_encode %{
7881 Register op1 = $op1$$Register;
7882 Register op2 = R0;
7883 Label &L = *($labl$$label);
7884 int flag = $cmp$$cmpcode;
7886 switch(flag)
7887 {
7888 case 0x01: //equal
7889 if (&L)
7890 __ beq(op1, op2, L);
7891 else
7892 __ beq(op1, op2, (int)0);
7893 break;
7894 case 0x02: //not_equal
7895 if (&L)
7896 __ bne(op1, op2, L);
7897 else
7898 __ bne(op1, op2, (int)0);
7899 break;
7900 default:
7901 Unimplemented();
7902 }
7903 __ delayed()->nop();
7904 %}
7906 ins_pc_relative(1);
7907 ins_pipe( pipe_alu_branch );
7908 ins_short_branch(1);
7909 %}
// Short-offset counterpart of branchConP_long: unsigned pointer compare of two
// registers. Ordered cases clobber AT via sltu; delay slot filled with nop.
7912 instruct branchConP_short(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
7913 match(If cmp (CmpP op1 op2));
7914 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
7915 effect(USE labl);
7917 ins_cost(200);
7918 format %{ "b$cmp $op1, $op2, $labl #@branchConP_short" %}
7920 ins_encode %{
7921 Register op1 = $op1$$Register;
7922 Register op2 = $op2$$Register;
7923 Label &L = *($labl$$label);
7924 int flag = $cmp$$cmpcode;
7926 switch(flag) {
7927 case 0x01: //equal
7928 if (&L)
7929 __ beq(op1, op2, L);
7930 else
7931 __ beq(op1, op2, (int)0);
7932 break;
7933 case 0x02: //not_equal
7934 if (&L)
7935 __ bne(op1, op2, L);
7936 else
7937 __ bne(op1, op2, (int)0);
7938 break;
7939 case 0x03: //above
7940 __ sltu(AT, op2, op1);
7941 if(&L)
7942 __ bne(R0, AT, L);
7943 else
7944 __ bne(R0, AT, (int)0);
7945 break;
7946 case 0x04: //above_equal
7947 __ sltu(AT, op1, op2);
7948 if(&L)
7949 __ beq(AT, R0, L);
7950 else
7951 __ beq(AT, R0, (int)0);
7952 break;
7953 case 0x05: //below
7954 __ sltu(AT, op1, op2);
7955 if(&L)
7956 __ bne(R0, AT, L);
7957 else
7958 __ bne(R0, AT, (int)0);
7959 break;
7960 case 0x06: //below_equal
7961 __ sltu(AT, op2, op1);
7962 if(&L)
7963 __ beq(AT, R0, L);
7964 else
7965 __ beq(AT, R0, (int)0);
7966 break;
7967 default:
7968 Unimplemented();
7969 }
7970 __ delayed()->nop();
7971 %}
7973 ins_pc_relative(1);
7974 ins_pipe( pipe_alu_branch );
7975 ins_short_branch(1);
7976 %}
// Short-offset counterpart of cmpN_null_branch_long: compressed oop vs. null
// encoding (0). Only eq/ne are supported.
7978 instruct cmpN_null_branch_short(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
7979 match(If cmp (CmpN op1 null));
7980 effect(USE labl);
7982 ins_cost(180);
7983 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7984 "BP$cmp $labl @ cmpN_null_branch_short" %}
7985 ins_encode %{
7986 Register op1 = $op1$$Register;
7987 Register op2 = R0;
7988 Label &L = *($labl$$label);
7989 int flag = $cmp$$cmpcode;
7991 switch(flag) {
7992 case 0x01: //equal
7993 if (&L)
7994 __ beq(op1, op2, L);
7995 else
7996 __ beq(op1, op2, (int)0);
7997 break;
7998 case 0x02: //not_equal
7999 if (&L)
8000 __ bne(op1, op2, L);
8001 else
8002 __ bne(op1, op2, (int)0);
8003 break;
8004 default:
8005 Unimplemented();
8006 }
8007 __ delayed()->nop();
8008 %}
8009 //TODO: pipe_branchP or create pipe_branchN LEE
8010 ins_pc_relative(1);
8011 ins_pipe( pipe_alu_branch );
8012 ins_short_branch(1);
8013 %}
// Short-offset counterpart of cmpN_reg_branch_long: two compressed-oop
// registers, compared unsigned (sltu) as pointers require. AT clobbered by the
// ordered cases; delay slot filled with nop.
8015 instruct cmpN_reg_branch_short(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
8016 match(If cmp (CmpN op1 op2));
8017 effect(USE labl);
8019 ins_cost(180);
8020 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
8021 "BP$cmp $labl @ cmpN_reg_branch_short" %}
8022 ins_encode %{
8023 Register op1_reg = $op1$$Register;
8024 Register op2_reg = $op2$$Register;
8025 Label &L = *($labl$$label);
8026 int flag = $cmp$$cmpcode;
8028 switch(flag) {
8029 case 0x01: //equal
8030 if (&L)
8031 __ beq(op1_reg, op2_reg, L);
8032 else
8033 __ beq(op1_reg, op2_reg, (int)0);
8034 break;
8035 case 0x02: //not_equal
8036 if (&L)
8037 __ bne(op1_reg, op2_reg, L);
8038 else
8039 __ bne(op1_reg, op2_reg, (int)0);
8040 break;
8041 case 0x03: //above
8042 __ sltu(AT, op2_reg, op1_reg);
8043 if(&L)
8044 __ bne(R0, AT, L);
8045 else
8046 __ bne(R0, AT, (int)0);
8047 break;
8048 case 0x04: //above_equal
8049 __ sltu(AT, op1_reg, op2_reg);
8050 if(&L)
8051 __ beq(AT, R0, L);
8052 else
8053 __ beq(AT, R0, (int)0);
8054 break;
8055 case 0x05: //below
8056 __ sltu(AT, op1_reg, op2_reg);
8057 if(&L)
8058 __ bne(R0, AT, L);
8059 else
8060 __ bne(R0, AT, (int)0);
8061 break;
8062 case 0x06: //below_equal
8063 __ sltu(AT, op2_reg, op1_reg);
8064 if(&L)
8065 __ beq(AT, R0, L);
8066 else
8067 __ beq(AT, R0, (int)0);
8068 break;
8069 default:
8070 Unimplemented();
8071 }
8072 __ delayed()->nop();
8073 %}
8074 ins_pc_relative(1);
8075 ins_pipe( pipe_alu_branch );
8076 ins_short_branch(1);
8077 %}
// Short-offset counterpart of branchConIU_reg_reg_long: unsigned int compare
// of two registers. Ordered cases clobber AT; delay slot filled with nop.
8079 instruct branchConIU_reg_reg_short(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
8080 match( If cmp (CmpU src1 src2) );
8081 effect(USE labl);
8082 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg_short" %}
8084 ins_encode %{
8085 Register op1 = $src1$$Register;
8086 Register op2 = $src2$$Register;
8087 Label &L = *($labl$$label);
8088 int flag = $cmp$$cmpcode;
8090 switch(flag) {
8091 case 0x01: //equal
8092 if (&L)
8093 __ beq(op1, op2, L);
8094 else
8095 __ beq(op1, op2, (int)0);
8096 break;
8097 case 0x02: //not_equal
8098 if (&L)
8099 __ bne(op1, op2, L);
8100 else
8101 __ bne(op1, op2, (int)0);
8102 break;
8103 case 0x03: //above
8104 __ sltu(AT, op2, op1);
8105 if(&L)
8106 __ bne(AT, R0, L);
8107 else
8108 __ bne(AT, R0, (int)0);
8109 break;
8110 case 0x04: //above_equal
8111 __ sltu(AT, op1, op2);
8112 if(&L)
8113 __ beq(AT, R0, L);
8114 else
8115 __ beq(AT, R0, (int)0);
8116 break;
8117 case 0x05: //below
8118 __ sltu(AT, op1, op2);
8119 if(&L)
8120 __ bne(AT, R0, L);
8121 else
8122 __ bne(AT, R0, (int)0);
8123 break;
8124 case 0x06: //below_equal
8125 __ sltu(AT, op2, op1);
8126 if(&L)
8127 __ beq(AT, R0, L);
8128 else
8129 __ beq(AT, R0, (int)0);
8130 break;
8131 default:
8132 Unimplemented();
8133 }
8134 __ delayed()->nop();
8135 %}
8137 ins_pc_relative(1);
8138 ins_pipe( pipe_alu_branch );
8139 ins_short_branch(1);
8140 %}
// Short-offset counterpart of branchConIU_reg_imm_long: unsigned int compare
// of register vs. immediate. The constant is materialized into AT once, before
// the switch; the ordered cases then reuse AT as the sltu destination (safe:
// sources are read before the write).
8143 instruct branchConIU_reg_imm_short(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
8144 match( If cmp (CmpU src1 src2) );
8145 effect(USE labl);
8146 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm_short" %}
8148 ins_encode %{
8149 Register op1 = $src1$$Register;
8150 int val = $src2$$constant;
8151 Label &L = *($labl$$label);
8152 int flag = $cmp$$cmpcode;
8154 __ move(AT, val);
8155 switch(flag) {
8156 case 0x01: //equal
8157 if (&L)
8158 __ beq(op1, AT, L);
8159 else
8160 __ beq(op1, AT, (int)0);
8161 break;
8162 case 0x02: //not_equal
8163 if (&L)
8164 __ bne(op1, AT, L);
8165 else
8166 __ bne(op1, AT, (int)0);
8167 break;
8168 case 0x03: //above
8169 __ sltu(AT, AT, op1);
8170 if(&L)
8171 __ bne(R0, AT, L);
8172 else
8173 __ bne(R0, AT, (int)0);
8174 break;
8175 case 0x04: //above_equal
8176 __ sltu(AT, op1, AT);
8177 if(&L)
8178 __ beq(AT, R0, L);
8179 else
8180 __ beq(AT, R0, (int)0);
8181 break;
8182 case 0x05: //below
8183 __ sltu(AT, op1, AT);
8184 if(&L)
8185 __ bne(R0, AT, L);
8186 else
8187 __ bne(R0, AT, (int)0);
8188 break;
8189 case 0x06: //below_equal
8190 __ sltu(AT, AT, op1);
8191 if(&L)
8192 __ beq(AT, R0, L);
8193 else
8194 __ beq(AT, R0, (int)0);
8195 break;
8196 default:
8197 Unimplemented();
8198 }
8199 __ delayed()->nop();
8200 %}
8202 ins_pc_relative(1);
8203 ins_pipe( pipe_alu_branch );
8204 ins_short_branch(1);
8205 %}
// Short-offset counterpart of branchConI_reg_reg_long: SIGNED int compare of
// two registers (slt). Case labels corrected: cmpcodes 0x03..0x06 are the
// signed relations greater/greater_equal/less/less_equal. AT clobbered.
8207 instruct branchConI_reg_reg_short(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
8208 match( If cmp (CmpI src1 src2) );
8209 effect(USE labl);
8210 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg_short" %}
8212 ins_encode %{
8213 Register op1 = $src1$$Register;
8214 Register op2 = $src2$$Register;
8215 Label &L = *($labl$$label);
8216 int flag = $cmp$$cmpcode;
8218 switch(flag) {
8219 case 0x01: //equal
8220 if (&L)
8221 __ beq(op1, op2, L);
8222 else
8223 __ beq(op1, op2, (int)0);
8224 break;
8225 case 0x02: //not_equal
8226 if (&L)
8227 __ bne(op1, op2, L);
8228 else
8229 __ bne(op1, op2, (int)0);
8230 break;
8231 case 0x03: //greater (signed)
8232 __ slt(AT, op2, op1);
8233 if(&L)
8234 __ bne(R0, AT, L);
8235 else
8236 __ bne(R0, AT, (int)0);
8237 break;
8238 case 0x04: //greater_equal (signed)
8239 __ slt(AT, op1, op2);
8240 if(&L)
8241 __ beq(AT, R0, L);
8242 else
8243 __ beq(AT, R0, (int)0);
8244 break;
8245 case 0x05: //less (signed)
8246 __ slt(AT, op1, op2);
8247 if(&L)
8248 __ bne(R0, AT, L);
8249 else
8250 __ bne(R0, AT, (int)0);
8251 break;
8252 case 0x06: //less_equal (signed)
8253 __ slt(AT, op2, op1);
8254 if(&L)
8255 __ beq(AT, R0, L);
8256 else
8257 __ beq(AT, R0, (int)0);
8258 break;
8259 default:
8260 Unimplemented();
8261 }
8262 __ delayed()->nop();
8263 %}
8265 ins_pc_relative(1);
8266 ins_pipe( pipe_alu_branch );
8267 ins_short_branch(1);
8268 %}
// Short-offset signed compare against zero. Unlike the long form, this one
// can use the native MIPS compare-with-zero branches (bgtz/bgez/bltz/blez),
// so no slt and no AT clobber for the ordered cases.
8270 instruct branchConI_reg_imm0_short(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
8271 match( If cmp (CmpI src1 src2) );
8272 effect(USE labl);
8273 ins_cost(170);
8274 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0_short" %}
8276 ins_encode %{
8277 Register op1 = $src1$$Register;
8278 Label &L = *($labl$$label);
8279 int flag = $cmp$$cmpcode;
8281 switch(flag) {
8282 case 0x01: //equal
8283 if (&L)
8284 __ beq(op1, R0, L);
8285 else
8286 __ beq(op1, R0, (int)0);
8287 break;
8288 case 0x02: //not_equal
8289 if (&L)
8290 __ bne(op1, R0, L);
8291 else
8292 __ bne(op1, R0, (int)0);
8293 break;
8294 case 0x03: //greater
8295 if(&L)
8296 __ bgtz(op1, L);
8297 else
8298 __ bgtz(op1, (int)0);
8299 break;
8300 case 0x04: //greater_equal
8301 if(&L)
8302 __ bgez(op1, L);
8303 else
8304 __ bgez(op1, (int)0);
8305 break;
8306 case 0x05: //less
8307 if(&L)
8308 __ bltz(op1, L);
8309 else
8310 __ bltz(op1, (int)0);
8311 break;
8312 case 0x06: //less_equal
8313 if(&L)
8314 __ blez(op1, L);
8315 else
8316 __ blez(op1, (int)0);
8317 break;
8318 default:
8319 Unimplemented();
8320 }
8321 __ delayed()->nop();
8322 %}
8324 ins_pc_relative(1);
8325 ins_pipe( pipe_alu_branch );
8326 ins_short_branch(1);
8327 %}
// Short-offset counterpart of branchConI_reg_imm_long: signed int compare of
// register vs. immediate. Immediate is moved into AT first; ordered cases then
// reuse AT as the slt destination (sources read before write).
8330 instruct branchConI_reg_imm_short(cmpOp cmp, mRegI src1, immI src2, label labl) %{
8331 match( If cmp (CmpI src1 src2) );
8332 effect(USE labl);
8333 ins_cost(200);
8334 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm_short" %}
8336 ins_encode %{
8337 Register op1 = $src1$$Register;
8338 int val = $src2$$constant;
8339 Label &L = *($labl$$label);
8340 int flag = $cmp$$cmpcode;
8342 __ move(AT, val);
8343 switch(flag) {
8344 case 0x01: //equal
8345 if (&L)
8346 __ beq(op1, AT, L);
8347 else
8348 __ beq(op1, AT, (int)0);
8349 break;
8350 case 0x02: //not_equal
8351 if (&L)
8352 __ bne(op1, AT, L);
8353 else
8354 __ bne(op1, AT, (int)0);
8355 break;
8356 case 0x03: //greater
8357 __ slt(AT, AT, op1);
8358 if(&L)
8359 __ bne(R0, AT, L);
8360 else
8361 __ bne(R0, AT, (int)0);
8362 break;
8363 case 0x04: //greater_equal
8364 __ slt(AT, op1, AT);
8365 if(&L)
8366 __ beq(AT, R0, L);
8367 else
8368 __ beq(AT, R0, (int)0);
8369 break;
8370 case 0x05: //less
8371 __ slt(AT, op1, AT);
8372 if(&L)
8373 __ bne(R0, AT, L);
8374 else
8375 __ bne(R0, AT, (int)0);
8376 break;
8377 case 0x06: //less_equal
8378 __ slt(AT, AT, op1);
8379 if(&L)
8380 __ beq(AT, R0, L);
8381 else
8382 __ beq(AT, R0, (int)0);
8383 break;
8384 default:
8385 Unimplemented();
8386 }
8387 __ delayed()->nop();
8388 %}
8390 ins_pc_relative(1);
8391 ins_pipe( pipe_alu_branch );
8392 ins_short_branch(1);
8393 %}
// Short-offset counterpart of branchConIU_reg_imm0_long: unsigned compare vs.
// zero. Degenerate cmpcodes as in the long form:
//   above_equal (0x04): always true  -> unconditional beq R0,R0.
//   below       (0x05): never true   -> emit nothing (early return).
8395 instruct branchConIU_reg_imm0_short(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
8396 match( If cmp (CmpU src1 zero) );
8397 effect(USE labl);
8398 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0_short" %}
8400 ins_encode %{
8401 Register op1 = $src1$$Register;
8402 Label &L = *($labl$$label);
8403 int flag = $cmp$$cmpcode;
8405 switch(flag) {
8406 case 0x01: //equal
8407 if (&L)
8408 __ beq(op1, R0, L);
8409 else
8410 __ beq(op1, R0, (int)0);
8411 break;
8412 case 0x02: //not_equal
8413 if (&L)
8414 __ bne(op1, R0, L);
8415 else
8416 __ bne(op1, R0, (int)0);
8417 break;
8418 case 0x03: //above (u > 0 <=> u != 0)
8419 if(&L)
8420 __ bne(R0, op1, L);
8421 else
8422 __ bne(R0, op1, (int)0);
8423 break;
8424 case 0x04: //above_equal (always taken)
8425 if(&L)
8426 __ beq(R0, R0, L);
8427 else
8428 __ beq(R0, R0, (int)0);
8429 break;
8430 case 0x05: //below (never taken: emit no code, skipping the delay-slot nop too)
8431 return;
8432 break;
8433 case 0x06: //below_equal (u <= 0 <=> u == 0)
8434 if(&L)
8435 __ beq(op1, R0, L);
8436 else
8437 __ beq(op1, R0, (int)0);
8438 break;
8439 default:
8440 Unimplemented();
8441 }
8442 __ delayed()->nop();
8443 %}
8445 ins_pc_relative(1);
8446 ins_pipe( pipe_alu_branch );
8447 ins_short_branch(1);
8448 %}
// Short-offset counterpart of branchConIU_reg_immI16_long: unsigned compare
// vs. 16-bit immediate. above_equal/below use sltiu directly; the other cases
// materialize the constant into AT first. AT clobbered; delay slot nop.
8451 instruct branchConIU_reg_immI16_short(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
8452 match( If cmp (CmpU src1 src2) );
8453 effect(USE labl);
8454 ins_cost(180);
8455 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16_short" %}
8457 ins_encode %{
8458 Register op1 = $src1$$Register;
8459 int val = $src2$$constant;
8460 Label &L = *($labl$$label);
8461 int flag = $cmp$$cmpcode;
8463 switch(flag) {
8464 case 0x01: //equal
8465 __ move(AT, val);
8466 if (&L)
8467 __ beq(op1, AT, L);
8468 else
8469 __ beq(op1, AT, (int)0);
8470 break;
8471 case 0x02: //not_equal
8472 __ move(AT, val);
8473 if (&L)
8474 __ bne(op1, AT, L);
8475 else
8476 __ bne(op1, AT, (int)0);
8477 break;
8478 case 0x03: //above
8479 __ move(AT, val);
8480 __ sltu(AT, AT, op1);
8481 if(&L)
8482 __ bne(R0, AT, L);
8483 else
8484 __ bne(R0, AT, (int)0);
8485 break;
8486 case 0x04: //above_equal
8487 __ sltiu(AT, op1, val);
8488 if(&L)
8489 __ beq(AT, R0, L);
8490 else
8491 __ beq(AT, R0, (int)0);
8492 break;
8493 case 0x05: //below
8494 __ sltiu(AT, op1, val);
8495 if(&L)
8496 __ bne(R0, AT, L);
8497 else
8498 __ bne(R0, AT, (int)0);
8499 break;
8500 case 0x06: //below_equal
8501 __ move(AT, val);
8502 __ sltu(AT, AT, op1);
8503 if(&L)
8504 __ beq(AT, R0, L);
8505 else
8506 __ beq(AT, R0, (int)0);
8507 break;
8508 default:
8509 Unimplemented();
8510 }
8511 __ delayed()->nop();
8512 %}
8514 ins_pc_relative(1);
8515 ins_pipe( pipe_alu_branch );
8516 ins_short_branch(1);
8517 %}
// Short-form branch on a signed 64-bit (long) register/register compare.
// Clobbers AT in the ordered cases.  Unlike the int variants above, each
// case emits its OWN delay-slot nop (there is no shared nop after the switch).
// NOTE(review): `&target` can never be null; the `else` arms appear dead.
instruct branchConL_regL_regL_short(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL_short" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        // AT = (src2 < src1) signed; branch when set.
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        // AT = (src1 < src2); branch when clear.
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: //less_equal
        // src1 <= src2  <=>  !(src2 < src1); branch when AT clear.
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-form branch on a signed long compared against the constant zero.
// Uses the single-operand compare-with-zero branches (bgtz/bgez/blez) where
// the ISA provides them; only the "less" case needs AT.  One shared
// delay-slot nop follows the switch.
// NOTE(review): `&target` can never be null; the `else` arms appear dead.
instruct branchConL_regL_immL0_short(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0_short" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        // No bltz path here: AT = (src1 < 0), branch when set.
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
// Short-form branch on a signed long compared against an arbitrary 64-bit
// immediate.  The constant is first materialized into AT (set64) before the
// switch; in the ordered cases AT is then reused for the slt result (slt
// reads AT before overwriting it, so this is safe).  One shared delay-slot
// nop follows the switch.
// NOTE(review): `&target` can never be null; the `else` arms appear dead.
instruct branchConL_regL_immL_short(cmpOp cmp, mRegL src1, immL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL_short" %}
  ins_cost(180);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = AT;

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ set64(opr2_reg, $src2$$constant);

    switch(flag) {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
  ins_short_branch(1);
%}
//FIXME
// Short-form branch on a single-precision float compare, via the FPU
// condition flag (c.cond.s then bc1t/bc1f).
// Unordered (NaN) handling follows the C2 convention that an unordered
// compare behaves like "less": the predicates are chosen so that NaN
//   - never satisfies equal / greater / greater_equal  (c_eq, c_ule+bc1f,
//     c_ult+bc1f: the unordered bit falls on the not-taken side), and
//   - always satisfies not_equal / less / less_equal   (c_eq+bc1f,
//     c_ult+bc1t, c_ule+bc1t).
// One shared delay-slot nop follows the switch.
// NOTE(review): `&L` can never be null; the `else` arms appear dead.
instruct branchConF_reg_reg_short(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg_short" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
  ins_short_branch(1);
%}
// Short-form branch on a double-precision float compare (c.cond.d then
// bc1t/bc1f).  Same unordered/NaN predicate choices as the single-precision
// variant above: NaN never takes equal/greater/greater_equal and always
// takes not_equal/less/less_equal.  One shared delay-slot nop follows.
// NOTE(review): `&L` can never be null; the `else` arms appear dead.
instruct branchConD_reg_reg_short(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg_short" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
  ins_short_branch(1);
%}
8876 // =================== End of branch instructions ==========================
// Call Runtime Instruction
// Direct call into the VM runtime; the actual call sequence lives in the
// Java_To_Runtime encoding class (defined elsewhere in this file).
// Aligned to 16 bytes per ins_alignment.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
8892 //------------------------MemBar Instructions-------------------------------
8893 //Memory barrier flavors
// Acquire barrier: implemented conservatively as a full MIPS `sync`.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(400);

  format %{ "MEMBAR-acquire @ membar_acquire" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(empty);
%}
// Load fence: full `sync`, conservatively ordering all prior loads.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Empty encoding: the acquire semantics are already provided by the CAS
// inside the preceding FastLock, so no instruction is emitted (size 0).
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: full `sync`.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(400);

  format %{ "MEMBAR-release @ membar_release" %}

  ins_encode %{
    // Attention: DO NOT DELETE THIS GUY!
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Store fence: full `sync`, conservatively ordering all prior stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Empty encoding: release semantics are provided by FastUnlock itself,
// so nothing is emitted here (size 0).
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Full (volatile) barrier: `sync`, skipped entirely on uniprocessor systems.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Zero-size variant of the volatile barrier, selected when the matcher can
// prove a trailing store-load barrier already exists (post_store_load_barrier).
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// Store-store barrier: implemented conservatively as a full `sync`.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(400);
  format %{ "MEMBAR-storestore @ membar_storestore" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(empty);
%}
9002 //----------Move Instructions--------------------------------------------------
9003 instruct castX2P(mRegP dst, mRegL src) %{
9004 match(Set dst (CastX2P src));
9005 format %{ "castX2P $dst, $src @ castX2P" %}
9006 ins_encode %{
9007 Register src = $src$$Register;
9008 Register dst = $dst$$Register;
9010 if(src != dst)
9011 __ move(dst, src);
9012 %}
9013 ins_cost(10);
9014 ins_pipe( ialu_regI_mov );
9015 %}
9017 instruct castP2X(mRegL dst, mRegP src ) %{
9018 match(Set dst (CastP2X src));
9020 format %{ "mov $dst, $src\t #@castP2X" %}
9021 ins_encode %{
9022 Register src = $src$$Register;
9023 Register dst = $dst$$Register;
9025 if(src != dst)
9026 __ move(dst, src);
9027 %}
9028 ins_pipe( ialu_regI_mov );
9029 %}
9031 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
9032 match(Set dst (MoveF2I src));
9033 effect(DEF dst, USE src);
9034 ins_cost(85);
9035 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
9036 ins_encode %{
9037 Register dst = as_Register($dst$$reg);
9038 FloatRegister src = as_FloatRegister($src$$reg);
9040 __ mfc1(dst, src);
9041 %}
9042 ins_pipe( pipe_slow );
9043 %}
9045 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
9046 match(Set dst (MoveI2F src));
9047 effect(DEF dst, USE src);
9048 ins_cost(85);
9049 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
9050 ins_encode %{
9051 Register src = as_Register($src$$reg);
9052 FloatRegister dst = as_FloatRegister($dst$$reg);
9054 __ mtc1(src, dst);
9055 %}
9056 ins_pipe( pipe_slow );
9057 %}
9059 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
9060 match(Set dst (MoveD2L src));
9061 effect(DEF dst, USE src);
9062 ins_cost(85);
9063 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
9064 ins_encode %{
9065 Register dst = as_Register($dst$$reg);
9066 FloatRegister src = as_FloatRegister($src$$reg);
9068 __ dmfc1(dst, src);
9069 %}
9070 ins_pipe( pipe_slow );
9071 %}
9073 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
9074 match(Set dst (MoveL2D src));
9075 effect(DEF dst, USE src);
9076 ins_cost(85);
9077 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
9078 ins_encode %{
9079 FloatRegister dst = as_FloatRegister($dst$$reg);
9080 Register src = as_Register($src$$reg);
9082 __ dmtc1(src, dst);
9083 %}
9084 ins_pipe( pipe_slow );
9085 %}
9087 //----------Conditional Move---------------------------------------------------
9088 // Conditional move
//----------Conditional Move---------------------------------------------------
// Conditional move
// dst = (tmp1 <cop> tmp2, signed int compare) ? src : dst.  Clobbers AT:
// equal/not_equal test the 32-bit difference, ordered cases use slt, then
// movz (move if AT==0) / movn (move if AT!=0) select the result branch-free.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, unsigned pointer compare) ? src : dst.
// Clobbers AT; full-width subu/sltu since pointers are 64-bit.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, unsigned narrow-oop compare) ? src : dst.
// Clobbers AT.  Narrow oops are 32-bit values, hence subu32 for equality;
// sltu works on the zero-extended forms for the ordered cases.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, unsigned int compare) ? src : dst.  Clobbers AT.
// subu32 gives a zero/non-zero equality test on the 32-bit values; sltu on
// the sign-extended registers preserves 32-bit unsigned ordering.
instruct cmovP_cmpU_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpU_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpU_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9302 instruct cmovP_cmpF_reg_reg(mRegP dst, mRegP src, regF tmp1, regF tmp2, cmpOp cop ) %{
9303 match(Set dst (CMoveP (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9304 ins_cost(80);
9305 format %{
9306 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpF_reg_reg\n"
9307 "\tCMOV $dst,$src \t @cmovP_cmpF_reg_reg"
9308 %}
9310 ins_encode %{
9311 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9312 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9313 Register dst = $dst$$Register;
9314 Register src = $src$$Register;
9315 int flag = $cop$$cmpcode;
9317 switch(flag) {
9318 case 0x01: //equal
9319 __ c_eq_s(reg_op1, reg_op2);
9320 __ movt(dst, src);
9321 break;
9322 case 0x02: //not_equal
9323 __ c_eq_s(reg_op1, reg_op2);
9324 __ movf(dst, src);
9325 break;
9326 case 0x03: //greater
9327 __ c_ole_s(reg_op1, reg_op2);
9328 __ movf(dst, src);
9329 break;
9330 case 0x04: //greater_equal
9331 __ c_olt_s(reg_op1, reg_op2);
9332 __ movf(dst, src);
9333 break;
9334 case 0x05: //less
9335 __ c_ult_s(reg_op1, reg_op2);
9336 __ movt(dst, src);
9337 break;
9338 case 0x06: //less_equal
9339 __ c_ule_s(reg_op1, reg_op2);
9340 __ movt(dst, src);
9341 break;
9342 default:
9343 Unimplemented();
9344 }
9345 %}
9346 ins_pipe( pipe_slow );
9347 %}
// dst = (tmp1 <cop> tmp2, unsigned narrow-oop compare) ? src : dst.
// Clobbers AT.  subu32 for the 32-bit equality test; sltu for ordering.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, unsigned pointer compare) ? src : dst.
// Clobbers AT; full-width subu/sltu since pointers are 64-bit.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9455 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
9456 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9457 ins_cost(80);
9458 format %{
9459 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
9460 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
9461 %}
9462 ins_encode %{
9463 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9464 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9465 Register dst = as_Register($dst$$reg);
9466 Register src = as_Register($src$$reg);
9468 int flag = $cop$$cmpcode;
9470 switch(flag) {
9471 case 0x01: //equal
9472 __ c_eq_d(reg_op1, reg_op2);
9473 __ movt(dst, src);
9474 break;
9475 case 0x02: //not_equal
9476 __ c_eq_d(reg_op1, reg_op2);
9477 __ movf(dst, src);
9478 break;
9479 case 0x03: //greater
9480 __ c_ole_d(reg_op1, reg_op2);
9481 __ movf(dst, src);
9482 break;
9483 case 0x04: //greater_equal
9484 __ c_olt_d(reg_op1, reg_op2);
9485 __ movf(dst, src);
9486 break;
9487 case 0x05: //less
9488 __ c_ult_d(reg_op1, reg_op2);
9489 __ movt(dst, src);
9490 break;
9491 case 0x06: //less_equal
9492 __ c_ule_d(reg_op1, reg_op2);
9493 __ movt(dst, src);
9494 break;
9495 default:
9496 Unimplemented();
9497 }
9498 %}
9500 ins_pipe( pipe_slow );
9501 %}
// dst = (tmp1 <cop> tmp2, unsigned narrow-oop compare) ? src : dst.
// Clobbers AT.  subu32 for the 32-bit equality test; sltu for ordering.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, unsigned int compare) ? src : dst.  Clobbers AT.
// NOTE(review): equality here uses full-width subu while cmovP_cmpU_reg_reg
// uses subu32 for the same CmpU pattern — both yield zero iff the
// sign-extended 32-bit values are equal, but the inconsistency is worth
// confirming.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, signed long compare) ? src : dst.  Clobbers AT.
// subu/slt here operate on the full 64-bit registers, matching the other
// 64-bit compares (CmpP/CmpL) in this file.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, signed long compare) ? src : dst.  Clobbers AT.
// Same 64-bit subu/slt scheme as cmovI_cmpL_reg_reg above.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9717 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
9718 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9719 ins_cost(80);
9720 format %{
9721 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
9722 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
9723 %}
9724 ins_encode %{
9725 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9726 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9727 Register dst = as_Register($dst$$reg);
9728 Register src = as_Register($src$$reg);
9730 int flag = $cop$$cmpcode;
9732 switch(flag) {
9733 case 0x01: //equal
9734 __ c_eq_d(reg_op1, reg_op2);
9735 __ movt(dst, src);
9736 break;
9737 case 0x02: //not_equal
9738 // See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
9739 __ c_eq_d(reg_op1, reg_op2);
9740 __ movf(dst, src);
9741 break;
9742 case 0x03: //greater
9743 __ c_ole_d(reg_op1, reg_op2);
9744 __ movf(dst, src);
9745 break;
9746 case 0x04: //greater_equal
9747 __ c_olt_d(reg_op1, reg_op2);
9748 __ movf(dst, src);
9749 break;
9750 case 0x05: //less
9751 __ c_ult_d(reg_op1, reg_op2);
9752 __ movt(dst, src);
9753 break;
9754 case 0x06: //less_equal
9755 __ c_ule_d(reg_op1, reg_op2);
9756 __ movt(dst, src);
9757 break;
9758 default:
9759 Unimplemented();
9760 }
9761 %}
9763 ins_pipe( pipe_slow );
9764 %}
// dst = (tmp1 <cop> tmp2, unsigned pointer compare) ? src : dst.
// Clobbers AT; full-width subu/sltu since pointers are 64-bit.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = (tmp1 <cop> tmp2, SIGNED int compare) ? src : dst.  Clobbers AT.
// The cop operand is cmpOp (signed) and the code correctly uses slt; the
// original case labels said above/above_equal/below/below_equal (unsigned
// names) — corrected below to the signed names the code implements.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed; label was "above")
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed; label was "above_equal")
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed; label was "below")
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed; label was "below_equal")
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by an unsigned pointer compare:
// dst = src iff (tmp1 cop tmp2); pointers compare unsigned (sltu).
instruct cmovL_cmpP_reg_reg(mRegL dst, mRegL src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        // 64-bit subtract: AT == 0 iff the two pointers are equal.
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop driven by a signed 64-bit long compare:
// dst = src iff (tmp1 cop tmp2), otherwise dst is unchanged.
instruct cmovN_cmpL_reg_reg(mRegN dst, mRegN src, mRegL tmp1, mRegL tmp2, cmpOp cop) %{
  match(Set dst (CMoveN (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovN_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovN_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        // Full 64-bit subtract; AT == 0 iff the longs are equal.
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop driven by a signed 32-bit integer compare:
// dst = src iff (tmp1 cop tmp2), otherwise dst is unchanged.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater (signed: this is a CmpI with cmpOp, hence slt)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by an unsigned 32-bit integer compare.
// 32-bit values are kept sign-extended, so a 64-bit sltu still orders them
// correctly as unsigned 32-bit quantities.
instruct cmovL_cmpU_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by a single-float compare. The c_* FP
// compare sets the FP condition flag; movt/movf then move on flag true/false.
// Note: c_ole is "ordered and <=", so the movf used for 'greater' also moves
// on unordered (NaN) operands; the c_ult/c_ule variants are true on NaN.
instruct cmovL_cmpF_reg_reg(mRegL dst, mRegL src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by a signed 32-bit integer compare:
// dst = src iff (tmp1 cop tmp2), otherwise dst is unchanged.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by a signed 64-bit long compare:
// dst = src iff (tmp1 cop tmp2), otherwise dst is unchanged.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        // 64-bit subtract; AT == 0 iff opr1 == opr2 (only equality is
        // derived from the difference, so overflow is harmless here).
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by an unsigned narrow-oop compare:
// dst = src iff (tmp1 cop tmp2); narrow oops order unsigned (sltu).
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long driven by a double compare. The c_*_d compare
// sets the FP condition flag; movt/movf move on flag true/false. As with the
// float variant, the ordered compares plus movf make 'greater'/'greater_equal'
// also move on unordered (NaN) operands.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double driven by a double compare; movt_d/movf_d move
// the FP register pair on the FP condition flag set by the preceding c_*_d.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        // Single flag bit: reuse c_eq and move on flag false.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float driven by a signed 32-bit integer compare.
// Implemented with a branch around the FP move (skip the mov_s when the
// condition is false); each branch carries an explicit nop delay slot.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag) {
      case 0x01: //equal
        // Branch over the move when op1 != op2.
        __ bne(op1, op2, L);
        __ delayed()->nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ delayed()->nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ delayed()->nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ delayed()->nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double driven by a signed 32-bit integer compare.
// Same branch-around-the-move scheme as cmovF_cmpI_reg_reg, but with mov_d.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag) {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double driven by a pointer compare.
// NOTE(review): this variant matches cmpOp (signed) and orders pointers with
// slt, while the other CmpP rules use cmpOpU/sltu. In practice only eq/ne
// BoolTests are generated for pointers, but the ordered cases look
// inconsistent — confirm against the matcher before relying on them.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag) {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ delayed()->nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
10585 //FIXME
// Conditional move of an int driven by a single-float compare; c_*_s sets
// the FP condition flag, movt/movf move the GPR on flag true/false.
// (Same NaN caveat as the other FP-compare cmovs: the movf-based 'greater'
// cases also move on unordered operands.)
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float driven by a single-float compare; movt_s/movf_s
// move the FP register on the FP condition flag set by the preceding c_*_s.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        // Single flag bit: reuse c_eq and move on flag false.
        __ c_eq_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
10680 // Manifest a CmpL result in an integer register. Very painful.
10681 // This is the test to avoid.
10682 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
10683 match(Set dst (CmpL3 src1 src2));
10684 ins_cost(1000);
10685 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
10686 ins_encode %{
10687 Register opr1 = as_Register($src1$$reg);
10688 Register opr2 = as_Register($src2$$reg);
10689 Register dst = as_Register($dst$$reg);
10691 Label Done;
10693 __ subu(AT, opr1, opr2);
10694 __ bltz(AT, Done);
10695 __ delayed()->daddiu(dst, R0, -1);
10697 __ move(dst, 1);
10698 __ movz(dst, R0, AT);
10700 __ bind(Done);
10701 %}
10702 ins_pipe( pipe_slow );
10703 %}
//
// less_result = -1
// greater_result = 1
// equal_result = 0
// nan_result = -1
//
// Three-way single-float compare: dst = -1 / 0 / 1 for src1 <, ==, > src2.
// NaN operands produce -1 because c_ult ("unordered or less than") is true
// for unordered inputs, taking the bc1t branch with dst already set to -1
// in the delay slot.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1); // delay slot: dst = -1 (also the < and NaN result)

    // Not less and not unordered: distinguish == (0) from > (1).
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0); // equal -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Three-way double compare: dst = -1 / 0 / 1 for src1 <, ==, > src2.
// NaN operands produce -1 (c_ult is true on unordered; the delay slot has
// already loaded -1 when the bc1t branch is taken).
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1); // delay slot: dst = -1 (also the < and NaN result)

    // Not less and not unordered: distinguish == (0) from > (1).
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0); // equal -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero-fill a block of memory. cnt is a count of doublewords, not bytes:
// the loop stores one 8-byte zero (sd) per count and advances by wordSize,
// matching the "Clear doublewords" format string. (The old comment claiming
// cnt was a byte count disagreed with the code.)
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // base points at the first doubleword to clear; cnt doublewords follow.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    __ beq(num, R0, done);              // nothing to do for cnt == 0
    __ delayed()->daddu(AT, base, R0);  // delay slot: AT = running store address

    __ move(T9, num); /* T9 = remaining doubleword count */

    __ bind(Loop);
    __ sd(R0, AT, 0);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize); // delay slot: advance to next doubleword

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Compare two UTF-16 strings (str1/cnt1 vs str2/cnt2). Returns the
// difference of the first non-matching char pair, or the length difference
// if one string is a prefix of the other. All four inputs are clobbered.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // str1/str2 point at the first character; cnt1/cnt2 are char counts.
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done; // NOTE: L is declared but unused

    // compute the difference of the two lengths (in result); this is the
    // answer if the shorter string is a prefix of the longer one
    __ subu(result, cnt1, cnt2);

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop); // Loop begin
    __ beq(cnt1, R0, done);
    __ delayed()->lhu(AT, str1, 0);; // delay slot: AT = current char of str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2); // delay slot: advance str1 either way
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1); // Loop end

    __ bind(haveResult);
    // chars differ: result = char1 - char2
    __ subu(result, AT, cnt2);

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
10830 // intrinsic optimization
// Char-wise equality of two UTF-16 strings of the same length cnt:
// result = 1 if equal (or the two pointers are identical), else 0.
// str1, str2 and cnt are clobbered; temp is a scratch register.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // str1/str2 point at the first character; cnt is the char count.
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done); // same char[] ?
    __ delayed()->daddiu(result, R0, 1); // delay slot: optimistic result = 1

    __ bind(Loop); // Loop begin
    __ beq(cnt, R0, done);
    __ delayed()->daddiu(result, R0, 1); // count == 0: all chars matched

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0); // delay slot: result = 0; overwritten if loop continues
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
10871 //----------Arithmetic Instructions-------------------------------------------
10872 //----------Addition Instructions---------------------------------------------
10873 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10874 match(Set dst (AddI src1 src2));
10876 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
10877 ins_encode %{
10878 Register dst = $dst$$Register;
10879 Register src1 = $src1$$Register;
10880 Register src2 = $src2$$Register;
10881 __ addu32(dst, src1, src2);
10882 %}
10883 ins_pipe( ialu_regI_regI );
10884 %}
10886 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
10887 match(Set dst (AddI src1 src2));
10889 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
10890 ins_encode %{
10891 Register dst = $dst$$Register;
10892 Register src1 = $src1$$Register;
10893 int imm = $src2$$constant;
10895 if(Assembler::is_simm16(imm)) {
10896 __ addiu32(dst, src1, imm);
10897 } else {
10898 __ move(AT, imm);
10899 __ addu32(dst, src1, AT);
10900 }
10901 %}
10902 ins_pipe( ialu_regI_regI );
10903 %}
10905 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
10906 match(Set dst (AddP src1 src2));
10908 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
10910 ins_encode %{
10911 Register dst = $dst$$Register;
10912 Register src1 = $src1$$Register;
10913 Register src2 = $src2$$Register;
10914 __ daddu(dst, src1, src2);
10915 %}
10917 ins_pipe( ialu_regI_regI );
10918 %}
10920 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
10921 match(Set dst (AddP src1 (ConvI2L src2)));
10923 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
10925 ins_encode %{
10926 Register dst = $dst$$Register;
10927 Register src1 = $src1$$Register;
10928 Register src2 = $src2$$Register;
10929 __ daddu(dst, src1, src2);
10930 %}
10932 ins_pipe( ialu_regI_regI );
10933 %}
10935 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
10936 match(Set dst (AddP src1 src2));
10938 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
10939 ins_encode %{
10940 Register src1 = $src1$$Register;
10941 long src2 = $src2$$constant;
10942 Register dst = $dst$$Register;
10944 if(Assembler::is_simm16(src2)) {
10945 __ daddiu(dst, src1, src2);
10946 } else {
10947 __ set64(AT, src2);
10948 __ daddu(dst, src1, AT);
10949 }
10950 %}
10951 ins_pipe( ialu_regI_imm16 );
10952 %}
10954 // Add Long Register with Register
10955 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10956 match(Set dst (AddL src1 src2));
10957 ins_cost(200);
10958 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
10960 ins_encode %{
10961 Register dst_reg = as_Register($dst$$reg);
10962 Register src1_reg = as_Register($src1$$reg);
10963 Register src2_reg = as_Register($src2$$reg);
10965 __ daddu(dst_reg, src1_reg, src2_reg);
10966 %}
10968 ins_pipe( ialu_regL_regL );
10969 %}
10971 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
10972 %{
10973 match(Set dst (AddL src1 src2));
10975 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
10976 ins_encode %{
10977 Register dst_reg = as_Register($dst$$reg);
10978 Register src1_reg = as_Register($src1$$reg);
10979 int src2_imm = $src2$$constant;
10981 __ daddiu(dst_reg, src1_reg, src2_imm);
10982 %}
10984 ins_pipe( ialu_regL_regL );
10985 %}
10987 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
10988 %{
10989 match(Set dst (AddL (ConvI2L src1) src2));
10991 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
10992 ins_encode %{
10993 Register dst_reg = as_Register($dst$$reg);
10994 Register src1_reg = as_Register($src1$$reg);
10995 int src2_imm = $src2$$constant;
10997 __ daddiu(dst_reg, src1_reg, src2_imm);
10998 %}
11000 ins_pipe( ialu_regL_regL );
11001 %}
11003 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
11004 match(Set dst (AddL (ConvI2L src1) src2));
11005 ins_cost(200);
11006 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
11008 ins_encode %{
11009 Register dst_reg = as_Register($dst$$reg);
11010 Register src1_reg = as_Register($src1$$reg);
11011 Register src2_reg = as_Register($src2$$reg);
11013 __ daddu(dst_reg, src1_reg, src2_reg);
11014 %}
11016 ins_pipe( ialu_regL_regL );
11017 %}
11019 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
11020 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
11021 ins_cost(200);
11022 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
11024 ins_encode %{
11025 Register dst_reg = as_Register($dst$$reg);
11026 Register src1_reg = as_Register($src1$$reg);
11027 Register src2_reg = as_Register($src2$$reg);
11029 __ daddu(dst_reg, src1_reg, src2_reg);
11030 %}
11032 ins_pipe( ialu_regL_regL );
11033 %}
11035 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
11036 match(Set dst (AddL src1 (ConvI2L src2)));
11037 ins_cost(200);
11038 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
11040 ins_encode %{
11041 Register dst_reg = as_Register($dst$$reg);
11042 Register src1_reg = as_Register($src1$$reg);
11043 Register src2_reg = as_Register($src2$$reg);
11045 __ daddu(dst_reg, src1_reg, src2_reg);
11046 %}
11048 ins_pipe( ialu_regL_regL );
11049 %}
11051 //----------Subtraction Instructions-------------------------------------------
11052 // Integer Subtraction Instructions
11053 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11054 match(Set dst (SubI src1 src2));
11055 ins_cost(100);
11057 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
11058 ins_encode %{
11059 Register dst = $dst$$Register;
11060 Register src1 = $src1$$Register;
11061 Register src2 = $src2$$Register;
11062 __ subu32(dst, src1, src2);
11063 %}
11064 ins_pipe( ialu_regI_regI );
11065 %}
11067 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
11068 match(Set dst (SubI src1 src2));
11069 ins_cost(80);
11071 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
11072 ins_encode %{
11073 Register dst = $dst$$Register;
11074 Register src1 = $src1$$Register;
11075 __ addiu32(dst, src1, -1 * $src2$$constant);
11076 %}
11077 ins_pipe( ialu_regI_regI );
11078 %}
// Integer negate: matches (SubI 0 src) and emits subtract-from-zero (R0 is
// the hardwired zero register).
11080 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
11081 match(Set dst (SubI zero src));
11082 ins_cost(80);
11084 format %{ "neg $dst, $src #@negI_Reg" %}
11085 ins_encode %{
11086 Register dst = $dst$$Register;
11087 Register src = $src$$Register;
11088 __ subu32(dst, R0, src);
11089 %}
11090 ins_pipe( ialu_regI_regI );
11091 %}
// Long negate: (SubL 0 src) as a 64-bit subtract from R0. In this port's
// MacroAssembler, subu is the 64-bit subtract (subu32 is the 32-bit form).
11093 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
11094 match(Set dst (SubL zero src));
11095 ins_cost(80);
11097 format %{ "neg $dst, $src #@negL_Reg" %}
11098 ins_encode %{
11099 Register dst = $dst$$Register;
11100 Register src = $src$$Register;
11101 __ subu(dst, R0, src);
11102 %}
11103 ins_pipe( ialu_regI_regI );
11104 %}
// Long subtract of a small constant, folded into daddiu of the negated value
// (immL16_sub guarantees the negation fits the 16-bit immediate field).
11106 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
11107 match(Set dst (SubL src1 src2));
11108 ins_cost(80);
11110 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
11111 ins_encode %{
11112 Register dst = $dst$$Register;
11113 Register src1 = $src1$$Register;
11114 __ daddiu(dst, src1, -1 * $src2$$constant);
11115 %}
11116 ins_pipe( ialu_regI_regI );
11117 %}
11119 // Subtract Long Register with Register.
11119 // Subtract Long Register with Register.
11120 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11121 match(Set dst (SubL src1 src2));
11122 ins_cost(100);
11123 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
11124 ins_encode %{
11125 Register dst = as_Register($dst$$reg);
11126 Register src1 = as_Register($src1$$reg);
11127 Register src2 = as_Register($src2$$reg);
// subu is the 64-bit subtract in this port (cf. subu32 for 32-bit SubI).
11129 __ subu(dst, src1, src2);
11130 %}
11131 ins_pipe( ialu_regL_regL );
11132 %}
// Long - sign-extended int; the int operand is already sign-extended in its
// 64-bit register, so the 64-bit subtract is directly correct.
11134 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
11135 match(Set dst (SubL src1 (ConvI2L src2)));
11136 ins_cost(100);
11137 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
11138 ins_encode %{
11139 Register dst = as_Register($dst$$reg);
11140 Register src1 = as_Register($src1$$reg);
11141 Register src2 = as_Register($src2$$reg);
11143 __ subu(dst, src1, src2);
11144 %}
11145 ins_pipe( ialu_regL_regL );
11146 %}
// Sign-extended int - long (ConvI2L on the left operand).
11148 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
11149 match(Set dst (SubL (ConvI2L src1) src2));
11150 ins_cost(200);
11151 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
11152 ins_encode %{
11153 Register dst = as_Register($dst$$reg);
11154 Register src1 = as_Register($src1$$reg);
11155 Register src2 = as_Register($src2$$reg);
11157 __ subu(dst, src1, src2);
11158 %}
11159 ins_pipe( ialu_regL_regL );
11160 %}
// Long subtract where both operands are sign-extended ints.
11162 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
11163 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
11164 ins_cost(200);
11165 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
11166 ins_encode %{
11167 Register dst = as_Register($dst$$reg);
11168 Register src1 = as_Register($src1$$reg);
11169 Register src2 = as_Register($src2$$reg);
11171 __ subu(dst, src1, src2);
11172 %}
11173 ins_pipe( ialu_regL_regL );
11174 %}
11176 // Integer MOD with Register
11176 // Integer MOD with Register
// Remainder comes from the HI register after div. The Loongson gsmod path is
// deliberately disabled ("if (0)") — see the 2016.08.10 measurement note below.
11177 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11178 match(Set dst (ModI src1 src2));
11179 ins_cost(300);
11180 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
11181 ins_encode %{
11182 Register dst = $dst$$Register;
11183 Register src1 = $src1$$Register;
11184 Register src2 = $src2$$Register;
11186 //if (UseLoongsonISA) {
11187 if (0) {
11188 // 2016.08.10
11189 // Experiments show that gsmod is slower that div+mfhi.
11190 // So I just disable it here.
11191 __ gsmod(dst, src1, src2);
11192 } else {
11193 __ div(src1, src2);
11194 __ mfhi(dst);
11195 }
11196 %}
11198 //ins_pipe( ialu_mod );
11199 ins_pipe( ialu_regI_regI );
11200 %}
// Long remainder: Loongson gsdmod when available, else ddiv + mfhi.
// NOTE(review): unlike modI above, gsdmod is still enabled here — presumably
// the 64-bit measurement differed; confirm against the 3A2000 experiments.
11202 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
11203 match(Set dst (ModL src1 src2));
11204 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
11206 ins_encode %{
11207 Register dst = as_Register($dst$$reg);
11208 Register op1 = as_Register($src1$$reg);
11209 Register op2 = as_Register($src2$$reg);
11211 if (UseLoongsonISA) {
11212 __ gsdmod(dst, op1, op2);
11213 } else {
11214 __ ddiv(op1, op2);
11215 __ mfhi(dst);
11216 }
11217 %}
11218 ins_pipe( pipe_slow );
11219 %}
// 32-bit integer multiply using the three-operand mul form.
11221 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11222 match(Set dst (MulI src1 src2));
11224 ins_cost(300);
11225 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
11226 ins_encode %{
11227 Register src1 = $src1$$Register;
11228 Register src2 = $src2$$Register;
11229 Register dst = $dst$$Register;
11231 __ mul(dst, src1, src2);
11232 %}
11233 ins_pipe( ialu_mult );
11234 %}
// Multiply-add: dst = src1 * src2 + src3, via the HI/LO accumulator —
// preload LO with src3, madd accumulates the product, then read LO back.
// High ins_cost discourages selection unless the pattern matches exactly.
11236 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
11237 match(Set dst (AddI (MulI src1 src2) src3));
11239 ins_cost(999);
11240 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
11241 ins_encode %{
11242 Register src1 = $src1$$Register;
11243 Register src2 = $src2$$Register;
11244 Register src3 = $src3$$Register;
11245 Register dst = $dst$$Register;
11247 __ mtlo(src3);
11248 __ madd(src1, src2);
11249 __ mflo(dst);
11250 %}
11251 ins_pipe( ialu_mult );
11252 %}
// Integer divide. MIPS div does not trap on zero, so teq traps explicitly
// when src2 == R0 (trap code 0x7 = divide-by-zero, decoded by the signal
// handler). Quotient comes from LO (mflo); the nops pad the div latency.
11254 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11255 match(Set dst (DivI src1 src2));
11257 ins_cost(300);
11258 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
11259 ins_encode %{
11260 Register src1 = $src1$$Register;
11261 Register src2 = $src2$$Register;
11262 Register dst = $dst$$Register;
11264 // In MIPS, div does not cause exception.
11265 // We must trap an exception manually.
11266 __ teq(R0, src2, 0x7);
11268 if (UseLoongsonISA) {
11269 __ gsdiv(dst, src1, src2);
11270 } else {
11271 __ div(src1, src2);
11273 __ nop();
11274 __ nop();
11275 __ mflo(dst);
11276 }
11277 %}
11278 ins_pipe( ialu_mod );
11279 %}
// Single-precision FP divide. No explicit zero check: IEEE-754 FP division
// by zero yields infinity/NaN rather than a hardware trap.
11281 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
11282 match(Set dst (DivF src1 src2));
11284 ins_cost(300);
11285 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
11286 ins_encode %{
11287 FloatRegister src1 = $src1$$FloatRegister;
11288 FloatRegister src2 = $src2$$FloatRegister;
11289 FloatRegister dst = $dst$$FloatRegister;
11291 /* Here do we need to trap an exception manually ? */
11292 __ div_s(dst, src1, src2);
11293 %}
11294 ins_pipe( pipe_slow );
11295 %}
// Double-precision FP divide (see divF_Reg_Reg for the trap discussion).
11297 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
11298 match(Set dst (DivD src1 src2));
11300 ins_cost(300);
11301 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
11302 ins_encode %{
11303 FloatRegister src1 = $src1$$FloatRegister;
11304 FloatRegister src2 = $src2$$FloatRegister;
11305 FloatRegister dst = $dst$$FloatRegister;
11307 /* Here do we need to trap an exception manually ? */
11308 __ div_d(dst, src1, src2);
11309 %}
11310 ins_pipe( pipe_slow );
11311 %}
// Long multiply: three-operand Loongson gsdmult when available, otherwise
// classic dmult with the low 64 bits read back from LO.
11313 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
11314 match(Set dst (MulL src1 src2));
11315 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
11316 ins_encode %{
11317 Register dst = as_Register($dst$$reg);
11318 Register op1 = as_Register($src1$$reg);
11319 Register op2 = as_Register($src2$$reg);
11321 if (UseLoongsonISA) {
11322 __ gsdmult(dst, op1, op2);
11323 } else {
11324 __ dmult(op1, op2);
11325 __ mflo(dst);
11326 }
11327 %}
11328 ins_pipe( pipe_slow );
11329 %}
// Long * sign-extended int: the int register already holds the sign-extended
// 64-bit value, so the same 64-bit multiply sequence applies.
11331 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
11332 match(Set dst (MulL src1 (ConvI2L src2)));
11333 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
11334 ins_encode %{
11335 Register dst = as_Register($dst$$reg);
11336 Register op1 = as_Register($src1$$reg);
11337 Register op2 = as_Register($src2$$reg);
11339 if (UseLoongsonISA) {
11340 __ gsdmult(dst, op1, op2);
11341 } else {
11342 __ dmult(op1, op2);
11343 __ mflo(dst);
11344 }
11345 %}
11346 ins_pipe( pipe_slow );
11347 %}
// Long divide: gsddiv or ddiv+mflo. NOTE(review): unlike divI_Reg_Reg there
// is no explicit teq divide-by-zero trap here — confirm zero checking is
// handled elsewhere (e.g. by an explicit zero-check node) for DivL.
11349 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
11350 match(Set dst (DivL src1 src2));
11351 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
11353 ins_encode %{
11354 Register dst = as_Register($dst$$reg);
11355 Register op1 = as_Register($src1$$reg);
11356 Register op2 = as_Register($src2$$reg);
11358 if (UseLoongsonISA) {
11359 __ gsddiv(dst, op1, op2);
11360 } else {
11361 __ ddiv(op1, op2);
11362 __ mflo(dst);
11363 }
11364 %}
11365 ins_pipe( pipe_slow );
11366 %}
// Single-precision FP add.
11368 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
11369 match(Set dst (AddF src1 src2));
11370 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
11371 ins_encode %{
11372 FloatRegister src1 = as_FloatRegister($src1$$reg);
11373 FloatRegister src2 = as_FloatRegister($src2$$reg);
11374 FloatRegister dst = as_FloatRegister($dst$$reg);
11376 __ add_s(dst, src1, src2);
11377 %}
11378 ins_pipe( fpu_regF_regF );
11379 %}
// Single-precision FP subtract.
11381 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
11382 match(Set dst (SubF src1 src2));
11383 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
11384 ins_encode %{
11385 FloatRegister src1 = as_FloatRegister($src1$$reg);
11386 FloatRegister src2 = as_FloatRegister($src2$$reg);
11387 FloatRegister dst = as_FloatRegister($dst$$reg);
11389 __ sub_s(dst, src1, src2);
11390 %}
11391 ins_pipe( fpu_regF_regF );
11392 %}
// Double-precision FP add.
11393 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
11394 match(Set dst (AddD src1 src2));
11395 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
11396 ins_encode %{
11397 FloatRegister src1 = as_FloatRegister($src1$$reg);
11398 FloatRegister src2 = as_FloatRegister($src2$$reg);
11399 FloatRegister dst = as_FloatRegister($dst$$reg);
11401 __ add_d(dst, src1, src2);
11402 %}
11403 ins_pipe( fpu_regF_regF );
11404 %}
// Double-precision FP subtract.
11406 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
11407 match(Set dst (SubD src1 src2));
11408 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
11409 ins_encode %{
11410 FloatRegister src1 = as_FloatRegister($src1$$reg);
11411 FloatRegister src2 = as_FloatRegister($src2$$reg);
11412 FloatRegister dst = as_FloatRegister($dst$$reg);
11414 __ sub_d(dst, src1, src2);
11415 %}
11416 ins_pipe( fpu_regF_regF );
11417 %}
// Single-precision FP negate (flips the sign bit via neg.s).
11419 instruct negF_reg(regF dst, regF src) %{
11420 match(Set dst (NegF src));
11421 format %{ "negF $dst, $src @negF_reg" %}
11422 ins_encode %{
11423 FloatRegister src = as_FloatRegister($src$$reg);
11424 FloatRegister dst = as_FloatRegister($dst$$reg);
11426 __ neg_s(dst, src);
11427 %}
11428 ins_pipe( fpu_regF_regF );
11429 %}
// Double-precision FP negate.
11431 instruct negD_reg(regD dst, regD src) %{
11432 match(Set dst (NegD src));
11433 format %{ "negD $dst, $src @negD_reg" %}
11434 ins_encode %{
11435 FloatRegister src = as_FloatRegister($src$$reg);
11436 FloatRegister dst = as_FloatRegister($dst$$reg);
11438 __ neg_d(dst, src);
11439 %}
11440 ins_pipe( fpu_regF_regF );
11441 %}
// Single-precision FP multiply.
11444 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
11445 match(Set dst (MulF src1 src2));
11446 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
11447 ins_encode %{
11448 FloatRegister src1 = $src1$$FloatRegister;
11449 FloatRegister src2 = $src2$$FloatRegister;
11450 FloatRegister dst = $dst$$FloatRegister;
11452 __ mul_s(dst, src1, src2);
11453 %}
11454 ins_pipe( fpu_regF_regF );
11455 %}
// Fused single-precision multiply-add. The huge ins_cost effectively disables
// selection: fused madd.s rounds once, which can differ from Java's strict
// separate multiply-then-add semantics (hence the "compatibility" note).
11457 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
11458 match(Set dst (AddF (MulF src1 src2) src3));
11459 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
11460 ins_cost(44444);
11461 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
11462 ins_encode %{
11463 FloatRegister src1 = $src1$$FloatRegister;
11464 FloatRegister src2 = $src2$$FloatRegister;
11465 FloatRegister src3 = $src3$$FloatRegister;
11466 FloatRegister dst = $dst$$FloatRegister;
11468 __ madd_s(dst, src1, src2, src3);
11469 %}
11470 ins_pipe( fpu_regF_regF );
11471 %}
11473 // Mul two double precision floating piont number
11473 // Mul two double precision floating piont number
11474 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
11475 match(Set dst (MulD src1 src2));
11476 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
11477 ins_encode %{
11478 FloatRegister src1 = $src1$$FloatRegister;
11479 FloatRegister src2 = $src2$$FloatRegister;
11480 FloatRegister dst = $dst$$FloatRegister;
11482 __ mul_d(dst, src1, src2);
11483 %}
11484 ins_pipe( fpu_regF_regF );
11485 %}
// Fused double-precision multiply-add; effectively disabled via the huge
// ins_cost for the same single-rounding compatibility reason as maddF.
11487 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
11488 match(Set dst (AddD (MulD src1 src2) src3));
11489 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
11490 ins_cost(44444);
11491 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
11492 ins_encode %{
11493 FloatRegister src1 = $src1$$FloatRegister;
11494 FloatRegister src2 = $src2$$FloatRegister;
11495 FloatRegister src3 = $src3$$FloatRegister;
11496 FloatRegister dst = $dst$$FloatRegister;
11498 __ madd_d(dst, src1, src2, src3);
11499 %}
11500 ins_pipe( fpu_regF_regF );
11501 %}
// Single-precision absolute value (abs.s clears the sign bit).
11503 instruct absF_reg(regF dst, regF src) %{
11504 match(Set dst (AbsF src));
11505 ins_cost(100);
11506 format %{ "absF $dst, $src @absF_reg" %}
11507 ins_encode %{
11508 FloatRegister src = as_FloatRegister($src$$reg);
11509 FloatRegister dst = as_FloatRegister($dst$$reg);
11511 __ abs_s(dst, src);
11512 %}
11513 ins_pipe( fpu_regF_regF );
11514 %}
11517 // intrinsics for math_native.
11518 // AbsD SqrtD CosD SinD TanD LogD Log10D
// Double-precision absolute value.
11520 instruct absD_reg(regD dst, regD src) %{
11521 match(Set dst (AbsD src));
11522 ins_cost(100);
11523 format %{ "absD $dst, $src @absD_reg" %}
11524 ins_encode %{
11525 FloatRegister src = as_FloatRegister($src$$reg);
11526 FloatRegister dst = as_FloatRegister($dst$$reg);
11528 __ abs_d(dst, src);
11529 %}
11530 ins_pipe( fpu_regF_regF );
11531 %}
// Double-precision square root.
11533 instruct sqrtD_reg(regD dst, regD src) %{
11534 match(Set dst (SqrtD src));
11535 ins_cost(100);
11536 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
11537 ins_encode %{
11538 FloatRegister src = as_FloatRegister($src$$reg);
11539 FloatRegister dst = as_FloatRegister($dst$$reg);
11541 __ sqrt_d(dst, src);
11542 %}
11543 ins_pipe( fpu_regF_regF );
11544 %}
// Float sqrt: the ideal graph expresses Math.sqrt((double)f) as
// ConvD2F(SqrtD(ConvF2D src)); collapsing the whole tree into a single
// sqrt.s is exact because float sqrt of a float value rounds identically.
11546 instruct sqrtF_reg(regF dst, regF src) %{
11547 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
11548 ins_cost(100);
11549 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
11550 ins_encode %{
11551 FloatRegister src = as_FloatRegister($src$$reg);
11552 FloatRegister dst = as_FloatRegister($dst$$reg);
11554 __ sqrt_s(dst, src);
11555 %}
11556 ins_pipe( fpu_regF_regF );
11557 %}
11558 //----------------------------------Logical Instructions----------------------
11559 //__________________________________Integer Logical Instructions-------------
11561 //And Instuctions
11562 // And Register with Immediate
11562 // And Register with Immediate
// General-immediate AND: materializes the constant into AT (the assembler
// scratch register) first. Cheaper 16-bit forms below win via lower ins_cost.
11563 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
11564 match(Set dst (AndI src1 src2));
11566 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
11567 ins_encode %{
11568 Register dst = $dst$$Register;
11569 Register src = $src1$$Register;
11570 int val = $src2$$constant;
11572 __ move(AT, val);
11573 __ andr(dst, src, AT);
11574 %}
11575 ins_pipe( ialu_regI_regI );
11576 %}
// AND with an immediate in [0, 65535]: fits andi's zero-extended 16-bit field.
11578 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
11579 match(Set dst (AndI src1 src2));
11580 ins_cost(60);
11582 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
11583 ins_encode %{
11584 Register dst = $dst$$Register;
11585 Register src = $src1$$Register;
11586 int val = $src2$$constant;
11588 __ andi(dst, src, val);
11589 %}
11590 ins_pipe( ialu_regI_regI );
11591 %}
// AND with a contiguous low-bit mask (2^k - 1): emitted as an ext (bit-field
// extract) of the low k bits; is_int_mask returns that bit count.
11593 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
11594 match(Set dst (AndI src1 mask));
11595 ins_cost(60);
11597 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
11598 ins_encode %{
11599 Register dst = $dst$$Register;
11600 Register src = $src1$$Register;
11601 int size = Assembler::is_int_mask($mask$$constant);
11603 __ ext(dst, src, 0, size);
11604 %}
11605 ins_pipe( ialu_regI_regI );
11606 %}
// 64-bit analogue of the nonneg-mask AND above, using dext.
11608 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
11609 match(Set dst (AndL src1 mask));
11610 ins_cost(60);
11612 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
11613 ins_encode %{
11614 Register dst = $dst$$Register;
11615 Register src = $src1$$Register;
11616 int size = Assembler::is_jlong_mask($mask$$constant);
11618 __ dext(dst, src, 0, size);
11619 %}
11620 ins_pipe( ialu_regI_regI );
11621 %}
// XOR with a 16-bit unsigned immediate via xori.
11623 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
11624 match(Set dst (XorI src1 src2));
11625 ins_cost(60);
11627 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
11628 ins_encode %{
11629 Register dst = $dst$$Register;
11630 Register src = $src1$$Register;
11631 int val = $src2$$constant;
11633 __ xori(dst, src, val);
11634 %}
11635 ins_pipe( ialu_regI_regI );
11636 %}
// x ^ -1 == ~x: emitted as Loongson orn (dst = R0 | ~src), i.e. bitwise NOT.
// Gated on UseLoongsonISA && Use3A2000 since gsorn is a Loongson extension.
11638 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
11639 match(Set dst (XorI src1 M1));
11640 predicate(UseLoongsonISA && Use3A2000);
11641 ins_cost(60);
11643 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
11644 ins_encode %{
11645 Register dst = $dst$$Register;
11646 Register src = $src1$$Register;
11648 __ gsorn(dst, R0, src);
11649 %}
11650 ins_pipe( ialu_regI_regI );
11651 %}
// ~(int)longVal: the ConvL2I is free (the int result only depends on the low
// 32 bits and int consumers see a 32-bit view), so a single NOT suffices.
11653 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
11654 match(Set dst (XorI (ConvL2I src1) M1));
11655 predicate(UseLoongsonISA && Use3A2000);
11656 ins_cost(60);
11658 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
11659 ins_encode %{
11660 Register dst = $dst$$Register;
11661 Register src = $src1$$Register;
11663 __ gsorn(dst, R0, src);
11664 %}
11665 ins_pipe( ialu_regI_regI );
11666 %}
// Long XOR with a 16-bit unsigned immediate; xori zero-extends, so this is
// correct for the full 64-bit value.
11668 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
11669 match(Set dst (XorL src1 src2));
11670 ins_cost(60);
11672 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
11673 ins_encode %{
11674 Register dst = $dst$$Register;
11675 Register src = $src1$$Register;
11676 int val = $src2$$constant;
11678 __ xori(dst, src, val);
11679 %}
11680 ins_pipe( ialu_regI_regI );
11681 %}
11683 /*
11684 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
11685 match(Set dst (XorL src1 M1));
11686 predicate(UseLoongsonISA);
11687 ins_cost(60);
11689 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
11690 ins_encode %{
11691 Register dst = $dst$$Register;
11692 Register src = $src1$$Register;
11694 __ gsorn(dst, R0, src);
11695 %}
11696 ins_pipe( ialu_regI_regI );
11697 %}
11698 */
// (LoadB mem) & 0xFF folded into a single unsigned-byte load (lbu).
// Fix: the format string previously said "lhu" (load halfword unsigned),
// but load_UB_enc emits a BYTE load — the disassembly comment was wrong.
11700 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
11701 match(Set dst (AndI mask (LoadB mem)));
11702 ins_cost(60);
11704 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
11705 ins_encode(load_UB_enc(dst, mem));
11706 ins_pipe( ialu_loadI );
11707 %}
// Mirror of lbu_and_lmask with the mask on the right: (LoadB mem) & 0xFF.
// Fix: format string corrected from "lhu" to "lbu" — load_UB_enc is an
// unsigned BYTE load, not a halfword load.
11709 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
11710 match(Set dst (AndI (LoadB mem) mask));
11711 ins_cost(60);
11713 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
11714 ins_encode(load_UB_enc(dst, mem));
11715 ins_pipe( ialu_loadI );
11716 %}
// Register-register integer AND.
11718 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11719 match(Set dst (AndI src1 src2));
11721 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
11722 ins_encode %{
11723 Register dst = $dst$$Register;
11724 Register src1 = $src1$$Register;
11725 Register src2 = $src2$$Register;
11726 __ andr(dst, src1, src2);
11727 %}
11728 ins_pipe( ialu_regI_regI );
11729 %}
// src1 & ~src2 (and-not): XorI with -1 is bitwise NOT, folded into the
// Loongson gsandn instruction (3A2000+ only).
11731 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11732 match(Set dst (AndI src1 (XorI src2 M1)));
11733 predicate(UseLoongsonISA && Use3A2000);
11735 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
11736 ins_encode %{
11737 Register dst = $dst$$Register;
11738 Register src1 = $src1$$Register;
11739 Register src2 = $src2$$Register;
11741 __ gsandn(dst, src1, src2);
11742 %}
11743 ins_pipe( ialu_regI_regI );
11744 %}
// src1 | ~src2 (or-not) via Loongson gsorn.
11746 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11747 match(Set dst (OrI src1 (XorI src2 M1)));
11748 predicate(UseLoongsonISA && Use3A2000);
11750 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
11751 ins_encode %{
11752 Register dst = $dst$$Register;
11753 Register src1 = $src1$$Register;
11754 Register src2 = $src2$$Register;
11756 __ gsorn(dst, src1, src2);
11757 %}
11758 ins_pipe( ialu_regI_regI );
11759 %}
// ~src1 & src2: AND is commutative, so this is gsandn with operands swapped
// (gsandn computes op2 & ~op3 — note the swapped emission below).
11761 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11762 match(Set dst (AndI (XorI src1 M1) src2));
11763 predicate(UseLoongsonISA && Use3A2000);
11765 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
11766 ins_encode %{
11767 Register dst = $dst$$Register;
11768 Register src1 = $src1$$Register;
11769 Register src2 = $src2$$Register;
11771 __ gsandn(dst, src2, src1);
11772 %}
11773 ins_pipe( ialu_regI_regI );
11774 %}
// ~src1 | src2: OR is commutative, so gsorn with swapped operands.
11776 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
11777 match(Set dst (OrI (XorI src1 M1) src2));
11778 predicate(UseLoongsonISA && Use3A2000);
11780 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
11781 ins_encode %{
11782 Register dst = $dst$$Register;
11783 Register src1 = $src1$$Register;
11784 Register src2 = $src2$$Register;
11786 __ gsorn(dst, src2, src1);
11787 %}
11788 ins_pipe( ialu_regI_regI );
11789 %}
11791 // And Long Register with Register
11791 // And Long Register with Register
11792 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11793 match(Set dst (AndL src1 src2));
11794 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
11795 ins_encode %{
11796 Register dst_reg = as_Register($dst$$reg);
11797 Register src1_reg = as_Register($src1$$reg);
11798 Register src2_reg = as_Register($src2$$reg);
11800 __ andr(dst_reg, src1_reg, src2_reg);
11801 %}
11802 ins_pipe( ialu_regL_regL );
11803 %}
// Long AND with a sign-extended int operand; the int register already holds
// the sign-extended 64-bit value, so a plain 64-bit AND is correct.
11805 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
11806 match(Set dst (AndL src1 (ConvI2L src2)));
11807 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
11808 ins_encode %{
11809 Register dst_reg = as_Register($dst$$reg);
11810 Register src1_reg = as_Register($src1$$reg);
11811 Register src2_reg = as_Register($src2$$reg);
11813 __ andr(dst_reg, src1_reg, src2_reg);
11814 %}
11815 ins_pipe( ialu_regL_regL );
11816 %}
// Long AND with a 16-bit unsigned immediate (andi zero-extends, so the upper
// 48 result bits are correctly zeroed).
11818 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
11819 match(Set dst (AndL src1 src2));
11820 ins_cost(60);
11822 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
11823 ins_encode %{
11824 Register dst = $dst$$Register;
11825 Register src = $src1$$Register;
11826 long val = $src2$$constant;
11828 __ andi(dst, src, val);
11829 %}
11830 ins_pipe( ialu_regI_regI );
11831 %}
// (int)(longVal & imm16): the andi result already fits in 32 bits with zero
// upper bits, so the ConvL2I narrowing is free.
11833 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
11834 match(Set dst (ConvL2I (AndL src1 src2)));
11835 ins_cost(60);
11837 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
11838 ins_encode %{
11839 Register dst = $dst$$Register;
11840 Register src = $src1$$Register;
11841 long val = $src2$$constant;
11843 __ andi(dst, src, val);
11844 %}
11845 ins_pipe( ialu_regI_regI );
11846 %}
11848 /*
11849 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11850 match(Set dst (AndL src1 (XorL src2 M1)));
11851 predicate(UseLoongsonISA);
11853 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
11854 ins_encode %{
11855 Register dst = $dst$$Register;
11856 Register src1 = $src1$$Register;
11857 Register src2 = $src2$$Register;
11859 __ gsandn(dst, src1, src2);
11860 %}
11861 ins_pipe( ialu_regI_regI );
11862 %}
11863 */
11865 /*
11866 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11867 match(Set dst (OrL src1 (XorL src2 M1)));
11868 predicate(UseLoongsonISA);
11870 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
11871 ins_encode %{
11872 Register dst = $dst$$Register;
11873 Register src1 = $src1$$Register;
11874 Register src2 = $src2$$Register;
11876 __ gsorn(dst, src1, src2);
11877 %}
11878 ins_pipe( ialu_regI_regI );
11879 %}
11880 */
11882 /*
11883 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11884 match(Set dst (AndL (XorL src1 M1) src2));
11885 predicate(UseLoongsonISA);
11887 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
11888 ins_encode %{
11889 Register dst = $dst$$Register;
11890 Register src1 = $src1$$Register;
11891 Register src2 = $src2$$Register;
11893 __ gsandn(dst, src2, src1);
11894 %}
11895 ins_pipe( ialu_regI_regI );
11896 %}
11897 */
11899 /*
11900 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
11901 match(Set dst (OrL (XorL src1 M1) src2));
11902 predicate(UseLoongsonISA);
11904 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
11905 ins_encode %{
11906 Register dst = $dst$$Register;
11907 Register src1 = $src1$$Register;
11908 Register src2 = $src2$$Register;
11910 __ gsorn(dst, src2, src1);
11911 %}
11912 ins_pipe( ialu_regI_regI );
11913 %}
11914 */
// dst &= -8: clears the low 3 bits by inserting zeros with dins(pos=0, size=3).
// Read/modify pattern (dst on both sides of the match) — works in place.
11916 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
11917 match(Set dst (AndL dst M8));
11918 ins_cost(60);
11920 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
11921 ins_encode %{
11922 Register dst = $dst$$Register;
11924 __ dins(dst, R0, 0, 3);
11925 %}
11926 ins_pipe( ialu_regI_regI );
11927 %}
// dst &= -5 (~0b100): clears bit 2 only, via dins(pos=2, size=1).
11929 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
11930 match(Set dst (AndL dst M5));
11931 ins_cost(60);
11933 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
11934 ins_encode %{
11935 Register dst = $dst$$Register;
11937 __ dins(dst, R0, 2, 1);
11938 %}
11939 ins_pipe( ialu_regI_regI );
11940 %}
// dst &= -7 (~0b110): clears bits 1-2, via dins(pos=1, size=2).
11942 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
11943 match(Set dst (AndL dst M7));
11944 ins_cost(60);
11946 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
11947 ins_encode %{
11948 Register dst = $dst$$Register;
11950 __ dins(dst, R0, 1, 2);
11951 %}
11952 ins_pipe( ialu_regI_regI );
11953 %}
// dst &= -4: clears the low 2 bits, via dins(pos=0, size=2).
11955 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
11956 match(Set dst (AndL dst M4));
11957 ins_cost(60);
11959 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
11960 ins_encode %{
11961 Register dst = $dst$$Register;
11963 __ dins(dst, R0, 0, 2);
11964 %}
11965 ins_pipe( ialu_regI_regI );
11966 %}
// dst &= -121 (~0b1111000): clears bits 3-6, via dins(pos=3, size=4).
11968 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
11969 match(Set dst (AndL dst M121));
11970 ins_cost(60);
11972 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
11973 ins_encode %{
11974 Register dst = $dst$$Register;
11976 __ dins(dst, R0, 3, 4);
11977 %}
11978 ins_pipe( ialu_regI_regI );
11979 %}
11981 // Or Long Register with Register
11981 // Or Long Register with Register
11982 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11983 match(Set dst (OrL src1 src2));
11984 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
11985 ins_encode %{
11986 Register dst_reg = $dst$$Register;
11987 Register src1_reg = $src1$$Register;
11988 Register src2_reg = $src2$$Register;
11990 __ orr(dst_reg, src1_reg, src2_reg);
11991 %}
11992 ins_pipe( ialu_regL_regL );
11993 %}
// OrL where the left operand is a pointer viewed as raw bits (CastP2X);
// the cast is a no-op at machine level, so a plain register OR suffices.
11995 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
11996 match(Set dst (OrL (CastP2X src1) src2));
11997 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
11998 ins_encode %{
11999 Register dst_reg = $dst$$Register;
12000 Register src1_reg = $src1$$Register;
12001 Register src2_reg = $src2$$Register;
12003 __ orr(dst_reg, src1_reg, src2_reg);
12004 %}
12005 ins_pipe( ialu_regL_regL );
12006 %}
12008 // Xor Long Register with Register
12008 // Xor Long Register with Register
12009 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
12010 match(Set dst (XorL src1 src2));
12011 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
12012 ins_encode %{
12013 Register dst_reg = as_Register($dst$$reg);
12014 Register src1_reg = as_Register($src1$$reg);
12015 Register src2_reg = as_Register($src2$$reg);
12017 __ xorr(dst_reg, src1_reg, src2_reg);
12018 %}
12019 ins_pipe( ialu_regL_regL );
12020 %}
12022 // Shift Left by 8-bit immediate
12022 // Shift Left by 8-bit immediate
// 32-bit left shift by constant; sll masks the shift amount to 5 bits in
// hardware, matching Java's (shift & 31) semantics for int shifts.
12023 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
12024 match(Set dst (LShiftI src shift));
12026 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
12027 ins_encode %{
12028 Register src = $src$$Register;
12029 Register dst = $dst$$Register;
12030 int shamt = $shift$$constant;
12032 __ sll(dst, src, shamt);
12033 %}
12034 ins_pipe( ialu_regI_regI );
12035 %}
// Int left shift of a narrowed long; the ConvL2I is free (only the low
// 32 bits feed the int shift). NOTE(review): salI_RegL2I_imm further down
// matches the identical pattern — looks like a duplicate rule; confirm.
12037 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
12038 match(Set dst (LShiftI (ConvL2I src) shift));
12040 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
12041 ins_encode %{
12042 Register src = $src$$Register;
12043 Register dst = $dst$$Register;
12044 int shamt = $shift$$constant;
12046 __ sll(dst, src, shamt);
12047 %}
12048 ins_pipe( ialu_regI_regI );
12049 %}
// (src << 16) & 0xFFFF0000: the AND mask is redundant after a 16-bit left
// shift (low 16 bits are already zero), so only the sll is emitted.
12051 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
12052 match(Set dst (AndI (LShiftI src shift) mask));
12054 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
12055 ins_encode %{
12056 Register src = $src$$Register;
12057 Register dst = $dst$$Register;
12059 __ sll(dst, src, 16);
12060 %}
12061 ins_pipe( ialu_regI_regI );
12062 %}
// Idiom fold: (short)((int)(src & 7L)): the value is in [0,7], so the
// <<16 >>16 short-extension is a no-op and a single andi suffices.
12064 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
12065 %{
12066 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
12068 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
12069 ins_encode %{
12070 Register src = $src$$Register;
12071 Register dst = $dst$$Register;
12073 __ andi(dst, src, 7);
12074 %}
12075 ins_pipe(ialu_regI_regI);
12076 %}
// Idiom fold: (short)(src1 | imm) where imm is in [0,32767] — if the OR
// result's short-extension would be a no-op this collapses to a single ori.
// The immI_0_32767 bound keeps bit 15 of the immediate clear.
12078 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
12079 %{
12080 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
12082 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
12083 ins_encode %{
12084 Register src = $src1$$Register;
12085 int val = $src2$$constant;
12086 Register dst = $dst$$Register;
12088 __ ori(dst, src, val);
12089 %}
12090 ins_pipe(ialu_regI_regI);
12091 %}
12093 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
12094 // This idiom is used by the compiler the i2s bytecode.
12093 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
12094 // This idiom is used by the compiler the i2s bytecode.
// Collapsed into a single seh (sign-extend halfword) instruction.
12095 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
12096 %{
12097 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
12099 format %{ "i2s $dst, $src\t# @i2s" %}
12100 ins_encode %{
12101 Register src = $src$$Register;
12102 Register dst = $dst$$Register;
12104 __ seh(dst, src);
12105 %}
12106 ins_pipe(ialu_regI_regI);
12107 %}
12109 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
12110 // This idiom is used by the compiler for the i2b bytecode.
12109 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
12110 // This idiom is used by the compiler for the i2b bytecode.
// Collapsed into a single seb (sign-extend byte) instruction.
12111 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
12112 %{
12113 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
12115 format %{ "i2b $dst, $src\t# @i2b" %}
12116 ins_encode %{
12117 Register src = $src$$Register;
12118 Register dst = $dst$$Register;
12120 __ seb(dst, src);
12121 %}
12122 ins_pipe(ialu_regI_regI);
12123 %}
// NOTE(review): matches the identical pattern as salL2I_Reg_imm above —
// apparent duplicate rule; one of the two is presumably dead. Confirm.
12126 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
12127 match(Set dst (LShiftI (ConvL2I src) shift));
12129 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
12130 ins_encode %{
12131 Register src = $src$$Register;
12132 Register dst = $dst$$Register;
12133 int shamt = $shift$$constant;
12135 __ sll(dst, src, shamt);
12136 %}
12137 ins_pipe( ialu_regI_regI );
12138 %}
12140 // Shift Left by 8-bit immediate
12140 // Shift Left by 8-bit immediate
// Variable 32-bit left shift; sllv masks the shift count to 5 bits in
// hardware, matching Java's (count & 31) semantics.
12141 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
12142 match(Set dst (LShiftI src shift));
12144 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
12145 ins_encode %{
12146 Register src = $src$$Register;
12147 Register dst = $dst$$Register;
12148 Register shamt = $shift$$Register;
12149 __ sllv(dst, src, shamt);
12150 %}
12151 ins_pipe( ialu_regI_regI );
12152 %}
12155 // Shift Left Long
12155 // Shift Left Long
// 64-bit left shift by constant. MIPS encodes only 5 shift bits, so amounts
// of 32-63 use dsll32 (which adds 32 to the encoded amount); the shift count
// is first reduced mod 64 (Assembler::low(shamt, 6)), per Java long-shift rules.
12156 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
12157 //predicate(UseNewLongLShift);
12158 match(Set dst (LShiftL src shift));
12159 ins_cost(100);
12160 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
12161 ins_encode %{
12162 Register src_reg = as_Register($src$$reg);
12163 Register dst_reg = as_Register($dst$$reg);
12164 int shamt = $shift$$constant;
12166 if (__ is_simm(shamt, 5))
12167 __ dsll(dst_reg, src_reg, shamt);
12168 else {
12169 int sa = Assembler::low(shamt, 6);
12170 if (sa < 32) {
12171 __ dsll(dst_reg, src_reg, sa);
12172 } else {
12173 __ dsll32(dst_reg, src_reg, sa - 32);
12174 }
12175 }
12176 %}
12177 ins_pipe( ialu_regL_regL );
12178 %}
// Long shift of a sign-extended int; same dsll/dsll32 split as salL_Reg_imm.
// NOTE(review): salL_convI2L_Reg_imm below matches the identical pattern at
// the same cost — apparent duplicate rule; confirm which one adlc keeps.
12180 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
12181 //predicate(UseNewLongLShift);
12182 match(Set dst (LShiftL (ConvI2L src) shift));
12183 ins_cost(100);
12184 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
12185 ins_encode %{
12186 Register src_reg = as_Register($src$$reg);
12187 Register dst_reg = as_Register($dst$$reg);
12188 int shamt = $shift$$constant;
12190 if (__ is_simm(shamt, 5))
12191 __ dsll(dst_reg, src_reg, shamt);
12192 else {
12193 int sa = Assembler::low(shamt, 6);
12194 if (sa < 32) {
12195 __ dsll(dst_reg, src_reg, sa);
12196 } else {
12197 __ dsll32(dst_reg, src_reg, sa - 32);
12198 }
12199 }
12200 %}
12201 ins_pipe( ialu_regL_regL );
12202 %}
// Shift Left Long by a register-held (variable) amount.
// dsllv uses the low 6 bits of the shift register, matching Java's
// long shift semantics (shift distance taken mod 64).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a widened int, by an immediate.
// NOTE(review): duplicate of salL_RegI2L_imm above (same match pattern,
// same cost, same encoding) — a candidate for removal.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    // dsll holds sa in a 5-bit field; larger amounts are reduced mod 64
    // and emitted as dsll (sa < 32) or dsll32 (sa >= 32).
    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Reduce mod 64 up front (Java long-shift semantics).
    int shamt = ($shift$$constant & 0x3f);
    // dsra encodes a 5-bit sa; amounts >= 32 go through dsra32.
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (long >> [32..63]) truncated to int: the result fits in the upper
// word, so a single dsra32 produces it already sign-extended — the
// ConvL2I costs nothing.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically, variable amount (dsrav masks the
// shift register to 6 bits — Java long-shift semantics).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically, variable amount.
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by an immediate in [0, 31]: the operand
// class guarantees the amount fits dsrl's 5-bit sa field, so no
// splitting is needed (hence the lower cost than sarL_Reg_imm).
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((int)(long >>> shift)) & 0x7fffffff collapses to a single bit-field
// extract: dext copies 31 bits starting at bit `shift` into $dst with
// zero fill, which is exactly the shift + mask-to-max-int combination.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer reinterpreted as a long (CastP2X is
// a no-op at the register level), immediate amount in [0, 31].
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by an immediate in [32, 63]: dsrl32
// shifts by (sa + 32), so the amount is rebased before encoding.
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >>> shift) for shift in (32, 63]: the predicate requires a
// strictly-greater-than-32 amount so the dsrl32 result has at most 31
// significant bits and is already a valid (zero-sign-bit) int —
// no separate narrowing instruction is needed.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  // n->in(1) is the URShiftL node; in(1)->in(2) its constant shift.
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer-as-long by an immediate in [32, 63].
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    // sll with sa=0 re-sign-extends the low 32 bits, keeping the
    // canonical int-in-64-bit-register representation.
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    // No re-sign-extension needed: or of two sign-extended ints is
    // itself properly sign-extended.
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (src >>> rshift) | ((src & 1) << lshift) where rshift + lshift == 32:
// the low bit moves to the top while the rest shifts down, i.e. a
// rotate right by 1 followed by a logical shift of the remainder.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  // The two shift constants must sum to 32 for the rotate rewrite to hold.
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    // When rshift == 1 the rotate alone already produced the result.
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or of a long with a pointer reinterpreted as an integer value.
// NOTE(review): operands are mRegL/mRegP but the matched node is OrI —
// presumably intentional for pointer-bit tests; confirm against the
// ideal types this pattern actually sees.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  //effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask, where mask is a contiguous low-bit mask:
// collapses to a single ext (extract bit field) — position is the shift
// amount, size is the mask's bit count (computed by is_int_mask).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos  = $shift$$constant;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate left int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 (mod 32). Emitted as a rotate right by rshift,
// which is the same operation.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate left long with right-shift complement in [0, 31]: emitted as
// drotr by rshift (lshift + rshift == 0 mod 64 per the predicate).
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate left long with right-shift complement in [32, 63]: drotr32
// rotates by (sa + 32), so the amount is rebased before encoding.
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate right int: (dst >>> rshift) | (dst << lshift) with the two
// amounts summing to 0 mod 32. Same rotr as rolI — only the operand
// order in the matched Or differs.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate right long, right-shift amount in [0, 31].
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate right long, right-shift amount in [32, 63] (drotr32 rebases
// the amount by 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by a register-held (variable) amount.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src   = $src$$Register;
    Register dst   = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic Shift Right by immediate.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic Shift Right by a register-held (variable) amount.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src   = $src$$Register;
    Register dst   = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// dst = (src != 0) ? 1 : 0. Set dst to 1, then movz clears it when the
// tested register is zero. If dst aliases src, the value is first saved
// in AT so the daddiu does not clobber it before the test.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Widen int to long: ints already live sign-extended in 64-bit
// registers, so only a register move (sll sa=0, which re-sign-extends)
// is needed — and nothing at all when dst == src.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Truncate long to int: sll with sa=0 sign-extends the low 32 bits,
// producing the canonical int-in-64-bit-register form.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (long)(int)src: truncate then re-widen is a single low-word
// sign-extension (sll sa=0).
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// long -> double: move the 64-bit integer into an FPU register and
// convert in place with cvt.d.l.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, fast path. trunc.l.d handles in-range values; the
// hardware writes max_long for out-of-range/NaN inputs, so only that
// sentinel value needs the Java-semantics fixup (positive overflow ->
// max_long, negative overflow -> min_long, NaN -> 0; JLS 5.1.3).
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    // Build max_long in AT as (-1 >>> 1) — two cheap ops instead of a
    // multi-instruction 64-bit constant load.
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Result != max_long: conversion was exact, done. The delay slot
    // pre-loads F30 with integer 0 for the comparison below.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    // src >= 0.0 (and ordered): positive overflow, keep max_long.
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // dst = -1 - max_long == min_long (negative overflow) ...
    __ subu(dst, T9, AT);
    // ... unless the input was NaN, in which case the result is 0.
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, slow path: NaN short-circuits to 0; otherwise
// trunc.l.d is attempted and the FCSR invalid-operation flag (bit 16,
// read via cfc1 $31) decides whether to fall back to the
// SharedRuntime::d2l runtime helper for out-of-range inputs.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    // Check the invalid-operation cause/flag in the FCSR.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Invalid: call the runtime conversion with the argument in F12.
    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, fast path. trunc.w.s yields max_int (0x7fffffff) for
// out-of-range/NaN inputs; NaN is forced to 0, and a max_int result
// with a negative source sign bit is corrected to min_int (JLS 5.1.3).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_w_s(F30, fval);
    __ move(AT, 0x7fffffff);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    // Result != max_int: exact conversion, done. Delay slot pre-loads
    // T9 = 0x80000000 (min_int / the float sign-bit mask).
    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    // Raw float bits: sign bit set means negative overflow -> min_int.
    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    __ movn(dreg, T9, AT);

    __ bind(L);

  %}

  ins_pipe( pipe_slow );
%}
// float -> int, slow path: NaN -> 0 up front; trunc.w.s result is used
// unless the FCSR invalid-operation flag is set, in which case
// SharedRuntime::f2i is called. V0 and the source FPR are preserved
// around the call (see the bug note below).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    //This bug was found when running ezDS's control-panel.
    // J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
    //
    // An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
    // V0 is corrupted during call_VM_leaf(), and should be preserved.
    //
    __ push(fval);
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ pop(fval);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, fast path: same scheme as convF2I_reg_fast but 64-bit.
// trunc.l.s yields max_long on overflow/NaN; NaN is zeroed, and a
// negative source with a max_long result is corrected to min_long
// (0x80000000 shifted up 32 by dsll32).
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_l_s(F30, fval);
    // AT = max_long, built as (-1 >>> 1).
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    // Test the source's sign bit in its raw float encoding.
    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    // T9 becomes min_long (0x8000000000000000).
    __ dsll32(T9, T9, 0);
    __ movn(dreg, T9, AT);

    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, slow path: NaN -> 0; on FCSR invalid-operation the
// SharedRuntime::f2l helper produces the Java-correct saturated result.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    // FCSR invalid-operation flag (bit 16) selects the runtime fallback.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// long -> float: move to FPU, convert with cvt.s.l.
// (Label L is declared but unused here — harmless leftover.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);
    Label L;

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// int -> float: move to FPU, convert with cvt.s.w.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against 0: dst = (p < 0) ? -1 : 0, i.e. the sign bit
// smeared across the word by an arithmetic shift of 31.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0. slt produces 0/1; negating it
// with subu from R0 turns 1 into the all-ones mask.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p   = $p$$Register;
    Register q   = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// dst = (src != NULL) ? 1 : 0 — same set-1-then-movz scheme as
// convI2B, with AT used to save src when dst aliases it.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> double: move to FPU, convert with cvt.d.w. Always exact.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}
// float -> double widening (always exact): single cvt.d.s.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// double -> float narrowing: single cvt.s.d (rounds per current FCSR mode).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.

// Fast path: trunc.w.d returns max_int for out-of-range/NaN inputs, so
// only that sentinel needs fixing up (positive overflow -> max_int,
// negative overflow -> min_int, NaN -> 0; JLS 5.1.3).
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Result != max_int: exact, done. Delay slot zeroes F30 for the
    // src-vs-0.0 comparison below.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    // src >= 0.0 (ordered): positive overflow, keep max_int.
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // dst = -1 - max_int == min_int (negative overflow) ...
    __ subu32(dst, T9, AT);
    // ... unless NaN, which yields 0.
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// double -> int, slow path: use trunc.w.d unless the FCSR
// invalid-operation flag fires, then call SharedRuntime::d2i.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    // FCSR invalid-operation flag (bit 16).
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form

// Possibly-null oops take the null-checking encoder.
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ encode_heap_oop(dst, src);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Known-non-null oops use the cheaper encoder that skips the null check.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop that may be null (constants and known
// non-nulls are handled by decodeHeapOop_not_null below).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;

    __ decode_heap_oop(d, s);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop known to be non-null (or a constant). The
// in-place macro overload is used when src and dst coincide.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (EncodePKlass). Klass pointers are never
// null here, so the unconditional not-null encoder is used.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  // Fixed: the format text previously said "encode_heap_oop_not_null",
  // which did not match the emitted encode_klass_not_null and made
  // -XX:+PrintOptoAssembly listings misleading.
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow klass pointer (DecodeNKlass); klass pointers are
// never null. The in-place macro overload is used when src == dst.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  // Fixed: format previously said "decode_heap_klass_not_null", which
  // names no emitted operation; aligned with decode_klass_not_null.
  format %{ "decode_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Materialize the current JavaThread pointer: from the dedicated
// thread register when OPT_THREAD is compiled in, otherwise via the
// (slower) get_thread() helper. Cost 0 so it matches freely.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a compile-time type assertion only — no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  // Fixed typo in the format text: "chekCastPP" -> "checkCastPP".
  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a compile-time pointer-type adjustment — zero-size, no code.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a compile-time int-range adjustment — no code is emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
// (jr through RA, with a nop in the branch delay slot.)
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ delayed()->nop();
  %}

  ins_pipe( pipe_jump );
%}
13244 /*
13245 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
13246 instruct jumpXtnd(mRegL switch_val) %{
13247 match(Jump switch_val);
13249 ins_cost(350);
13251 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
13252 "jr T9\n\t"
13253 "nop" %}
13254 ins_encode %{
13255 Register table_base = $constanttablebase;
13256 int con_offset = $constantoffset;
13257 Register switch_reg = $switch_val$$Register;
13259 if (UseLoongsonISA) {
13260 if (Assembler::is_simm(con_offset, 8)) {
13261 __ gsldx(T9, table_base, switch_reg, con_offset);
13262 } else if (Assembler::is_simm16(con_offset)) {
13263 __ daddu(T9, table_base, switch_reg);
13264 __ ld(T9, T9, con_offset);
13265 } else {
13266 __ move(T9, con_offset);
13267 __ daddu(AT, table_base, switch_reg);
13268 __ gsldx(T9, AT, T9, 0);
13269 }
13270 } else {
13271 if (Assembler::is_simm16(con_offset)) {
13272 __ daddu(T9, table_base, switch_reg);
13273 __ ld(T9, T9, con_offset);
13274 } else {
13275 __ move(T9, con_offset);
13276 __ daddu(AT, table_base, switch_reg);
13277 __ daddu(AT, T9, AT);
13278 __ ld(T9, AT, 0);
13279 }
13280 }
13282 __ jr(T9);
13283 __ delayed()->nop();
13285 %}
13286 ins_pipe(pipe_jump);
13287 %}
13288 */
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    // V0, V1 are indicated in:
    //  [stubGenerator_mips.cpp] generate_forward_exception()
    //  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
    //
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand off the exception oop and the (return-address) pc in the
    // registers the exception-forwarding stubs expect, then jump.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ delayed()->nop();
  %}
  ins_pipe( pipe_jump );
%}
13322 // ============================================================================
13323 // Procedure Call/Return Instructions
13324 // Call Java Static Instruction
13325 // Note: If this code changes, the corresponding ret_addr_offset() and
13326 // compute_padding() functions will have to be adjusted.
// Direct static Java call; the actual bytes come from the Java_Static_Call
// encoding (defined elsewhere in this file).
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);   // call target is pc-relative (patchable)
%}
13338 // Call Java Dynamic Instruction
13339 // Note: If this code changes, the corresponding ret_addr_offset() and
13340 // compute_padding() functions will have to be adjusted.
// Dynamic (inline-cache) Java call; IC_Klass is preloaded with
// Universe::non_oop_word() per the format string — see Java_Dynamic_Call enc.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, #Universe::non_oop_word()\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);   // patchable pc-relative call
%}
// Leaf runtime call that is known not to touch FP state; no safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);   // align the call site
%}
13365 // Prefetch instructions.
// Prefetch for read.
// AT is first loaded with base (+ index << scale), then the displacement is
// added and `pref 0` issued on the final address.
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale), or just base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement to the address already accumulated in AT.
    // BUGFIX: the old code re-read `base` here (dropping any index part) and,
    // in the simm16 case, added `disp` twice — prefetching the wrong line.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write.
// Same addressing as prefetchrNTA but issues `pref 1` (store hint).
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale), or just base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement to the address already accumulated in AT.
    // BUGFIX: the old code re-read `base` here (dropping any index part) and,
    // in the simm16 case, added `disp` twice — prefetching the wrong line.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
13432 // Prefetch instructions for allocation.
// Prefetch for allocation: touches the target line with a discarded byte
// load (dst = R0) instead of a pref hint.  Loongson gslbx is used where
// available to fold base+index+disp into one instruction.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    Register dst = R0;   // load target is discarded; only the cache fill matters

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit simm16: materialize it in T9 first.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
13497 // Call runtime without safepoint
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);   // align the call site
%}
13510 // Load Char (16bit unsigned)
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));   // zero-extending 16-bit load (enc elsewhere)
  ins_pipe( ialu_loadI );
%}
// LoadUS folded with ConvI2L: the zero-extended 16-bit value already fits a
// long, so the same encoding as loadUS is reused.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
13529 // Store Char (16bit unsigned)
// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store zero char: avoids materializing 0 in a register (uses R0 in the enc).
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0f: moving R0 into the FPR yields the +0.0f bit
// pattern without a constant-table access.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the 16-bit immediate of lwc1.
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Large offset: materialize in AT, then indexed load.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0: dmtc1 of R0 writes all 64 bits, giving +0.0.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant from the constant table (mirrors loadConF).
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the 16-bit immediate of ldc1.
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Large offset: materialize in AT, then indexed load.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
13623 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0f: the bit pattern of +0.0f is all zeros, so an integer
// sw of R0 is used instead of going through an FP register.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx only takes an 8-bit immediate; fall back in steps.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // disp too large for simm16: fold it into the index side.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
13710 // Load Double
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
13720 // Load Double - UNaligned
// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Need more effective ldl/ldr
  // Currently reuses the aligned load encoding (higher ins_cost reflects the
  // potential misalignment penalty).
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FP register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0: materializes 0.0 in scratch FPR F30 (mtc1 of R0 then
// int->double convert), then stores F30 with the usual addressing cases.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);   // F30 = (double) 0

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 only takes an 8-bit immediate; fall back in steps.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // disp too large for simm16: fold it into the index side.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load an int from a stack slot (SP-relative).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    // Stack-slot displacements must fit lw's 16-bit immediate.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store an int to a stack slot (SP-relative).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    // Stack-slot displacements must fit sw's 16-bit immediate.
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot (SP-relative).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    // Stack-slot displacements must fit ld's 16-bit immediate.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a long to a stack slot (SP-relative).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    // Stack-slot displacements must fit sd's 16-bit immediate.
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot (SP-relative).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    // Stack-slot displacements must fit ld's 16-bit immediate.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a pointer to a stack slot (SP-relative).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    // Stack-slot displacements must fit sd's 16-bit immediate.
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot (SP-relative).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    // Stack-slot displacements must fit lwc1's 16-bit immediate.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a float to a stack slot (SP-relative).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    // Stack-slot displacements must fit swc1's 16-bit immediate.
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
13924 // Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative).
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    // Stack-slot displacements must fit ldc1's 16-bit immediate.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a double to a stack slot (SP-relative).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    // Stack-slot displacements must fit sdc1's 16-bit immediate.
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path object lock; the heavy lifting lives in MacroAssembler::fast_lock.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr <-- $object, $box, $tmp, $scr #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path object unlock; counterpart of cmpFastLock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $cr <-- $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
13977 // Store CMS card-mark Immediate
// Card-table byte store; the "_sync" encoding name suggests an ordered store
// (encoding defined elsewhere in this file).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
13988 // Die now
// Die now: emitted for the Halt ideal node on unreachable paths.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // BUGFIX: the stop message previously misspelled "ShoudNotReachHere".
    __ stop("in ShouldNotReachHere");
  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea) for base + 8-bit offset, narrow-oop form;
// only valid when compressed oops use no shift.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
14072 // ============================================================================
14073 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
14074 // array for an instance of the superklass. Set a hidden internal cache on a
14075 // hit (cache is checked with exposed code in gen_subtype_check()). Return
14076 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-half subtype check: scans the secondary-supers array of $sub for
// $super (see gen_subtype_check()); encoding lives in enc_PartialSubtypeCheck.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
14087 // Conditional-store of the updated heap-top.
14088 // Used during allocation of the shared heap.
instruct storePConditional( memory heap_top_ptr, mRegP oldval, mRegP newval, FlagsReg cr ) %{
  match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval)));

  format %{ "CMPXCHG $heap_top_ptr, $newval\t# (ptr) @storePConditional "
            "If $oldval == $heap_top_ptr then store $newval into $heap_top_ptr" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($heap_top_ptr$$base), $heap_top_ptr$$disp);

    int index = $heap_top_ptr$$index;
    int scale = $heap_top_ptr$$scale;
    int disp = $heap_top_ptr$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Only base+disp addressing is supported; an indexed heap-top address
    // is unexpected and trapped at runtime.
    if( index != 0 ) {
      __ stop("in storePConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
14115 // Conditional-store of an int value.
14116 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Only base+disp addressing is supported here.
    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      // ll/sc retry loop.  AT ends up non-zero on success, zero on
      // mismatch (the delay slots preload the result value).
      __ bind(again);
      if(UseSyncLevel >= 3000 || UseSyncLevel < 2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);    // loaded value != expected -> fail
      __ delayed()->addu(AT, R0, R0); //   (delay slot) AT = 0

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                // AT = 1 if store-conditional succeeded
      __ beq(AT, R0, again);          // lost reservation -> retry
      __ delayed()->addiu(AT, R0, 0xFF); //  (delay slot) AT = success marker
      __ bind(failure);
      __ sync();                      // full barrier after the CAS attempt
    }
  %}

  ins_pipe( long_memory_op );
%}
14155 // Conditional-store of a long value.
14156 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Conditional-store of a long value, implemented with cmpxchg.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Only base+disp addressing is supported here.
    if( index != 0 ) {
      // BUGFIX: message used to say "storeIConditional" (copy-paste).
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
14183 // Implement LoadPLocked. Must be ordered against changes of the memory location
14184 // by storePConditional.
// Implement LoadPLocked. Must be ordered against changes of the memory location
// by storePConditional — the "_ac" (acquire) encoding appends a sync after
// the load, per the format string.
instruct loadPLocked(mRegP dst, memory mem) %{
  match(Set dst (LoadPLocked mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "ld    $dst, $mem #@loadPLocked\n\t"
            "sync" %}
  size(12);
  ins_encode (load_P_enc_ac(dst, mem));
  ins_pipe( ialu_loadI );
%}
// CAS of a 32-bit int at [$mem_ptr]; cmpxchg32 leaves its success flag in AT,
// which is copied to $res.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // BUGFIX: the format previously said "@ compareAndSwapL" on the first line
  // and described a MOV/BNE sequence that is not what is emitted.
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV  $res, AT @ compareAndSwapI" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (removed unused local `Label L;`)

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);   // AT holds the CAS result flag
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 64-bit long at [$mem_ptr]; cmpxchg leaves its success flag in AT,
// which is copied to $res.  Guarded by supports_cx8().
instruct compareAndSwapL( mRegI res, mRegP mem_ptr, s2RegL oldval, mRegL newval) %{
  predicate(VM_Version::supports_cx8());
  match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // BUGFIX: the format lines previously said "@ compareAndSwapI" and described
  // a MOV/BNE sequence that is not what is emitted.
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapL\n\t"
            "MOV  $res, AT @ compareAndSwapL" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (removed unused local `Label L;`)

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);   // AT holds the CAS result flag
  %}
  ins_pipe( long_memory_op );
%}
14241 //FIXME:
//FIXME:
// CAS of a pointer at [$mem_ptr]; cmpxchg leaves its success flag in AT,
// which is copied to $res.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV  $res, AT @ compareAndSwapP\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (removed unused local `Label L;`)

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);   // AT holds the CAS result flag
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a narrow (compressed) oop at [$mem_ptr]; 32-bit CAS with the
// expected value sign-extended first (see comment below).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV  $res, AT @ compareAndSwapN\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (removed unused local `Label L;`)

    // cmpxchg32 is implemented with ll/sc, which will do sign extension.
    // Thus, we should extend oldval's sign for correct comparison.
    //
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);   // AT holds the CAS result flag
  %}
  ins_pipe( long_memory_op );
%}
14285 //----------Max and Min--------------------------------------------------------
14286 // Min Instructions
14287 ////
14288 // *** Min and Max using the conditional move are slower than the
14289 // *** branch version on a Pentium III.
14290 // // Conditional move for min
14291 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
14292 // effect( USE_DEF op2, USE op1, USE cr );
14293 // format %{ "CMOVlt $op2,$op1\t! min" %}
14294 // opcode(0x4C,0x0F);
14295 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
14296 // ins_pipe( pipe_cmov_reg );
14297 //%}
14298 //
14299 //// Min Register with Register (P6 version)
14300 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
14301 // predicate(VM_Version::supports_cmov() );
14302 // match(Set op2 (MinI op1 op2));
14303 // ins_cost(200);
14304 // expand %{
14305 // eFlagsReg cr;
14306 // compI_eReg(cr,op1,op2);
14307 // cmovI_reg_lt(op2,op1,cr);
14308 // %}
14309 //%}
14311 // Min Register with Register (generic version)
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    // AT = (src < dst); if so, branchlessly move src into dst.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
14330 // Max Register with Register
14331 // *** Min and Max using the conditional move are slower than the
14332 // *** branch version on a Pentium III.
14333 // // Conditional move for max
14334 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
14335 // effect( USE_DEF op2, USE op1, USE cr );
14336 // format %{ "CMOVgt $op2,$op1\t! max" %}
14337 // opcode(0x4F,0x0F);
14338 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
14339 // ins_pipe( pipe_cmov_reg );
14340 //%}
14341 //
14342 // // Max Register with Register (P6 version)
14343 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
14344 // predicate(VM_Version::supports_cmov() );
14345 // match(Set op2 (MaxI op1 op2));
14346 // ins_cost(200);
14347 // expand %{
14348 // eFlagsReg cr;
14349 // compI_eReg(cr,op1,op2);
14350 // cmovI_reg_gt(op2,op1,cr);
14351 // %}
14352 //%}
14354 // Max Register with Register (generic version)
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    // AT = (dst < src); if so, branchlessly move src into dst.
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// max(dst, 0): clamps negative values to zero without a branch.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst   = $dst$$Register;

    // AT = (dst < 0); if so, move R0 (zero) into dst.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// AndL with 0xFFFFFFFF mask == zero-extend the low 32 bits (single dext).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zext(src1).
// Three cases avoid clobbering an input that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place: just insert src2 into bits 32..63.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift the high half up first, then insert src1 into bits 0..31.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // No aliasing: extract the low half, then insert the high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
14427 // Zero-extend convert int to long
// Zero-extend convert int to long: (AndL (ConvI2L src) 0xFFFFFFFF)
// collapses to a single dext of the low 32 bits.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // dst = zero-extended src[31:0].
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Long -> int -> long round trip with a 0xFFFFFFFF mask is just a
// zero-extension of the low 32 bits: one dext instruction.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // dst = zero-extended src[31:0].
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
14456 // Match loading integer and casting it to unsigned int in long register.
14457 // LoadI + ConvI2L + AndL 0xffffffff.
// Fold LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) into one
// zero-extending 32-bit load (load_N_enc; format suggests lwu).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand
// (C2 does not canonicalize the operand order here, so both shapes are needed).
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
14475 // ============================================================================
14476 // Safepoint Instruction
// Safepoint poll using a register that already holds the polling-page
// address: a relocated load from it faults when the page is protected.
// Currently disabled via predicate(false).
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // poll_type relocation lets the VM map the faulting pc back to this poll.
    __ relocate(relocInfo::poll_type);
    // The loaded value is discarded (AT); only the potential fault matters.
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address in T9 and load
// from it; the load faults when the VM protects the page to stop threads.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    // Clobbers T9 and AT; the loaded value itself is discarded.
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
14512 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target (registers already hold the value
// at float precision); emit nothing.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op on this target; emit nothing.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
14532 //---------- Zeros Count Instructions ------------------------------------------
14533 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros (int) via the hardware clz instruction; gated on
// the UseCountLeadingZerosInstructionMIPS64 flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstructionMIPS64);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros (long) via the 64-bit dclz instruction; same flag
// gate as the int variant.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstructionMIPS64);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros (int); gated on UseCountTrailingZerosInstructionMIPS64.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstructionMIPS64);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are gs (Godson/Loongson extension) instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
14568 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
14569 predicate(UseCountTrailingZerosInstructionMIPS64);
14570 match(Set dst (CountTrailingZerosL src));
14572 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
14573 ins_encode %{
14574 __ dctz($dst$$Register, $src$$Register);
14575 %}
14576 ins_pipe( ialu_regL_regL );
14577 %}
14579 // ====================VECTOR INSTRUCTIONS=====================================
14581 // Load vectors (8 bytes long)
// Load an 8-byte vector into a vecD (double FP) register, reusing the
// scalar double load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
14591 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD register, reusing the scalar double
// store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes using the DSP replv_ob instruction
// (only on Loongson 3A2000+, hence the Use3A2000 gate); cheaper than the
// generic dins sequence below. Clobbers AT.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte replicate (no DSP): successively double the replicated
// width with dins/dinsu (8->16->32->64 bits), then move to the FP register.
// Clobbers AT. Higher cost so Repl8B_DSP wins when available.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);      // byte -> halfword
    __ dins(AT, AT, 16, 16);    // halfword -> word
    __ dinsu(AT, AT, 32, 32);   // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte into all 8 lanes via DSP repl_ob
// (3A2000+ only). Clobbers AT.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate byte replicate: materialize the constant, then widen
// 8->16->32->64 bits with dins/dinsu. Clobbers AT.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero byte: just move R0 (always 0) into the FP register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate -1 byte: all-ones pattern built with nor(AT, R0, R0). Clobbers AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    // nor of zero with zero yields 0xFFFF...FF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes via DSP replv_qh (3A2000+ only).
// Clobbers AT.
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short replicate: widen 16->32->64 bits with dins/dinsu.
// Clobbers AT.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // halfword -> word
    __ dinsu(AT, AT, 32, 32);   // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short via DSP (3A2000+ only). repl_qh only takes
// a 10-bit signed immediate, so larger constants go through li32 + replv_qh.
// Clobbers AT.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "repl_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      // Materialize the constant, then replicate from the register.
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate short replicate: materialize then widen 16->32->64 bits.
// Clobbers AT.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero short: move R0 into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate -1 short: all-ones via nor(AT, R0, R0). Clobbers AT.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
14777 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both 32-bit halves of the 64-bit FP register:
// insert src into bits 0..31 and 32..63 of AT, then dmtc1. Clobbers AT.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar immediate to be vector (materialized in a register, no constant-table load).
14793 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
14794 predicate(n->as_Vector()->length() == 2);
14795 match(Set dst (ReplicateI con));
14796 effect(KILL tmp);
14797 format %{ "li32 AT, [$con], 32\n\t"
14798 "dinsu AT, AT\n\t"
14799 "dmtc1 AT, $dst\t! replicate2I($con)" %}
14800 ins_encode %{
14801 int val = $con$$constant;
14802 __ li32(AT, val);
14803 __ dinsu(AT, AT, 32, 32);
14804 __ dmtc1(AT, $dst$$FloatRegister);
14805 %}
14806 ins_pipe( pipe_mtc1 );
14807 %}
14809 // Replicate integer (4 byte) scalar zero to be vector
// Replicate zero int: move R0 into the FP register.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
14820 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate -1 int: all-ones via nor(AT, R0, R0). Clobbers AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
14832 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes using the paired-single convert
// cvt.ps.s, which packs two singles into one 64-bit FP register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
14843 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: integer move of R0 gives the +0.0f bit pattern
// in both lanes.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
14855 // ====================VECTOR ARITHMETIC=======================================
14857 // --------------------------------- ADD --------------------------------------
14859 // Floats vector add
14860 // kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Packed 2-float add (two-operand form, dst += src) via paired-single add.ps.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2-float add (three-operand form, dst = src1 + src2).
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  // NOTE(review): pipe class differs from vadd2F (pipe_slow) — confirm intended.
  ins_pipe( fpu_regF_regF );
%}
14881 // --------------------------------- SUB --------------------------------------
14883 // Floats vector sub
// Packed 2-float subtract (dst -= src) via paired-single sub.ps.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14894 // --------------------------------- MUL --------------------------------------
14896 // Floats vector mul
// Packed 2-float multiply (dst *= src) via paired-single mul.ps.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2-float multiply (three-operand form, dst = src1 * src2).
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14917 // --------------------------------- DIV --------------------------------------
14918 // MIPS do not have div.ps
14920 // --------------------------------- MADD --------------------------------------
14921 // Floats vector madd
14922 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
14923 // predicate(n->as_Vector()->length() == 2);
14924 // match(Set dst (AddVF (MulVF src1 src2) src3));
14925 // ins_cost(50);
14926 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
14927 // ins_encode %{
14928 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14929 // %}
14930 // ins_pipe( fpu_regF_regF );
14931 //%}
14934 //----------PEEPHOLE RULES-----------------------------------------------------
14935 // These must follow all instruction definitions as they use the names
14936 // defined in the instructions definitions.
14937 //
// peepmatch ( root_instr_name [preceding_instruction]* );
14939 //
14940 // peepconstraint %{
14941 // (instruction_number.operand_name relational_op instruction_number.operand_name
14942 // [, ...] );
14943 // // instruction numbers are zero-based using left to right order in peepmatch
14944 //
14945 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14946 // // provide an instruction_number.operand_name for each operand that appears
14947 // // in the replacement instruction's match rule
14948 //
14949 // ---------VM FLAGS---------------------------------------------------------
14950 //
14951 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14952 //
14953 // Each peephole rule is given an identifying number starting with zero and
14954 // increasing by one in the order seen by the parser. An individual peephole
14955 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14956 // on the command-line.
14957 //
14958 // ---------CURRENT LIMITATIONS----------------------------------------------
14959 //
14960 // Only match adjacent instructions in same basic block
14961 // Only equality constraints
14962 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14963 // Only one replacement instruction
14964 //
14965 // ---------EXAMPLE----------------------------------------------------------
14966 //
14967 // // pertinent parts of existing instructions in architecture description
14968 // instruct movI(eRegI dst, eRegI src) %{
14969 // match(Set dst (CopyI src));
14970 // %}
14971 //
14972 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14973 // match(Set dst (AddI dst src));
14974 // effect(KILL cr);
14975 // %}
14976 //
14977 // // Change (inc mov) to lea
14978 // peephole %{
//   // increment preceded by register-register move
14980 // peepmatch ( incI_eReg movI );
14981 // // require that the destination register of the increment
14982 // // match the destination register of the move
14983 // peepconstraint ( 0.dst == 1.dst );
14984 // // construct a replacement instruction that sets
14985 // // the destination to ( move's source register + one )
14986 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14987 // %}
14988 //
14989 // Implementation no longer uses movX instructions since
14990 // machine-independent system no longer uses CopyX nodes.
14991 //
14992 // peephole %{
14993 // peepmatch ( incI_eReg movI );
14994 // peepconstraint ( 0.dst == 1.dst );
14995 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14996 // %}
14997 //
14998 // peephole %{
14999 // peepmatch ( decI_eReg movI );
15000 // peepconstraint ( 0.dst == 1.dst );
15001 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
15002 // %}
15003 //
15004 // peephole %{
15005 // peepmatch ( addI_eReg_imm movI );
15006 // peepconstraint ( 0.dst == 1.dst );
15007 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
15008 // %}
15009 //
15010 // peephole %{
15011 // peepmatch ( addP_eReg_imm movP );
15012 // peepconstraint ( 0.dst == 1.dst );
15013 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
15014 // %}
15016 // // Change load of spilled value to only a spill
15017 // instruct storeI(memory mem, eRegI src) %{
15018 // match(Set mem (StoreI mem src));
15019 // %}
15020 //
15021 // instruct loadI(eRegI dst, memory mem) %{
15022 // match(Set dst (LoadI mem));
15023 // %}
15024 //
15025 //peephole %{
15026 // peepmatch ( loadI storeI );
15027 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
15028 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
15029 //%}
15031 //----------SMARTSPILL RULES---------------------------------------------------
15032 // These must follow all instruction definitions as they use the names
15033 // defined in the instructions definitions.