Fri, 17 Mar 2017 22:15:27 +0800
[C2] Use general_jal for static java call & dynamic java call.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
// S5 is used for the heapbase of compressed oops
// Allocation order for the general (integer) registers; earlier entries
// are preferred by the register allocator.  S6 is reserved for
// get_thread(S6) and S5 for the heapbase of compressed oops (see the
// comments above this class).
alloc_class chunk0(
                   S7, S7_H,
                   S0, S0_H,
                   S1, S1_H,
                   S2, S2_H,
                   S4, S4_H,
                   S5, S5_H,
                   S6, S6_H,
                   S3, S3_H,
                   T2, T2_H,
                   T3, T3_H,
                   T8, T8_H,
                   T9, T9_H,
                   T1, T1_H, // inline_cache_reg
                   V1, V1_H,
                   A7, A7_H,
                   A6, A6_H,
                   A5, A5_H,
                   A4, A4_H,
                   V0, V0_H,
                   A3, A3_H,
                   A2, A2_H,
                   A1, A1_H,
                   A0, A0_H,
                   T0, T0_H,
                   GP, GP_H, // fixed: comma was missing between GP_H and RA
                   RA, RA_H,
                   SP, SP_H, // stack_pointer
                   FP, FP_H  // frame_pointer
                  );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// All allocatable single-precision float registers.  F30 is excluded (used
// as a temporary in D2I, per the comment above); F31 is included per the
// 2016/12/1 note.  Fixed: comma was missing between F17 and F18.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Helper queried by Compile::shorten_branches.  This platform emits no
// call trampoline stubs, so both the size and the relocation count for a
// trampoline are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emission and sizing of the out-of-line exception and deoptimization
// handler stubs (bodies of the emit_* functions live in the source block).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Worst-case size reserved for the exception-handler stub.
  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call be deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16); // stubs are emitted 16-byte aligned
  }

#ifdef _LP64
  // Worst-case size reserved for the deoptimization-handler stub (64-bit).
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call be deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // NOTE(review): "pushl(); jmp;" looks like an x86 leftover on this
    // MIPS port -- confirm the 32-bit size if this path is ever built.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  // Emits the out-of-line exception-handler stub: materializes the address
  // of OptoRuntime's exception blob into T9 and jumps to it.  The exact
  // sequence (patchable_set48 + jr, not a direct jump) is load-bearing --
  // see the history note below -- so the code must not be restructured.
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
   * However, this triggered an assert after the 40th method:
   *
   *   39  b  java.lang.Throwable::<init> (25 bytes)
   *  --- ns java.lang.Throwable::fillInStackTrace
   *   40 !b  java.net.URLClassLoader::findClass (29 bytes)
   *  /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(),"must be")
   *   40     made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert disappears, but WebClient
   * fails after the 403rd method for an unknown reason.  So the indirect
   * JR sequence below is kept deliberately.
   */
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);            // indirect jump through T9 (no return address kept)
  __ delayed()->nop();  // branch-delay slot
  __ align(16);         // keep the stub 16-byte aligned
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
585 // Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Emits the out-of-line deoptimization-handler stub: materializes the
  // address of the deopt blob's unpack entry into T9 and calls it.  Unlike
  // the exception handler this uses jalr -- presumably because the unpack
  // routine needs the return address; confirm against deopt_blob().
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL) return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);          // indirect call through T9
  __ delayed()->nop();  // branch-delay slot
  __ align(16);         // keep the stub 16-byte aligned
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
633 //FIXME
634 // emit call stub, compiled java to interpreter
//FIXME
// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Emits the static-call stub.  The stub is fixed up when the
  // corresponding call is converted from calling compiled code to calling
  // interpreted code.  Logical shape (x86 notation from the original):
  //   mov rbx,0   -- materialize the methodOop placeholder (S3 here)
  //   jmp -1      -- jump to a to-be-patched address

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  // 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated
  // for GC.
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */

  // static stub relocation also tags the methodOop in the code-stream.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;          // placeholder, patched later
  __ patchable_set48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();                               // branch-delay slot
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
// size of call stub, compiled java to interpreter
678 uint size_java_to_interp() {
679 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
680 return round_to(size, 16);
681 }
683 // relocation entries for call stub, compiled java to interpreter
684 uint reloc_java_to_interp() {
685 return 16; // in emit_java_to_interp + in Java_Static_Call
686 }
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else {
691 assert(false, "Not implemented yet !" );
692 Unimplemented();
693 }
694 }
697 // No additional cost for CMOVL.
698 const int Matcher::long_cmove_cost() { return 0; }
700 // No CMOVF/CMOVD with SSE2
701 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
703 // Does the CPU require late expand (see block.cpp for description of late expand)?
704 const bool Matcher::require_postalloc_expand = false;
706 // Should the Matcher clone shifts on addressing modes, expecting them
707 // to be subsumed into complex addressing expressions or compute them
708 // into registers? True for Intel but false for most RISCs
709 const bool Matcher::clone_shift_expressions = false;
711 // Do we need to mask the count passed to shift instructions or does
712 // the cpu only look at the lower 5/6 bits anyway?
713 const bool Matcher::need_masked_shift_count = false;
715 bool Matcher::narrow_oop_use_complex_address() {
716 NOT_LP64(ShouldNotCallThis());
717 assert(UseCompressedOops, "only for compressed oops code");
718 return false;
719 }
721 bool Matcher::narrow_klass_use_complex_address() {
722 NOT_LP64(ShouldNotCallThis());
723 assert(UseCompressedClassPointers, "only for compressed klass code");
724 return false;
725 }
727 // This is UltraSparc specific, true just means we have fast l2f conversion
728 const bool Matcher::convL2FSupported(void) {
729 return true;
730 }
732 // Max vector size in bytes. 0 if not supported.
733 const int Matcher::vector_width_in_bytes(BasicType bt) {
734 assert(MaxVectorSize == 8, "");
735 return 8;
736 }
738 // Vector ideal reg
739 const int Matcher::vector_ideal_reg(int size) {
740 assert(MaxVectorSize == 8, "");
741 switch(size) {
742 case 8: return Op_VecD;
743 }
744 ShouldNotReachHere();
745 return 0;
746 }
748 // Only lowest bits of xmm reg are used for vector shift count.
749 const int Matcher::vector_shift_count_ideal_reg(int size) {
750 fatal("vector shift is not supported");
751 return Node::NotAMachineReg;
752 }
754 // Limits on vector size (number of elements) loaded into vector.
755 const int Matcher::max_vector_size(const BasicType bt) {
756 assert(is_java_primitive(bt), "only primitive type vectors");
757 return vector_width_in_bytes(bt)/type2aelembytes(bt);
758 }
760 const int Matcher::min_vector_size(const BasicType bt) {
761 return max_vector_size(bt); // Same as max.
762 }
764 // MIPS supports misaligned vectors store/load? FIXME
765 const bool Matcher::misaligned_vectors_ok() {
766 return false;
767 //return !AlignVector; // can be changed by flag
768 }
770 // Register for DIVI projection of divmodI
771 RegMask Matcher::divI_proj_mask() {
772 ShouldNotReachHere();
773 return RegMask();
774 }
776 // Register for MODI projection of divmodI
777 RegMask Matcher::modI_proj_mask() {
778 ShouldNotReachHere();
779 return RegMask();
780 }
782 // Register for DIVL projection of divmodL
783 RegMask Matcher::divL_proj_mask() {
784 ShouldNotReachHere();
785 return RegMask();
786 }
788 int Matcher::regnum_to_fpu_offset(int regnum) {
789 return regnum - 32; // The FP registers are in the second chunk
790 }
793 const bool Matcher::isSimpleConstant64(jlong value) {
794 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
795 return true;
796 }
799 // Return whether or not this register is ever used as an argument. This
800 // function is used on startup to build the trampoline stubs in generateOptoStub.
801 // Registers not mentioned will be killed by the VM call in the trampoline, and
802 // arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Return whether 'reg' is ever used to pass a Java argument.
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer argument registers: T0 and A0..A7 (plus their _H halves).
  if ( reg == T0_num || reg == T0_H_num
    || reg == A0_num || reg == A0_H_num
    || reg == A1_num || reg == A1_H_num
    || reg == A2_num || reg == A2_H_num
    || reg == A3_num || reg == A3_H_num
    || reg == A4_num || reg == A4_H_num
    || reg == A5_num || reg == A5_H_num
    || reg == A6_num || reg == A6_H_num
    || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point argument registers: F12..F19 (plus their _H halves).
  if ( reg == F12_num || reg == F12_H_num
    || reg == F13_num || reg == F13_H_num
    || reg == F14_num || reg == F14_H_num
    || reg == F15_num || reg == F15_H_num
    || reg == F16_num || reg == F16_H_num
    || reg == F17_num || reg == F17_H_num
    || reg == F18_num || reg == F18_H_num
    || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}
829 bool Matcher::is_spillable_arg( int reg ) {
830 return can_be_java_arg(reg);
831 }
833 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
834 return false;
835 }
837 // Register for MODL projection of divmodL
838 RegMask Matcher::modL_proj_mask() {
839 ShouldNotReachHere();
840 return RegMask();
841 }
843 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
844 return FP_REG_mask();
845 }
847 // MIPS doesn't support AES intrinsics
848 const bool Matcher::pass_original_key_for_aes() {
849 return false;
850 }
852 // The address of the call instruction needs to be 16-byte aligned to
853 // ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  // Pad so the call sequence is aligned (16 bytes, per the comment above)
  // and cannot span a cache line, so it can be patched atomically.
  // Emitted sequence:
  //   lui
  //   ori
  //   dsll
  //   ori
  //
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}
867 // The address of the call instruction needs to be 16-byte aligned to
868 // ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  // Align the call itself, not the inline-cache load that precedes it:
  //   loadIC            <--- skip (4 instructions, accounted for below)
  //
  //   lui
  //   ori
  //   nop
  //   nop
  //
  //   jalr
  //   nop
  current_offset += 4 * 4;  // step past the 4-instruction IC load
  return round_to(current_offset, alignment_required()) - current_offset;
}
884 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
885 //lui
886 //ori
887 //dsll
888 //ori
890 //jalr
891 //nop
893 return round_to(current_offset, alignment_required()) - current_offset;
894 }
896 int CallLeafDirectNode::compute_padding(int current_offset) const {
897 //lui
898 //ori
899 //dsll
900 //ori
902 //jalr
903 //nop
905 return round_to(current_offset, alignment_required()) - current_offset;
906 }
908 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
909 //lui
910 //ori
911 //dsll
912 //ori
914 //jalr
915 //nop
917 return round_to(current_offset, alignment_required()) - current_offset;
918 }
920 // If CPU can load and store mis-aligned doubles directly then no fixup is
921 // needed. Else we split the double into 2 integer pieces and move it
922 // piece-by-piece. Only happens when passing doubles into C code as the
923 // Java calling convention forces doubles to be aligned.
924 const bool Matcher::misaligned_doubles_ok = false;
925 // Do floats take an entire double register or just half?
926 //const bool Matcher::float_in_double = true;
927 bool Matcher::float_in_double() { return false; }
928 // Threshold size for cleararray.
929 const int Matcher::init_array_short_size = 8 * BytesPerLong;
930 // Do ints take an entire long register or just half?
931 const bool Matcher::int_in_long = true;
932 // Is it better to copy float constants, or load them directly from memory?
933 // Intel can load a float constant from a direct address, requiring no
934 // extra registers. Most RISCs will have to materialize an address into a
935 // register first, so they would do better to copy the constant from stack.
936 const bool Matcher::rematerialize_float_constants = false;
937 // Advertise here if the CPU requires explicit rounding operations
938 // to implement the UseStrictFP mode.
939 const bool Matcher::strict_fp_requires_explicit_rounding = false;
940 // The ecx parameter to rep stos for the ClearArray node is in dwords.
941 const bool Matcher::init_array_count_is_in_bytes = false;
944 // Indicate if the safepoint node needs the polling page as an input.
945 // Since MIPS doesn't have absolute addressing, it needs.
bool SafePointNode::needs_polling_address_input() {
  // NOTE(review): the comment above says that, lacking absolute
  // addressing, MIPS "needs" the polling page as an input -- yet this
  // returns false.  Presumably the poll address is materialized by the
  // safepoint encoding itself; confirm against the safepoint instructs.
  return false;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  // A static Java call is emitted as a fixed 6-instruction sequence
  // (general_jal):
  //   lui
  //   ori
  //   nop
  //   nop
  //   jalr
  //   nop   (delay slot)
  // 6 instructions * 4 bytes = 24 bytes to the return address.
  return 24;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  // Inline-cache setup, 4 instructions:
  //   lui  IC_Klass,
  //   ori  IC_Klass,
  //   dsll IC_Klass
  //   ori  IC_Klass
  // followed by the 6-instruction call sequence (general_jal):
  //   lui T9
  //   ori T9
  //   nop
  //   nop
  //   jalr T9
  //   nop   (delay slot)
  // Total: 4*4 + 6*4 = 40 bytes to the return address.
  return 4 * 4 + 4 * 6;
}
978 //=============================================================================
980 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
981 enum RC { rc_bad, rc_int, rc_float, rc_stack };
982 static enum RC rc_class( OptoReg::Name reg ) {
983 if( !OptoReg::is_valid(reg) ) return rc_bad;
984 if (OptoReg::is_stack(reg)) return rc_stack;
985 VMReg r = OptoReg::as_VMReg(reg);
986 if (r->is_Register()) return rc_int;
987 assert(r->is_FloatRegister(), "must be");
988 return rc_float;
989 }
// Common worker behind MachSpillCopyNode::{format,emit,size}: move a value
// (one 32-bit slot, or an even-aligned adjacent 64-bit pair) between any
// combination of stack slot, general register and float register.  AT is
// the scratch register for memory-to-memory copies; T9 is never used here.
//   cbuf != NULL            -> emit real instructions
//   cbuf == NULL, st given  -> print an assembly listing (non-PRODUCT only)
//   do_size                 -> neither emit nor print, just count bytes
// Returns the size in bytes of the copy sequence.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      // A 64-bit value occupies an even-aligned, adjacent pair of slots.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // lw sign-extends (true ints); lwu zero-extends (other 32-bit values).
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI keeps only the low 32 bits; other types copy all 64 bits.
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Any src/dst class combination not handled above is a bug.
  assert(0," foo ");
  Unimplemented();
  return size;

}
1408 #ifndef PRODUCT
1409 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1410 implementation( NULL, ra_, false, st );
1411 }
1412 #endif
1414 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1415 implementation( &cbuf, ra_, false, NULL );
1416 }
1418 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1419 return implementation( NULL, ra_, true, NULL );
1420 }
1422 //=============================================================================
1423 #
1425 #ifndef PRODUCT
1426 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
1427 st->print("INT3");
1428 }
1429 #endif
1431 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
1432 MacroAssembler _masm(&cbuf);
1433 __ int3();
1434 }
1436 uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
1437 return MachNode::size(ra_);
1438 }
1441 //=============================================================================
1442 #ifndef PRODUCT
1443 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1444 Compile *C = ra_->C;
1445 int framesize = C->frame_size_in_bytes();
1447 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1449 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1450 st->cr(); st->print("\t");
1451 if (UseLoongsonISA) {
1452 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1453 } else {
1454 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1455 st->cr(); st->print("\t");
1456 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1457 }
1459 if( do_polling() && C->is_method_compilation() ) {
1460 st->print("Poll Safepoint # MachEpilogNode");
1461 }
1462 }
1463 #endif
// Method epilogue: pop the frame, restore RA/FP from just below the
// caller's SP, and (for method compilations) emit the return safepoint
// poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP were saved at SP-8/SP-16 relative to
  // the restored SP (see MachPrologNode).
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson quad-load restores RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Return poll: load from the polling page with a poll_return
    // relocation so the runtime can identify this as a safepoint poll.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
1488 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1489 return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
1490 }
1492 int MachEpilogNode::reloc() const {
1493 return 0; // a large enough number
1494 }
1496 const Pipeline * MachEpilogNode::pipeline() const {
1497 return MachNode::pipeline_class();
1498 }
1500 int MachEpilogNode::safepoint_offset() const { return 0; }
1502 //=============================================================================
1504 #ifndef PRODUCT
1505 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1506 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1507 int reg = ra_->get_reg_first(this);
1508 st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
1509 }
1510 #endif
1513 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1514 return 4;
1515 }
1517 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1518 MacroAssembler _masm(&cbuf);
1519 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1520 int reg = ra_->get_encode(this);
1522 __ addi(as_Register(reg), SP, offset);
1523 /*
1524 if( offset >= 128 ) {
1525 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1526 emit_rm(cbuf, 0x2, reg, 0x04);
1527 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1528 emit_d32(cbuf, offset);
1529 }
1530 else {
1531 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1532 emit_rm(cbuf, 0x1, reg, 0x04);
1533 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1534 emit_d8(cbuf, offset);
1535 }
1536 */
1537 }
1540 //static int sizeof_FFree_Float_Stack_All = -1;
1542 int MachCallRuntimeNode::ret_addr_offset() {
1543 //lui
1544 //ori
1545 //dsll
1546 //ori
1547 //jalr
1548 //nop
1549 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1550 return NativeCall::instruction_size;
1551 // return 16;
1552 }
1558 //=============================================================================
1559 #ifndef PRODUCT
1560 void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
1561 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
1562 }
1563 #endif
1565 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1566 MacroAssembler _masm(&cbuf);
1567 int i = 0;
1568 for(i = 0; i < _count; i++)
1569 __ nop();
1570 }
1572 uint MachNopNode::size(PhaseRegAlloc *) const {
1573 return 4 * _count;
1574 }
1575 const Pipeline* MachNopNode::pipeline() const {
1576 return MachNode::pipeline_class();
1577 }
1579 //=============================================================================
1581 //=============================================================================
1582 #ifndef PRODUCT
1583 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1584 st->print_cr("load_klass(T9, T0)");
1585 st->print_cr("\tbeq(T9, iCache, L)");
1586 st->print_cr("\tnop");
1587 st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
1588 st->print_cr("\tnop");
1589 st->print_cr("\tnop");
1590 st->print_cr(" L:");
1591 }
1592 #endif
// Unverified entry point: check the receiver's klass against the inline
// cache register; on mismatch, tail-jump to the shared IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();   // branch delay slot

  // Klass mismatch: jump to the IC-miss stub (patchable 48-bit address).
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
1619 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1620 return MachNode::size(ra_);
1621 }
1625 //=============================================================================
// The constant-table base is allocated from the P (pointer) register class.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

// Constants are reached through an absolute base address, so the table
// base carries no extra offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1633 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1634 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1635 ShouldNotReachHere();
1636 }
// Materialize the constant-table base address into this node's output
// register with a patchable 48-bit immediate sequence, tagged with an
// internal_pc relocation so the address survives code relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  // Nothing to do when the method has no constants.
  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
1657 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1658 // patchable_set48 (4 insts)
1659 return 4 * 4;
1660 }
1662 #ifndef PRODUCT
1663 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1664 Register r = as_Register(ra_->get_encode(this));
1665 st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
1666 }
1667 #endif
1670 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method prologue: optional stack bang, RA/FP save
// below the incoming SP, FP setup and frame allocation.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson quad-store saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Method prologue: optional stack-overflow bang, save RA/FP just below
// the incoming SP, establish FP, allocate the frame, and mark the frame
// complete for the runtime.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // NOTE(review): format() gates the bang on need_stack_bang(bangsize),
  // but here framesize is used and bangsize is otherwise unused -- confirm
  // which size was intended.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (UseLoongsonISA) {
    // Loongson quad-store saves RA and FP in one instruction.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
1736 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
1737 //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
1738 return MachNode::size(ra_); // too many variables; just compute it the hard way
1739 }
1741 int MachPrologNode::reloc() const {
1742 return 0; // a large enough number
1743 }
1745 %}
1747 //----------ENCODING BLOCK-----------------------------------------------------
1748 // This block specifies the encoding classes used by the compiler to output
1749 // byte streams. Encoding classes generate functions which are called by
1750 // Machine Instruction Nodes in order to generate the bit encoding of the
1751 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
1753 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1754 // operand to generate a function which returns its register number when
1755 // queried. CONST_INTER causes an operand to generate a function which
1756 // returns the value of the constant when queried. MEMORY_INTER causes an
1757 // operand to generate four functions which return the Base Register, the
1758 // Index Register, the Scale Value, and the Offset Value of the operand when
1759 // queried. COND_INTER causes an operand to generate six functions which
1760 // return the encoding code (ie - encoding bits for the instruction)
1761 // associated with each basic boolean condition for a conditional instruction.
1762 // Instructions specify two basic values for encoding. They use the
1763 // ins_encode keyword to specify their encoding class (which must be one of
1764 // the class names specified in the encoding block), and they use the
1765 // opcode keyword to specify, in order, their primary, secondary, and
1766 // tertiary opcode. Only the opcode sections which a particular instruction
1767 // needs for encoding need to be specified.
1768 encode %{
1769 /*
1770 Alias:
1771 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1772 118 B14: # B19 B15 <- B13 Freq: 0.899955
1773 118 add S1, S2, V0 #@addP_reg_reg
1774 11c lb S0, [S1 + #-8257524] #@loadB
1775 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1776 */
  // Load byte signed: dst <- (int8) mem[base + (index << scale) + disp].
  // Uses the Loongson gslbx (base+index+imm8 addressing) when available;
  // otherwise computes the effective address into AT (with T9 as a second
  // scratch for displacements that do not fit in 16 bits) and issues lb.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte unsigned: dst <- (uint8) mem[base + (index << scale) + disp].
  // Computes the effective address into AT (with T9 as a second scratch
  // for displacements that do not fit in 16 bits) and issues lbu.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register: mem[base + (index << scale) + disp] <- src.
  // Chooses between Loongson gssbx (its immediate is only 8 bits signed)
  // and addu/sb sequences depending on the displacement size; AT and T9
  // are used as scratch registers.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement does not fit in 16 bits: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
// Store-byte-immediate encoding: emits "store imm8 to [base + (index << scale) + disp]".
// AT and T9 are the only assembler scratch registers available here; value == 0 is
// stored straight from the hard-wired zero register R0 to save one 'move'.
// With UseLoongsonISA, the gssbx reg+reg indexed store folds the final address add
// into the store itself when disp fits in a signed 8-bit immediate.
1936 enc_class store_B_immI_enc (memory mem, immI8 src) %{
1937 MacroAssembler _masm(&cbuf);
1938 int base = $mem$$base;
1939 int index = $mem$$index;
1940 int scale = $mem$$scale;
1941 int disp = $mem$$disp;
1942 int value = $src$$constant;
1944 if( index != 0 ) {
1945 if (!UseLoongsonISA) {
// Plain MIPS path: materialize base + (index << scale) in AT first.
1946 if (scale == 0) {
1947 __ daddu(AT, as_Register(base), as_Register(index));
1948 } else {
1949 __ dsll(AT, as_Register(index), scale);
1950 __ daddu(AT, as_Register(base), AT);
1951 }
1952 if( Assembler::is_simm16(disp) ) {
1953 if (value == 0) {
1954 __ sb(R0, AT, disp);
1955 } else {
1956 __ move(T9, value);
1957 __ sb(T9, AT, disp);
1958 }
1959 } else {
// disp does not fit sb's 16-bit offset: add it into AT explicitly.
1960 if (value == 0) {
1961 __ move(T9, disp);
1962 __ daddu(AT, AT, T9);
1963 __ sb(R0, AT, 0);
1964 } else {
1965 __ move(T9, disp);
1966 __ daddu(AT, AT, T9);
1967 __ move(T9, value);
1968 __ sb(T9, AT, 0);
1969 }
1970 }
1971 } else {
// Loongson path, scale == 0.
1973 if (scale == 0) {
1974 if( Assembler::is_simm(disp, 8) ) {
1975 if (value == 0) {
1976 __ gssbx(R0, as_Register(base), as_Register(index), disp);
1977 } else {
1978 __ move(T9, value);
1979 __ gssbx(T9, as_Register(base), as_Register(index), disp);
1980 }
1981 } else if( Assembler::is_simm16(disp) ) {
1982 __ daddu(AT, as_Register(base), as_Register(index));
1983 if (value == 0) {
1984 __ sb(R0, AT, disp);
1985 } else {
1986 __ move(T9, value);
1987 __ sb(T9, AT, disp);
1988 }
1989 } else {
1990 if (value == 0) {
1991 __ daddu(AT, as_Register(base), as_Register(index));
1992 __ move(T9, disp);
1993 __ gssbx(R0, AT, T9, 0);
1994 } else {
// NOTE(review): this arm forms (base + disp) in AT and lets gssbx add
// 'index' — same effective address base+index+disp as the value==0 arm,
// just accumulated in a different operand order (T9 must hold 'value').
1995 __ move(AT, disp);
1996 __ move(T9, value);
1997 __ daddu(AT, as_Register(base), AT);
1998 __ gssbx(T9, AT, as_Register(index), 0);
1999 }
2000 }
2002 } else {
// Loongson path, scale != 0: AT holds index << scale.
2004 if( Assembler::is_simm(disp, 8) ) {
2005 __ dsll(AT, as_Register(index), scale);
2006 if (value == 0) {
2007 __ gssbx(R0, as_Register(base), AT, disp);
2008 } else {
2009 __ move(T9, value);
2010 __ gssbx(T9, as_Register(base), AT, disp);
2011 }
2012 } else if( Assembler::is_simm16(disp) ) {
2013 __ dsll(AT, as_Register(index), scale);
2014 __ daddu(AT, as_Register(base), AT);
2015 if (value == 0) {
2016 __ sb(R0, AT, disp);
2017 } else {
2018 __ move(T9, value);
2019 __ sb(T9, AT, disp);
2020 }
2021 } else {
2022 __ dsll(AT, as_Register(index), scale);
2023 if (value == 0) {
2024 __ daddu(AT, as_Register(base), AT);
2025 __ move(T9, disp);
2026 __ gssbx(R0, AT, T9, 0);
2027 } else {
// AT = (index << scale) + disp; gssbx adds 'base' to complete the address.
2028 __ move(T9, disp);
2029 __ daddu(AT, AT, T9);
2030 __ move(T9, value);
2031 __ gssbx(T9, as_Register(base), AT, 0);
2032 }
2033 }
2034 }
2035 }
2036 } else {
// No index register: address is base + disp only.
2037 if( Assembler::is_simm16(disp) ) {
2038 if (value == 0) {
2039 __ sb(R0, as_Register(base), disp);
2040 } else {
2041 __ move(AT, value);
2042 __ sb(AT, as_Register(base), disp);
2043 }
2044 } else {
2045 if (value == 0) {
2046 __ move(T9, disp);
2047 if (UseLoongsonISA) {
2048 __ gssbx(R0, as_Register(base), T9, 0);
2049 } else {
2050 __ daddu(AT, as_Register(base), T9);
2051 __ sb(R0, AT, 0);
2052 }
2053 } else {
2054 __ move(T9, disp);
2055 if (UseLoongsonISA) {
2056 __ move(AT, value);
2057 __ gssbx(AT, as_Register(base), T9, 0);
2058 } else {
2059 __ daddu(AT, as_Register(base), T9);
2060 __ move(T9, value);
2061 __ sb(T9, AT, 0);
2062 }
2063 }
2064 }
2065 }
2066 %}
// Same byte-immediate store as store_B_immI_enc, but followed by a full
// '__ sync()' memory barrier after the store (see last statement).
// Addressing-mode dispatch: Loongson gssbx when disp fits simm8, plain
// sb with simm16 offset otherwise, explicit adds through the AT/T9
// scratch registers for large displacements. value == 0 goes through R0.
2069 enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
2070 MacroAssembler _masm(&cbuf);
2071 int base = $mem$$base;
2072 int index = $mem$$index;
2073 int scale = $mem$$scale;
2074 int disp = $mem$$disp;
2075 int value = $src$$constant;
2077 if( index != 0 ) {
2078 if ( UseLoongsonISA ) {
2079 if ( Assembler::is_simm(disp,8) ) {
2080 if ( scale == 0 ) {
2081 if ( value == 0 ) {
2082 __ gssbx(R0, as_Register(base), as_Register(index), disp);
2083 } else {
2084 __ move(AT, value);
2085 __ gssbx(AT, as_Register(base), as_Register(index), disp);
2086 }
2087 } else {
2088 __ dsll(AT, as_Register(index), scale);
2089 if ( value == 0 ) {
2090 __ gssbx(R0, as_Register(base), AT, disp);
2091 } else {
2092 __ move(T9, value);
2093 __ gssbx(T9, as_Register(base), AT, disp);
2094 }
2095 }
2096 } else if ( Assembler::is_simm16(disp) ) {
2097 if ( scale == 0 ) {
2098 __ daddu(AT, as_Register(base), as_Register(index));
2099 if ( value == 0 ){
2100 __ sb(R0, AT, disp);
2101 } else {
2102 __ move(T9, value);
2103 __ sb(T9, AT, disp);
2104 }
2105 } else {
2106 __ dsll(AT, as_Register(index), scale);
2107 __ daddu(AT, as_Register(base), AT);
2108 if ( value == 0 ) {
2109 __ sb(R0, AT, disp);
2110 } else {
2111 __ move(T9, value);
2112 __ sb(T9, AT, disp);
2113 }
2114 }
2115 } else {
// Large disp: accumulate (index-side + disp) in AT, gssbx adds base.
2116 if ( scale == 0 ) {
2117 __ move(AT, disp);
2118 __ daddu(AT, as_Register(index), AT);
2119 if ( value == 0 ) {
2120 __ gssbx(R0, as_Register(base), AT, 0);
2121 } else {
2122 __ move(T9, value);
2123 __ gssbx(T9, as_Register(base), AT, 0);
2124 }
2125 } else {
2126 __ dsll(AT, as_Register(index), scale);
2127 __ move(T9, disp);
2128 __ daddu(AT, AT, T9);
2129 if ( value == 0 ) {
2130 __ gssbx(R0, as_Register(base), AT, 0);
2131 } else {
2132 __ move(T9, value);
2133 __ gssbx(T9, as_Register(base), AT, 0);
2134 }
2135 }
2136 }
2137 } else { //not use loongson isa
2138 if (scale == 0) {
2139 __ daddu(AT, as_Register(base), as_Register(index));
2140 } else {
2141 __ dsll(AT, as_Register(index), scale);
2142 __ daddu(AT, as_Register(base), AT);
2143 }
2144 if( Assembler::is_simm16(disp) ) {
2145 if (value == 0) {
2146 __ sb(R0, AT, disp);
2147 } else {
2148 __ move(T9, value);
2149 __ sb(T9, AT, disp);
2150 }
2151 } else {
2152 if (value == 0) {
2153 __ move(T9, disp);
2154 __ daddu(AT, AT, T9);
2155 __ sb(R0, AT, 0);
2156 } else {
2157 __ move(T9, disp);
2158 __ daddu(AT, AT, T9);
2159 __ move(T9, value);
2160 __ sb(T9, AT, 0);
2161 }
2162 }
2163 }
2164 } else {
// No index register: address is base + disp.
2165 if ( UseLoongsonISA ){
2166 if ( Assembler::is_simm16(disp) ){
2167 if ( value == 0 ) {
2168 __ sb(R0, as_Register(base), disp);
2169 } else {
2170 __ move(AT, value);
2171 __ sb(AT, as_Register(base), disp);
2172 }
2173 } else {
2174 __ move(AT, disp);
2175 if ( value == 0 ) {
2176 __ gssbx(R0, as_Register(base), AT, 0);
2177 } else {
2178 __ move(T9, value);
2179 __ gssbx(T9, as_Register(base), AT, 0);
2180 }
2181 }
2182 } else {
2183 if( Assembler::is_simm16(disp) ) {
2184 if (value == 0) {
2185 __ sb(R0, as_Register(base), disp);
2186 } else {
2187 __ move(AT, value);
2188 __ sb(AT, as_Register(base), disp);
2189 }
2190 } else {
2191 if (value == 0) {
2192 __ move(T9, disp);
2193 __ daddu(AT, as_Register(base), T9);
2194 __ sb(R0, AT, 0);
2195 } else {
2196 __ move(T9, disp);
2197 __ daddu(AT, as_Register(base), T9);
2198 __ move(T9, value);
2199 __ sb(T9, AT, 0);
2200 }
2201 }
2202 }
2203 }
// Memory barrier after the store — this is the '_sync' variant's whole
// difference from store_B_immI_enc.
2205 __ sync();
2206 %}
2208 // Load Short (16bit signed)
// Emits "dst = sign-extended 16-bit load from [base + (index << scale) + disp]".
// Loongson gslhx folds the final address add when disp fits simm8; otherwise
// plain lh with a simm16 offset, or an explicit add via AT/T9 for large disp.
2209 enc_class load_S_enc (mRegI dst, memory mem) %{
2210 MacroAssembler _masm(&cbuf);
2211 int dst = $dst$$reg;
2212 int base = $mem$$base;
2213 int index = $mem$$index;
2214 int scale = $mem$$scale;
2215 int disp = $mem$$disp;
2217 if( index != 0 ) {
2218 if ( UseLoongsonISA ) {
2219 if ( Assembler::is_simm(disp, 8) ) {
2220 if (scale == 0) {
2221 __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
2222 } else {
2223 __ dsll(AT, as_Register(index), scale);
2224 __ gslhx(as_Register(dst), as_Register(base), AT, disp);
2225 }
2226 } else if ( Assembler::is_simm16(disp) ) {
2227 if (scale == 0) {
2228 __ daddu(AT, as_Register(base), as_Register(index));
2229 __ lh(as_Register(dst), AT, disp);
2230 } else {
2231 __ dsll(AT, as_Register(index), scale);
2232 __ daddu(AT, as_Register(base), AT);
2233 __ lh(as_Register(dst), AT, disp);
2234 }
2235 } else {
// Large disp: accumulate (index-side + disp) in AT, gslhx adds base.
2236 if (scale == 0) {
2237 __ move(AT, disp);
2238 __ daddu(AT, as_Register(index), AT);
2239 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2240 } else {
2241 __ dsll(AT, as_Register(index), scale);
2242 __ move(T9, disp);
2243 __ daddu(AT, AT, T9);
2244 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2245 }
2246 }
2247 } else { // not use loongson isa
2248 if (scale == 0) {
2249 __ daddu(AT, as_Register(base), as_Register(index));
2250 } else {
2251 __ dsll(AT, as_Register(index), scale);
2252 __ daddu(AT, as_Register(base), AT);
2253 }
2254 if( Assembler::is_simm16(disp) ) {
2255 __ lh(as_Register(dst), AT, disp);
2256 } else {
2257 __ move(T9, disp);
2258 __ daddu(AT, AT, T9);
2259 __ lh(as_Register(dst), AT, 0);
2260 }
2261 }
2262 } else { // index is 0
2263 if ( UseLoongsonISA ) {
2264 if ( Assembler::is_simm16(disp) ) {
2265 __ lh(as_Register(dst), as_Register(base), disp);
2266 } else {
2267 __ move(T9, disp);
2268 __ gslhx(as_Register(dst), as_Register(base), T9, 0);
2269 }
2270 } else { //not use loongson isa
2271 if( Assembler::is_simm16(disp) ) {
2272 __ lh(as_Register(dst), as_Register(base), disp);
2273 } else {
2274 __ move(T9, disp);
2275 __ daddu(AT, as_Register(base), T9);
2276 __ lh(as_Register(dst), AT, 0);
2277 }
2278 }
2279 }
2280 %}
2282 // Load Char (16bit unsigned)
// Emits "dst = zero-extended 16-bit load from [base + (index << scale) + disp]".
// AT/T9 are scratch. All address arithmetic must be 64-bit.
//
// Fix: the large-disp indexed arm used 'addu' (32-bit add, result
// sign-extended from bit 31) to add 'disp' into the 64-bit address in AT,
// while every other address add in this encoding uses 'daddu'. addu would
// corrupt addresses at or above 2^31; use the 64-bit daddu instead.
2283 enc_class load_C_enc (mRegI dst, memory mem) %{
2284 MacroAssembler _masm(&cbuf);
2285 int dst = $dst$$reg;
2286 int base = $mem$$base;
2287 int index = $mem$$index;
2288 int scale = $mem$$scale;
2289 int disp = $mem$$disp;
2291 if( index != 0 ) {
2292 if (scale == 0) {
2293 __ daddu(AT, as_Register(base), as_Register(index));
2294 } else {
2295 __ dsll(AT, as_Register(index), scale);
2296 __ daddu(AT, as_Register(base), AT);
2297 }
2298 if( Assembler::is_simm16(disp) ) {
2299 __ lhu(as_Register(dst), AT, disp);
2300 } else {
2301 __ move(T9, disp);
2302 __ daddu(AT, AT, T9); // was addu: 32-bit add truncates 64-bit addresses
2303 __ lhu(as_Register(dst), AT, 0);
2304 }
2305 } else {
2306 if( Assembler::is_simm16(disp) ) {
2307 __ lhu(as_Register(dst), as_Register(base), disp);
2308 } else {
2309 __ move(T9, disp);
2310 __ daddu(AT, as_Register(base), T9);
2311 __ lhu(as_Register(dst), AT, 0);
2312 }
2313 }
2314 %}
2316 // Store Char (16bit unsigned)
// Emits "store 16-bit src to [base + (index << scale) + disp]" (sh / gsshx).
// NOTE(review): this encoding forms addresses with the 32-bit 'addu'
// (result sign-extended from bit 31) rather than 'daddu' as the
// pointer/long encodings below do. That is only safe while all heap
// addresses fit in the low 2 GB — TODO confirm, or switch to daddu.
2317 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2318 MacroAssembler _masm(&cbuf);
2319 int src = $src$$reg;
2320 int base = $mem$$base;
2321 int index = $mem$$index;
2322 int scale = $mem$$scale;
2323 int disp = $mem$$disp;
2325 if( index != 0 ) {
2326 if( Assembler::is_simm16(disp) ) {
2327 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
// Loongson indexed store folds the address add when disp fits simm8.
2328 if (scale == 0) {
2329 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2330 } else {
2331 __ dsll(AT, as_Register(index), scale);
2332 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2333 }
2334 } else {
2335 if (scale == 0) {
2336 __ addu(AT, as_Register(base), as_Register(index));
2337 } else {
2338 __ dsll(AT, as_Register(index), scale);
2339 __ addu(AT, as_Register(base), AT);
2340 }
2341 __ sh(as_Register(src), AT, disp);
2342 }
2343 } else {
// disp exceeds 16 bits: materialize it in T9.
2344 if (scale == 0) {
2345 __ addu(AT, as_Register(base), as_Register(index));
2346 } else {
2347 __ dsll(AT, as_Register(index), scale);
2348 __ addu(AT, as_Register(base), AT);
2349 }
2350 __ move(T9, disp);
2351 if( UseLoongsonISA ) {
2352 __ gsshx(as_Register(src), AT, T9, 0);
2353 } else {
2354 __ addu(AT, AT, T9);
2355 __ sh(as_Register(src), AT, 0);
2356 }
2357 }
2358 } else {
2359 if( Assembler::is_simm16(disp) ) {
2360 __ sh(as_Register(src), as_Register(base), disp);
2361 } else {
2362 __ move(T9, disp);
2363 if( UseLoongsonISA ) {
2364 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2365 } else {
2366 __ addu(AT, as_Register(base), T9);
2367 __ sh(as_Register(src), AT, 0);
2368 }
2369 }
2370 }
2371 %}
// Store zero 16-bit value to [base + (index << scale) + disp]; the value
// always comes from the hard-wired zero register R0 (no scratch needed
// for the data side). Same addressing-mode dispatch as store_C_reg_enc.
// NOTE(review): address arithmetic uses 32-bit 'addu' (see note on
// store_C_reg_enc) — safe only if addresses stay below 2^31; verify.
2373 enc_class store_C0_enc (memory mem) %{
2374 MacroAssembler _masm(&cbuf);
2375 int base = $mem$$base;
2376 int index = $mem$$index;
2377 int scale = $mem$$scale;
2378 int disp = $mem$$disp;
2380 if( index != 0 ) {
2381 if( Assembler::is_simm16(disp) ) {
2382 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2383 if (scale == 0) {
2384 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2385 } else {
2386 __ dsll(AT, as_Register(index), scale);
2387 __ gsshx(R0, as_Register(base), AT, disp);
2388 }
2389 } else {
2390 if (scale == 0) {
2391 __ addu(AT, as_Register(base), as_Register(index));
2392 } else {
2393 __ dsll(AT, as_Register(index), scale);
2394 __ addu(AT, as_Register(base), AT);
2395 }
2396 __ sh(R0, AT, disp);
2397 }
2398 } else {
2399 if (scale == 0) {
2400 __ addu(AT, as_Register(base), as_Register(index));
2401 } else {
2402 __ dsll(AT, as_Register(index), scale);
2403 __ addu(AT, as_Register(base), AT);
2404 }
2405 __ move(T9, disp);
2406 if( UseLoongsonISA ) {
2407 __ gsshx(R0, AT, T9, 0);
2408 } else {
2409 __ addu(AT, AT, T9);
2410 __ sh(R0, AT, 0);
2411 }
2412 }
2413 } else {
2414 if( Assembler::is_simm16(disp) ) {
2415 __ sh(R0, as_Register(base), disp);
2416 } else {
2417 __ move(T9, disp);
2418 if( UseLoongsonISA ) {
2419 __ gsshx(R0, as_Register(base), T9, 0);
2420 } else {
2421 __ addu(AT, as_Register(base), T9);
2422 __ sh(R0, AT, 0);
2423 }
2424 }
2425 }
2426 %}
// Load 32-bit int: "dst = lw [base + (index << scale) + disp]" (sign-extending
// lw, or Loongson gslwx when disp fits simm8). AT/T9 are scratch.
// NOTE(review): address arithmetic uses 32-bit 'addu' (see note on
// store_C_reg_enc) — safe only if addresses stay below 2^31; verify.
2428 enc_class load_I_enc (mRegI dst, memory mem) %{
2429 MacroAssembler _masm(&cbuf);
2430 int dst = $dst$$reg;
2431 int base = $mem$$base;
2432 int index = $mem$$index;
2433 int scale = $mem$$scale;
2434 int disp = $mem$$disp;
2436 if( index != 0 ) {
2437 if( Assembler::is_simm16(disp) ) {
2438 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2439 if (scale == 0) {
2440 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2441 } else {
2442 __ dsll(AT, as_Register(index), scale);
2443 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2444 }
2445 } else {
2446 if (scale == 0) {
2447 __ addu(AT, as_Register(base), as_Register(index));
2448 } else {
2449 __ dsll(AT, as_Register(index), scale);
2450 __ addu(AT, as_Register(base), AT);
2451 }
2452 __ lw(as_Register(dst), AT, disp);
2453 }
2454 } else {
2455 if (scale == 0) {
2456 __ addu(AT, as_Register(base), as_Register(index));
2457 } else {
2458 __ dsll(AT, as_Register(index), scale);
2459 __ addu(AT, as_Register(base), AT);
2460 }
2461 __ move(T9, disp);
2462 if( UseLoongsonISA ) {
2463 __ gslwx(as_Register(dst), AT, T9, 0);
2464 } else {
2465 __ addu(AT, AT, T9);
2466 __ lw(as_Register(dst), AT, 0);
2467 }
2468 }
2469 } else {
2470 if( Assembler::is_simm16(disp) ) {
2471 __ lw(as_Register(dst), as_Register(base), disp);
2472 } else {
2473 __ move(T9, disp);
2474 if( UseLoongsonISA ) {
2475 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2476 } else {
2477 __ addu(AT, as_Register(base), T9);
2478 __ lw(as_Register(dst), AT, 0);
2479 }
2480 }
2481 }
2482 %}
// Store 32-bit int register: "sw src -> [base + (index << scale) + disp]"
// (Loongson gsswx when disp fits simm8). Mirror image of load_I_enc.
// NOTE(review): address arithmetic uses 32-bit 'addu' (see note on
// store_C_reg_enc) — safe only if addresses stay below 2^31; verify.
2484 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2485 MacroAssembler _masm(&cbuf);
2486 int src = $src$$reg;
2487 int base = $mem$$base;
2488 int index = $mem$$index;
2489 int scale = $mem$$scale;
2490 int disp = $mem$$disp;
2492 if( index != 0 ) {
2493 if( Assembler::is_simm16(disp) ) {
2494 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2495 if (scale == 0) {
2496 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2497 } else {
2498 __ dsll(AT, as_Register(index), scale);
2499 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2500 }
2501 } else {
2502 if (scale == 0) {
2503 __ addu(AT, as_Register(base), as_Register(index));
2504 } else {
2505 __ dsll(AT, as_Register(index), scale);
2506 __ addu(AT, as_Register(base), AT);
2507 }
2508 __ sw(as_Register(src), AT, disp);
2509 }
2510 } else {
2511 if (scale == 0) {
2512 __ addu(AT, as_Register(base), as_Register(index));
2513 } else {
2514 __ dsll(AT, as_Register(index), scale);
2515 __ addu(AT, as_Register(base), AT);
2516 }
2517 __ move(T9, disp);
2518 if( UseLoongsonISA ) {
2519 __ gsswx(as_Register(src), AT, T9, 0);
2520 } else {
2521 __ addu(AT, AT, T9);
2522 __ sw(as_Register(src), AT, 0);
2523 }
2524 }
2525 } else {
2526 if( Assembler::is_simm16(disp) ) {
2527 __ sw(as_Register(src), as_Register(base), disp);
2528 } else {
2529 __ move(T9, disp);
2530 if( UseLoongsonISA ) {
2531 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2532 } else {
2533 __ addu(AT, as_Register(base), T9);
2534 __ sw(as_Register(src), AT, 0);
2535 }
2536 }
2537 }
2538 %}
// Store 32-bit immediate: "sw value -> [base + (index << scale) + disp]".
// value == 0 is stored straight from R0; otherwise the constant is first
// materialized in T9 (or AT). Loongson gsswx folds the final address add
// when possible. Note this path uses the 64-bit 'daddu' throughout.
2540 enc_class store_I_immI_enc (memory mem, immI src) %{
2541 MacroAssembler _masm(&cbuf);
2542 int base = $mem$$base;
2543 int index = $mem$$index;
2544 int scale = $mem$$scale;
2545 int disp = $mem$$disp;
2546 int value = $src$$constant;
2548 if( index != 0 ) {
2549 if ( UseLoongsonISA ) {
2550 if ( Assembler::is_simm(disp, 8) ) {
2551 if ( scale == 0 ) {
2552 if ( value == 0 ) {
2553 __ gsswx(R0, as_Register(base), as_Register(index), disp);
2554 } else {
2555 __ move(T9, value);
2556 __ gsswx(T9, as_Register(base), as_Register(index), disp);
2557 }
2558 } else {
2559 __ dsll(AT, as_Register(index), scale);
2560 if ( value == 0 ) {
2561 __ gsswx(R0, as_Register(base), AT, disp);
2562 } else {
2563 __ move(T9, value);
2564 __ gsswx(T9, as_Register(base), AT, disp);
2565 }
2566 }
2567 } else if ( Assembler::is_simm16(disp) ) {
2568 if ( scale == 0 ) {
2569 __ daddu(AT, as_Register(base), as_Register(index));
2570 if ( value == 0 ) {
2571 __ sw(R0, AT, disp);
2572 } else {
2573 __ move(T9, value);
2574 __ sw(T9, AT, disp);
2575 }
2576 } else {
2577 __ dsll(AT, as_Register(index), scale);
2578 __ daddu(AT, as_Register(base), AT);
2579 if ( value == 0 ) {
2580 __ sw(R0, AT, disp);
2581 } else {
2582 __ move(T9, value);
2583 __ sw(T9, AT, disp);
2584 }
2585 }
2586 } else {
// Large disp: accumulate (index-side + disp) in AT, gsswx adds base.
2587 if ( scale == 0 ) {
2588 __ move(T9, disp);
2589 __ daddu(AT, as_Register(index), T9);
2590 if ( value ==0 ) {
2591 __ gsswx(R0, as_Register(base), AT, 0);
2592 } else {
2593 __ move(T9, value);
2594 __ gsswx(T9, as_Register(base), AT, 0);
2595 }
2596 } else {
2597 __ dsll(AT, as_Register(index), scale);
2598 __ move(T9, disp);
2599 __ daddu(AT, AT, T9);
2600 if ( value == 0 ) {
2601 __ gsswx(R0, as_Register(base), AT, 0);
2602 } else {
2603 __ move(T9, value);
2604 __ gsswx(T9, as_Register(base), AT, 0);
2605 }
2606 }
2607 }
2608 } else { //not use loongson isa
2609 if (scale == 0) {
2610 __ daddu(AT, as_Register(base), as_Register(index));
2611 } else {
2612 __ dsll(AT, as_Register(index), scale);
2613 __ daddu(AT, as_Register(base), AT);
2614 }
2615 if( Assembler::is_simm16(disp) ) {
2616 if (value == 0) {
2617 __ sw(R0, AT, disp);
2618 } else {
2619 __ move(T9, value);
2620 __ sw(T9, AT, disp);
2621 }
2622 } else {
2623 if (value == 0) {
2624 __ move(T9, disp);
2625 __ daddu(AT, AT, T9);
2626 __ sw(R0, AT, 0);
2627 } else {
2628 __ move(T9, disp);
2629 __ daddu(AT, AT, T9);
2630 __ move(T9, value);
2631 __ sw(T9, AT, 0);
2632 }
2633 }
2634 }
2635 } else {
// No index register: address is base + disp.
2636 if ( UseLoongsonISA ) {
2637 if ( Assembler::is_simm16(disp) ) {
2638 if ( value == 0 ) {
2639 __ sw(R0, as_Register(base), disp);
2640 } else {
2641 __ move(AT, value);
2642 __ sw(AT, as_Register(base), disp);
2643 }
2644 } else {
2645 __ move(T9, disp);
2646 if ( value == 0 ) {
2647 __ gsswx(R0, as_Register(base), T9, 0);
2648 } else {
2649 __ move(AT, value);
2650 __ gsswx(AT, as_Register(base), T9, 0);
2651 }
2652 }
2653 } else {
2654 if( Assembler::is_simm16(disp) ) {
2655 if (value == 0) {
2656 __ sw(R0, as_Register(base), disp);
2657 } else {
2658 __ move(AT, value);
2659 __ sw(AT, as_Register(base), disp);
2660 }
2661 } else {
2662 if (value == 0) {
2663 __ move(T9, disp);
2664 __ daddu(AT, as_Register(base), T9);
2665 __ sw(R0, AT, 0);
2666 } else {
2667 __ move(T9, disp);
2668 __ daddu(AT, as_Register(base), T9);
2669 __ move(T9, value);
2670 __ sw(T9, AT, 0);
2671 }
2672 }
2673 }
2674 }
2675 %}
// Load narrow (compressed) oop: zero-extending 32-bit load (lwu) from
// [base + (index << scale) + disp]. The operand must carry no relocation
// on disp (asserted below); large displacements go through set64 into T9.
2677 enc_class load_N_enc (mRegN dst, memory mem) %{
2678 MacroAssembler _masm(&cbuf);
2679 int dst = $dst$$reg;
2680 int base = $mem$$base;
2681 int index = $mem$$index;
2682 int scale = $mem$$scale;
2683 int disp = $mem$$disp;
2684 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2685 assert(disp_reloc == relocInfo::none, "cannot have disp");
2687 if( index != 0 ) {
2688 if (scale == 0) {
2689 __ daddu(AT, as_Register(base), as_Register(index));
2690 } else {
2691 __ dsll(AT, as_Register(index), scale);
2692 __ daddu(AT, as_Register(base), AT);
2693 }
2694 if( Assembler::is_simm16(disp) ) {
2695 __ lwu(as_Register(dst), AT, disp);
2696 } else {
2697 __ set64(T9, disp);
2698 __ daddu(AT, AT, T9);
2699 __ lwu(as_Register(dst), AT, 0);
2700 }
2701 } else {
2702 if( Assembler::is_simm16(disp) ) {
2703 __ lwu(as_Register(dst), as_Register(base), disp);
2704 } else {
2705 __ set64(T9, disp);
2706 __ daddu(AT, as_Register(base), T9);
2707 __ lwu(as_Register(dst), AT, 0);
2708 }
2709 }
2711 %}
// Load pointer (64-bit): "dst = ld [base + (index << scale) + disp]"
// (Loongson gsldx when disp fits simm8). disp must carry no relocation
// (asserted). All address arithmetic is 64-bit (daddu / set64).
2714 enc_class load_P_enc (mRegP dst, memory mem) %{
2715 MacroAssembler _masm(&cbuf);
2716 int dst = $dst$$reg;
2717 int base = $mem$$base;
2718 int index = $mem$$index;
2719 int scale = $mem$$scale;
2720 int disp = $mem$$disp;
2721 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2722 assert(disp_reloc == relocInfo::none, "cannot have disp");
2724 if( index != 0 ) {
2725 if ( UseLoongsonISA ) {
2726 if ( Assembler::is_simm(disp, 8) ) {
2727 if ( scale != 0 ) {
2728 __ dsll(AT, as_Register(index), scale);
2729 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2730 } else {
2731 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2732 }
2733 } else if ( Assembler::is_simm16(disp) ){
2734 if ( scale != 0 ) {
2735 __ dsll(AT, as_Register(index), scale);
2736 __ daddu(AT, AT, as_Register(base));
2737 } else {
2738 __ daddu(AT, as_Register(index), as_Register(base));
2739 }
2740 __ ld(as_Register(dst), AT, disp);
2741 } else {
// Large disp: accumulate (index-side + disp) in AT, gsldx adds base.
2742 if ( scale != 0 ) {
2743 __ dsll(AT, as_Register(index), scale);
2744 __ move(T9, disp);
2745 __ daddu(AT, AT, T9);
2746 } else {
2747 __ move(T9, disp);
2748 __ daddu(AT, as_Register(index), T9);
2749 }
2750 __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2751 }
2752 } else { //not use loongson isa
2753 if (scale == 0) {
2754 __ daddu(AT, as_Register(base), as_Register(index));
2755 } else {
2756 __ dsll(AT, as_Register(index), scale);
2757 __ daddu(AT, as_Register(base), AT);
2758 }
2759 if( Assembler::is_simm16(disp) ) {
2760 __ ld(as_Register(dst), AT, disp);
2761 } else {
2762 __ set64(T9, disp);
2763 __ daddu(AT, AT, T9);
2764 __ ld(as_Register(dst), AT, 0);
2765 }
2766 }
2767 } else {
2768 if ( UseLoongsonISA ) {
2769 if ( Assembler::is_simm16(disp) ){
2770 __ ld(as_Register(dst), as_Register(base), disp);
2771 } else {
2772 __ set64(T9, disp);
2773 __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2774 }
2775 } else { //not use loongson isa
2776 if( Assembler::is_simm16(disp) ) {
2777 __ ld(as_Register(dst), as_Register(base), disp);
2778 } else {
2779 __ set64(T9, disp);
2780 __ daddu(AT, as_Register(base), T9);
2781 __ ld(as_Register(dst), AT, 0);
2782 }
2783 }
2784 }
2785 // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2786 %}
// Store pointer register (64-bit): "sd src -> [base + (index << scale) + disp]"
// (Loongson gssdx when disp fits simm8). All address arithmetic is 64-bit.
2788 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2789 MacroAssembler _masm(&cbuf);
2790 int src = $src$$reg;
2791 int base = $mem$$base;
2792 int index = $mem$$index;
2793 int scale = $mem$$scale;
2794 int disp = $mem$$disp;
2796 if( index != 0 ) {
2797 if ( UseLoongsonISA ){
2798 if ( Assembler::is_simm(disp, 8) ) {
2799 if ( scale == 0 ) {
2800 __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
2801 } else {
2802 __ dsll(AT, as_Register(index), scale);
2803 __ gssdx(as_Register(src), as_Register(base), AT, disp);
2804 }
2805 } else if ( Assembler::is_simm16(disp) ) {
2806 if ( scale == 0 ) {
2807 __ daddu(AT, as_Register(base), as_Register(index));
2808 } else {
2809 __ dsll(AT, as_Register(index), scale);
2810 __ daddu(AT, as_Register(base), AT);
2811 }
2812 __ sd(as_Register(src), AT, disp);
2813 } else {
// Large disp: accumulate (index-side + disp) in AT, gssdx adds base.
2814 if ( scale == 0 ) {
2815 __ move(T9, disp);
2816 __ daddu(AT, as_Register(index), T9);
2817 } else {
2818 __ dsll(AT, as_Register(index), scale);
2819 __ move(T9, disp);
2820 __ daddu(AT, AT, T9);
2821 }
2822 __ gssdx(as_Register(src), as_Register(base), AT, 0);
2823 }
2824 } else { //not use loongson isa
2825 if (scale == 0) {
2826 __ daddu(AT, as_Register(base), as_Register(index));
2827 } else {
2828 __ dsll(AT, as_Register(index), scale);
2829 __ daddu(AT, as_Register(base), AT);
2830 }
2831 if( Assembler::is_simm16(disp) ) {
2832 __ sd(as_Register(src), AT, disp);
2833 } else {
2834 __ move(T9, disp);
2835 __ daddu(AT, AT, T9);
2836 __ sd(as_Register(src), AT, 0);
2837 }
2838 }
2839 } else {
2840 if ( UseLoongsonISA ) {
2841 if ( Assembler::is_simm16(disp) ) {
2842 __ sd(as_Register(src), as_Register(base), disp);
2843 } else {
2844 __ move(T9, disp);
2845 __ gssdx(as_Register(src), as_Register(base), T9, 0);
2846 }
2847 } else {
2848 if( Assembler::is_simm16(disp) ) {
2849 __ sd(as_Register(src), as_Register(base), disp);
2850 } else {
2851 __ move(T9, disp);
2852 __ daddu(AT, as_Register(base), T9);
2853 __ sd(as_Register(src), AT, 0);
2854 }
2855 }
2856 }
2857 %}
// Store narrow (compressed) oop register: 32-bit store (sw / gsswx) to
// [base + (index << scale) + disp]. Structure mirrors store_P_reg_enc,
// with sw/gsswx in place of sd/gssdx; address arithmetic is 64-bit.
2859 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2860 MacroAssembler _masm(&cbuf);
2861 int src = $src$$reg;
2862 int base = $mem$$base;
2863 int index = $mem$$index;
2864 int scale = $mem$$scale;
2865 int disp = $mem$$disp;
2867 if( index != 0 ) {
2868 if ( UseLoongsonISA ){
2869 if ( Assembler::is_simm(disp, 8) ) {
2870 if ( scale == 0 ) {
2871 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2872 } else {
2873 __ dsll(AT, as_Register(index), scale);
2874 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2875 }
2876 } else if ( Assembler::is_simm16(disp) ) {
2877 if ( scale == 0 ) {
2878 __ daddu(AT, as_Register(base), as_Register(index));
2879 } else {
2880 __ dsll(AT, as_Register(index), scale);
2881 __ daddu(AT, as_Register(base), AT);
2882 }
2883 __ sw(as_Register(src), AT, disp);
2884 } else {
// Large disp: accumulate (index-side + disp) in AT, gsswx adds base.
2885 if ( scale == 0 ) {
2886 __ move(T9, disp);
2887 __ daddu(AT, as_Register(index), T9);
2888 } else {
2889 __ dsll(AT, as_Register(index), scale);
2890 __ move(T9, disp);
2891 __ daddu(AT, AT, T9);
2892 }
2893 __ gsswx(as_Register(src), as_Register(base), AT, 0);
2894 }
2895 } else { //not use loongson isa
2896 if (scale == 0) {
2897 __ daddu(AT, as_Register(base), as_Register(index));
2898 } else {
2899 __ dsll(AT, as_Register(index), scale);
2900 __ daddu(AT, as_Register(base), AT);
2901 }
2902 if( Assembler::is_simm16(disp) ) {
2903 __ sw(as_Register(src), AT, disp);
2904 } else {
2905 __ move(T9, disp);
2906 __ daddu(AT, AT, T9);
2907 __ sw(as_Register(src), AT, 0);
2908 }
2909 }
2910 } else {
2911 if ( UseLoongsonISA ) {
2912 if ( Assembler::is_simm16(disp) ) {
2913 __ sw(as_Register(src), as_Register(base), disp);
2914 } else {
2915 __ move(T9, disp);
2916 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2917 }
2918 } else {
2919 if( Assembler::is_simm16(disp) ) {
2920 __ sw(as_Register(src), as_Register(base), disp);
2921 } else {
2922 __ move(T9, disp);
2923 __ daddu(AT, as_Register(base), T9);
2924 __ sw(as_Register(src), AT, 0);
2925 }
2926 }
2927 }
2928 %}
// Store null pointer: "sd R0 -> [base + (index << scale) + disp]".
// The stored value is always the hard-wired zero register; only the
// address needs scratch registers (AT/T9). 64-bit daddu throughout.
2930 enc_class store_P_immP0_enc (memory mem) %{
2931 MacroAssembler _masm(&cbuf);
2932 int base = $mem$$base;
2933 int index = $mem$$index;
2934 int scale = $mem$$scale;
2935 int disp = $mem$$disp;
2937 if( index != 0 ) {
2938 if (scale == 0) {
2939 if( Assembler::is_simm16(disp) ) {
2940 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2941 __ gssdx(R0, as_Register(base), as_Register(index), disp);
2942 } else {
2943 __ daddu(AT, as_Register(base), as_Register(index));
2944 __ sd(R0, AT, disp);
2945 }
2946 } else {
2947 __ daddu(AT, as_Register(base), as_Register(index));
2948 __ move(T9, disp);
2949 if(UseLoongsonISA) {
2950 __ gssdx(R0, AT, T9, 0);
2951 } else {
2952 __ daddu(AT, AT, T9);
2953 __ sd(R0, AT, 0);
2954 }
2955 }
2956 } else {
2957 __ dsll(AT, as_Register(index), scale);
2958 if( Assembler::is_simm16(disp) ) {
2959 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2960 __ gssdx(R0, as_Register(base), AT, disp);
2961 } else {
2962 __ daddu(AT, as_Register(base), AT);
2963 __ sd(R0, AT, disp);
2964 }
2965 } else {
2966 __ daddu(AT, as_Register(base), AT);
2967 __ move(T9, disp);
2968 if (UseLoongsonISA) {
2969 __ gssdx(R0, AT, T9, 0);
2970 } else {
2971 __ daddu(AT, AT, T9);
2972 __ sd(R0, AT, 0);
2973 }
2974 }
2975 }
2976 } else {
2977 if( Assembler::is_simm16(disp) ) {
2978 __ sd(R0, as_Register(base), disp);
2979 } else {
2980 __ move(T9, disp);
2981 if (UseLoongsonISA) {
2982 __ gssdx(R0, as_Register(base), T9, 0);
2983 } else {
2984 __ daddu(AT, as_Register(base), T9);
2985 __ sd(R0, AT, 0);
2986 }
2987 }
2988 }
2989 %}
// Store null narrow oop: 32-bit "sw R0 -> [base + (index << scale) + disp]".
// No Loongson fast path here, unlike the sibling store encodings.
2992 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2993 MacroAssembler _masm(&cbuf);
2994 int base = $mem$$base;
2995 int index = $mem$$index;
2996 int scale = $mem$$scale;
2997 int disp = $mem$$disp;
2999 if(index!=0){
3000 if (scale == 0) {
3001 __ daddu(AT, as_Register(base), as_Register(index));
3002 } else {
3003 __ dsll(AT, as_Register(index), scale);
3004 __ daddu(AT, as_Register(base), AT);
3005 }
3007 if( Assembler::is_simm16(disp) ) {
3008 __ sw(R0, AT, disp);
3009 } else {
3010 __ move(T9, disp);
3011 __ daddu(AT, AT, T9);
3012 __ sw(R0, AT, 0);
3013 }
3014 }
3015 else {
3016 if( Assembler::is_simm16(disp) ) {
3017 __ sw(R0, as_Register(base), disp);
3018 } else {
3019 __ move(T9, disp);
3020 __ daddu(AT, as_Register(base), T9);
3021 __ sw(R0, AT, 0);
3022 }
3023 }
3024 %}
// Load long (64-bit): "dst = ld [base + (index << scale) + disp]".
// A probing 'lw' from [base + 0] is emitted first so a null base faults at
// the first instruction of the node (see historical note below).
3026 enc_class load_L_enc (mRegL dst, memory mem) %{
3027 MacroAssembler _masm(&cbuf);
3028 int base = $mem$$base;
3029 int index = $mem$$index;
3030 int scale = $mem$$scale;
3031 int disp = $mem$$disp;
3032 Register dst_reg = as_Register($dst$$reg);
3034 /*********************2013/03/27**************************
3035 * Jin: $base may contain a null object.
3036 * Server JIT force the exception_offset to be the pos of
3037 * the first instruction.
3038 * I insert such a 'null_check' at the beginning.
3039 *******************************************************/
// Implicit null check: touch [base + 0] so a SEGV, if any, happens at the
// node's first instruction. The loaded value in AT is discarded.
3041 __ lw(AT, as_Register(base), 0);
3043 /*********************2012/10/04**************************
3044 * Error case found in SortTest
3045 * 337 b java.util.Arrays::sort1 (401 bytes)
3046 * B73:
3047 * d34 lw T4.lo, [T4 + #16] #@loadL-lo
3048 * lw T4.hi, [T4 + #16]+4 #@loadL-hi
3049 *
3050 * The original instructions generated here are :
3051 * __ lw(dst_lo, as_Register(base), disp);
3052 * __ lw(dst_hi, as_Register(base), disp + 4);
3053 *******************************************************/
3055 if( index != 0 ) {
3056 if (scale == 0) {
3057 __ daddu(AT, as_Register(base), as_Register(index));
3058 } else {
3059 __ dsll(AT, as_Register(index), scale);
3060 __ daddu(AT, as_Register(base), AT);
3061 }
3062 if( Assembler::is_simm16(disp) ) {
3063 __ ld(dst_reg, AT, disp);
3064 } else {
3065 __ move(T9, disp);
3066 __ daddu(AT, AT, T9);
3067 __ ld(dst_reg, AT, 0);
3068 }
3069 } else {
3070 if( Assembler::is_simm16(disp) ) {
// NOTE(review): 'move(AT, base)' looks redundant — 'ld(dst_reg, base, disp)'
// would do; kept as-is since node size may be computed from this sequence.
3071 __ move(AT, as_Register(base));
3072 __ ld(dst_reg, AT, disp);
3073 } else {
3074 __ move(T9, disp);
3075 __ daddu(AT, as_Register(base), T9);
3076 __ ld(dst_reg, AT, 0);
3077 }
3078 }
3079 %}
// Store long register (64-bit): "sd src -> [base + (index << scale) + disp]".
// All address arithmetic is 64-bit (daddu).
3081 enc_class store_L_reg_enc (memory mem, mRegL src) %{
3082 MacroAssembler _masm(&cbuf);
3083 int base = $mem$$base;
3084 int index = $mem$$index;
3085 int scale = $mem$$scale;
3086 int disp = $mem$$disp;
3087 Register src_reg = as_Register($src$$reg);
3089 if( index != 0 ) {
3090 if (scale == 0) {
3091 __ daddu(AT, as_Register(base), as_Register(index));
3092 } else {
3093 __ dsll(AT, as_Register(index), scale);
3094 __ daddu(AT, as_Register(base), AT);
3095 }
3096 if( Assembler::is_simm16(disp) ) {
3097 __ sd(src_reg, AT, disp);
3098 } else {
3099 __ move(T9, disp);
3100 __ daddu(AT, AT, T9);
3101 __ sd(src_reg, AT, 0);
3102 }
3103 } else {
3104 if( Assembler::is_simm16(disp) ) {
// NOTE(review): 'move(AT, base)' looks redundant — 'sd(src_reg, base, disp)'
// would do; kept as-is since node size may be computed from this sequence.
3105 __ move(AT, as_Register(base));
3106 __ sd(src_reg, AT, disp);
3107 } else {
3108 __ move(T9, disp);
3109 __ daddu(AT, as_Register(base), T9);
3110 __ sd(src_reg, AT, 0);
3111 }
3112 }
3113 %}
// Store long zero: "sd R0 -> [base + (index << scale) + disp]".
//
// Fix: the two large-displacement arms used the 32-bit 'addu' (result
// sign-extended from bit 31) to form the 64-bit store address, while the
// sibling encodings store_L_reg_enc and load_L_enc use 'daddu' at the same
// positions. addu would corrupt addresses at or above 2^31; use daddu.
3115 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3116 MacroAssembler _masm(&cbuf);
3117 int base = $mem$$base;
3118 int index = $mem$$index;
3119 int scale = $mem$$scale;
3120 int disp = $mem$$disp;
3122 if( index != 0 ) {
3123 if (scale == 0) {
3124 __ daddu(AT, as_Register(base), as_Register(index));
3125 } else {
3126 __ dsll(AT, as_Register(index), scale);
3127 __ daddu(AT, as_Register(base), AT);
3128 }
3129 if( Assembler::is_simm16(disp) ) {
3130 __ sd(R0, AT, disp);
3131 } else {
3132 __ move(T9, disp);
3133 __ daddu(AT, AT, T9); // was addu: 32-bit add truncates 64-bit addresses
3134 __ sd(R0, AT, 0);
3135 }
3136 } else {
3137 if( Assembler::is_simm16(disp) ) {
3138 __ move(AT, as_Register(base));
3139 __ sd(R0, AT, disp);
3140 } else {
3141 __ move(T9, disp);
3142 __ daddu(AT, as_Register(base), T9); // was addu: same 64-bit address fix
3143 __ sd(R0, AT, 0);
3144 }
3145 }
3146 %}
// Load float (32-bit FP): "dst = lwc1 [base + (index << scale) + disp]"
// (Loongson gslwxc1 indexed form when disp fits simm8). AT/T9 are the
// integer scratch registers used only for address formation.
3148 enc_class load_F_enc (regF dst, memory mem) %{
3149 MacroAssembler _masm(&cbuf);
3150 int base = $mem$$base;
3151 int index = $mem$$index;
3152 int scale = $mem$$scale;
3153 int disp = $mem$$disp;
3154 FloatRegister dst = $dst$$FloatRegister;
3156 if( index != 0 ) {
3157 if( Assembler::is_simm16(disp) ) {
3158 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3159 if (scale == 0) {
3160 __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
3161 } else {
3162 __ dsll(AT, as_Register(index), scale);
3163 __ gslwxc1(dst, as_Register(base), AT, disp);
3164 }
3165 } else {
3166 if (scale == 0) {
3167 __ daddu(AT, as_Register(base), as_Register(index));
3168 } else {
3169 __ dsll(AT, as_Register(index), scale);
3170 __ daddu(AT, as_Register(base), AT);
3171 }
3172 __ lwc1(dst, AT, disp);
3173 }
3174 } else {
3175 if (scale == 0) {
3176 __ daddu(AT, as_Register(base), as_Register(index));
3177 } else {
3178 __ dsll(AT, as_Register(index), scale);
3179 __ daddu(AT, as_Register(base), AT);
3180 }
3181 __ move(T9, disp);
3182 if( UseLoongsonISA ) {
3183 __ gslwxc1(dst, AT, T9, 0);
3184 } else {
3185 __ daddu(AT, AT, T9);
3186 __ lwc1(dst, AT, 0);
3187 }
3188 }
3189 } else {
3190 if( Assembler::is_simm16(disp) ) {
3191 __ lwc1(dst, as_Register(base), disp);
3192 } else {
3193 __ move(T9, disp);
3194 if( UseLoongsonISA ) {
3195 __ gslwxc1(dst, as_Register(base), T9, 0);
3196 } else {
3197 __ daddu(AT, as_Register(base), T9);
3198 __ lwc1(dst, AT, 0);
3199 }
3200 }
3201 }
3202 %}
3204 enc_class store_F_reg_enc (memory mem, regF src) %{
3205 MacroAssembler _masm(&cbuf);
3206 int base = $mem$$base;
3207 int index = $mem$$index;
3208 int scale = $mem$$scale;
3209 int disp = $mem$$disp;
3210 FloatRegister src = $src$$FloatRegister;
3212 if( index != 0 ) {
3213 if( Assembler::is_simm16(disp) ) {
3214 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3215 if (scale == 0) {
3216 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3217 } else {
3218 __ dsll(AT, as_Register(index), scale);
3219 __ gsswxc1(src, as_Register(base), AT, disp);
3220 }
3221 } else {
3222 if (scale == 0) {
3223 __ daddu(AT, as_Register(base), as_Register(index));
3224 } else {
3225 __ dsll(AT, as_Register(index), scale);
3226 __ daddu(AT, as_Register(base), AT);
3227 }
3228 __ swc1(src, AT, disp);
3229 }
3230 } else {
3231 if (scale == 0) {
3232 __ daddu(AT, as_Register(base), as_Register(index));
3233 } else {
3234 __ dsll(AT, as_Register(index), scale);
3235 __ daddu(AT, as_Register(base), AT);
3236 }
3237 __ move(T9, disp);
3238 if( UseLoongsonISA ) {
3239 __ gsswxc1(src, AT, T9, 0);
3240 } else {
3241 __ daddu(AT, AT, T9);
3242 __ swc1(src, AT, 0);
3243 }
3244 }
3245 } else {
3246 if( Assembler::is_simm16(disp) ) {
3247 __ swc1(src, as_Register(base), disp);
3248 } else {
3249 __ move(T9, disp);
3250 if( UseLoongsonISA ) {
3251 __ gslwxc1(src, as_Register(base), T9, 0);
3252 } else {
3253 __ daddu(AT, as_Register(base), T9);
3254 __ swc1(src, AT, 0);
3255 }
3256 }
3257 }
3258 %}
3260 enc_class load_D_enc (regD dst, memory mem) %{
3261 MacroAssembler _masm(&cbuf);
3262 int base = $mem$$base;
3263 int index = $mem$$index;
3264 int scale = $mem$$scale;
3265 int disp = $mem$$disp;
3266 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3268 if( index != 0 ) {
3269 if( Assembler::is_simm16(disp) ) {
3270 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3271 if (scale == 0) {
3272 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3273 } else {
3274 __ dsll(AT, as_Register(index), scale);
3275 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3276 }
3277 } else {
3278 if (scale == 0) {
3279 __ daddu(AT, as_Register(base), as_Register(index));
3280 } else {
3281 __ dsll(AT, as_Register(index), scale);
3282 __ daddu(AT, as_Register(base), AT);
3283 }
3284 __ ldc1(dst_reg, AT, disp);
3285 }
3286 } else {
3287 if (scale == 0) {
3288 __ daddu(AT, as_Register(base), as_Register(index));
3289 } else {
3290 __ dsll(AT, as_Register(index), scale);
3291 __ daddu(AT, as_Register(base), AT);
3292 }
3293 __ move(T9, disp);
3294 if( UseLoongsonISA ) {
3295 __ gsldxc1(dst_reg, AT, T9, 0);
3296 } else {
3297 __ addu(AT, AT, T9);
3298 __ ldc1(dst_reg, AT, 0);
3299 }
3300 }
3301 } else {
3302 if( Assembler::is_simm16(disp) ) {
3303 __ ldc1(dst_reg, as_Register(base), disp);
3304 } else {
3305 __ move(T9, disp);
3306 if( UseLoongsonISA ) {
3307 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3308 } else {
3309 __ addu(AT, as_Register(base), T9);
3310 __ ldc1(dst_reg, AT, 0);
3311 }
3312 }
3313 }
3314 %}
3316 enc_class store_D_reg_enc (memory mem, regD src) %{
3317 MacroAssembler _masm(&cbuf);
3318 int base = $mem$$base;
3319 int index = $mem$$index;
3320 int scale = $mem$$scale;
3321 int disp = $mem$$disp;
3322 FloatRegister src_reg = as_FloatRegister($src$$reg);
3324 if( index != 0 ) {
3325 if( Assembler::is_simm16(disp) ) {
3326 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3327 if (scale == 0) {
3328 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3329 } else {
3330 __ dsll(AT, as_Register(index), scale);
3331 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3332 }
3333 } else {
3334 if (scale == 0) {
3335 __ daddu(AT, as_Register(base), as_Register(index));
3336 } else {
3337 __ dsll(AT, as_Register(index), scale);
3338 __ daddu(AT, as_Register(base), AT);
3339 }
3340 __ sdc1(src_reg, AT, disp);
3341 }
3342 } else {
3343 if (scale == 0) {
3344 __ daddu(AT, as_Register(base), as_Register(index));
3345 } else {
3346 __ dsll(AT, as_Register(index), scale);
3347 __ daddu(AT, as_Register(base), AT);
3348 }
3349 __ move(T9, disp);
3350 if( UseLoongsonISA ) {
3351 __ gssdxc1(src_reg, AT, T9, 0);
3352 } else {
3353 __ addu(AT, AT, T9);
3354 __ sdc1(src_reg, AT, 0);
3355 }
3356 }
3357 } else {
3358 if( Assembler::is_simm16(disp) ) {
3359 __ sdc1(src_reg, as_Register(base), disp);
3360 } else {
3361 __ move(T9, disp);
3362 if( UseLoongsonISA ) {
3363 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3364 } else {
3365 __ addu(AT, as_Register(base), T9);
3366 __ sdc1(src_reg, AT, 0);
3367 }
3368 }
3369 }
3370 %}
enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  // Call from compiled Java code into a VM runtime entry point.  The target
  // is materialized into T9 with a fixed-length patchable sequence and
  // called via jalr; T9 is the conventional MIPS call-target register.
  MacroAssembler _masm(&cbuf);
  // This is the instruction starting address for relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  // patchable_set48 emits a fixed-size sequence so relocation code can
  // later patch the 48-bit target address in place.
  __ patchable_set48(T9, (long)$meth$$method);
  __ jalr(T9);
  __ nop();    // branch-delay slot
%}
enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
  // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  // Choose the relocation type for this call site: no resolved _method
  // means a call into the runtime; otherwise an optimized-virtual or a
  // plain static call gets its own reloc type so the call site can be
  // found and patched later.
  if ( !_method ) {
     __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
     __ relocate(relocInfo::opt_virtual_call_type);
  } else {
     __ relocate(relocInfo::static_call_type);
  }

  // general_jal emits the actual (patchable) call to the target address.
  __ general_jal((address)($meth$$method));
  if( _method ) {    // Emit stub for static call
     emit_java_to_interp(cbuf);
  }
%}
/*
 * Java dynamic (inline-cache dispatched) call.
 * [Ref: LIR_Assembler::ic_call() ]
 */
enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  // ic_call emits the inline-cache call sequence for the given target;
  // see MacroAssembler::ic_call and LIR_Assembler::ic_call for details.
  __ ic_call((address)$meth$$method);
%}
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  // Materialize a flags value from the result left in AT by the fast
  // lock/unlock sequence: flags = (AT == 0) ? 0 : 0xFFFFFFFF.
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);           // flags = 0 (assume AT == 0 path)
  __ beq(AT, R0, L);
  __ delayed()->nop();              // branch-delay slot
  __ move(flags, 0xFFFFFFFF);       // AT != 0: set all bits
  __ bind(L);
%}
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  // Slow-path partial subtype check: sets result = 0 if 'sub' is a subtype
  // of 'super', result = 1 on a miss.  T9 is used as an extra temp.
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   *    47c   B40: #	B21 B41 <- B20  Freq: 0.155379
   *    47c   	partialSubtypeCheck  result=S1, sub=S1, super=S3, length=S0
   *    4bc   	mov   S2, NULL #@loadConP
   *    4c0   	beq   S1, S2, B21  #@branchConP  P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI: 0 encodes a subtype hit. */
  __ move(result, 0);
  __ b(done);
  __ nop();    // branch-delay slot

  __ bind(miss);
  __ move(result, 1);
  __ bind(done);
%}
3457 %}
3460 //---------MIPS FRAME--------------------------------------------------------------
3461 // Definition of frame structure and management information.
3462 //
3463 // S T A C K L A Y O U T Allocators stack-slot number
3464 // | (to get allocators register number
3465 // G Owned by | | v add SharedInfo::stack0)
3466 // r CALLER | |
3467 // o | +--------+ pad to even-align allocators stack-slot
3468 // w V | pad0 | numbers; owned by CALLER
3469 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3470 // h ^ | in | 5
3471 // | | args | 4 Holes in incoming args owned by SELF
3472 // | | old | | 3
3473 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3474 // v | | ret | 3 return address
3475 // Owned by +--------+
3476 // Self | pad2 | 2 pad to align old SP
3477 // | +--------+ 1
3478 // | | locks | 0
3479 // | +--------+----> SharedInfo::stack0, even aligned
3480 // | | pad1 | 11 pad to align new SP
3481 // | +--------+
3482 // | | | 10
3483 // | | spills | 9 spills
3484 // V | | 8 (pad0 slot for callee)
3485 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3486 // ^ | out | 7
3487 // | | args | 6 Holes in outgoing args owned by CALLEE
3488 // Owned by new | |
3489 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3490 // | |
3491 //
3492 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3493 // known from SELF's arguments and the Java calling convention.
3494 // Region 6-7 is determined per call site.
3495 // Note 2: If the calling convention leaves holes in the incoming argument
3496 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3498 // incoming area, as the Java calling convention is completely under
3499 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3501 // varargs C calling conventions.
3502 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3503 // even aligned with pad0 as needed.
3504 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3505 // region 6-11 is even aligned; it may be padded out more so that
3506 // the region from SP to FP meets the minimum stack alignment.
3507 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3508 // alignment. Region 11, pad1, may be dynamically extended so that
3509 // SP meets the minimum alignment.
3512 frame %{
3514 stack_direction(TOWARDS_LOW);
3516 // These two registers define part of the calling convention
3517 // between compiled code and the interpreter.
3518 // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3519 // for more information. by yjl 3/16/2006
3521 inline_cache_reg(T1); // Inline Cache Register
3522 interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
3523 /*
3524 inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
3525 interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
3526 */
3528 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3529 cisc_spilling_operand_name(indOffset32);
3531 // Number of stack slots consumed by locking an object
3532 // generate Compile::sync_stack_slots
3533 #ifdef _LP64
3534 sync_stack_slots(2);
3535 #else
3536 sync_stack_slots(1);
3537 #endif
3539 frame_pointer(SP);
3541 // Interpreter stores its frame pointer in a register which is
3542 // stored to the stack by I2CAdaptors.
3543 // I2CAdaptors convert from interpreted java to compiled java.
3545 interpreter_frame_pointer(FP);
3547 // generate Matcher::stack_alignment
3548 stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3550 // Number of stack slots between incoming argument block and the start of
3551 // a new frame. The PROLOG must add this many slots to the stack. The
3552 // EPILOG must remove this many slots. Intel needs one slot for
3553 // return address.
3554 // generate Matcher::in_preserve_stack_slots
3555 //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3556 in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3558 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3559 // for calls to C. Supports the var-args backing area for register parms.
3560 varargs_C_out_slots_killed(0);
3562 // The after-PROLOG location of the return address. Location of
3563 // return address specifies a type (REG or STACK) and a number
3564 // representing the register number (i.e. - use a register name) or
3565 // stack slot.
3566 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3567 // Otherwise, it is above the locks and verification slot and alignment word
3568 //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3569 return_addr(REG RA);
3571 // Body of function which returns an integer array locating
3572 // arguments either in registers or in stack slots. Passed an array
3573 // of ideal registers called "sig" and a "length" count. Stack-slot
3574 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3575 // arguments for a CALLEE. Incoming stack arguments are
3576 // automatically biased by the preserve_stack_slots field above.
3579 // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3580 // StartNode::calling_convention call this. by yjl 3/16/2006
3581 calling_convention %{
3582 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3583 %}
3588 // Body of function which returns an integer array locating
3589 // arguments either in registers or in stack slots. Passed an array
3590 // of ideal registers called "sig" and a "length" count. Stack-slot
3591 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3592 // arguments for a CALLEE. Incoming stack arguments are
3593 // automatically biased by the preserve_stack_slots field above.
3596 // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3597 c_calling_convention %{
3598 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3599 %}
3602 // Location of C & interpreter return values
3603 // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3604 // SEE Matcher::match. by yjl 3/16/2006
3605 c_return_value %{
3606 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3607 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3608 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3609 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
3610 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3611 %}
3613 // Location of return values
3614 // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3615 // SEE Matcher::match. by yjl 3/16/2006
3617 return_value %{
3618 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3619 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3620 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3621 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
3622 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3623 %}
3625 %}
3627 //----------ATTRIBUTES---------------------------------------------------------
3628 //----------Operand Attributes-------------------------------------------------
3629 op_attrib op_cost(0); // Required cost attribute
3631 //----------Instruction Attributes---------------------------------------------
3632 ins_attrib ins_cost(100); // Required cost attribute
3633 ins_attrib ins_size(32); // Required size attribute (in bits)
3634 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3635 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3636 // non-matching short branch variant of some
3637 // long branch?
3638 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3639 // specifies the alignment that some part of the instruction (not
3640 // necessarily the start) requires. If > 1, a compute_padding()
3641 // function must be provided for the instruction
3643 //----------OPERANDS-----------------------------------------------------------
3644 // Operand definitions must precede instruction definitions for correct parsing
3645 // in the ADLC because operands constitute user defined types which are used in
3646 // instruction definitions.
3648 // Vectors
3649 operand vecD() %{
3650 constraint(ALLOC_IN_RC(dbl_reg));
3651 match(VecD);
3653 format %{ %}
3654 interface(REG_INTER);
3655 %}
3657 // Flags register, used as output of compare instructions
3658 operand FlagsReg() %{
3659 constraint(ALLOC_IN_RC(mips_flags));
3660 match(RegFlags);
3662 format %{ "EFLAGS" %}
3663 interface(REG_INTER);
3664 %}
3666 //----------Simple Operands----------------------------------------------------
3667 //TODO: Should we need to define some more special immediate number ?
3668 // Immediate Operands
3669 // Integer Immediate
3670 operand immI() %{
3671 match(ConI);
3672 //TODO: should not match immI8 here LEE
3673 match(immI8);
3675 op_cost(20);
3676 format %{ %}
3677 interface(CONST_INTER);
3678 %}
3680 // Long Immediate 8-bit
3681 operand immL8()
3682 %{
3683 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3684 match(ConL);
3686 op_cost(5);
3687 format %{ %}
3688 interface(CONST_INTER);
3689 %}
3691 // Constant for test vs zero
3692 operand immI0() %{
3693 predicate(n->get_int() == 0);
3694 match(ConI);
3696 op_cost(0);
3697 format %{ %}
3698 interface(CONST_INTER);
3699 %}
3701 // Constant for increment
3702 operand immI1() %{
3703 predicate(n->get_int() == 1);
3704 match(ConI);
3706 op_cost(0);
3707 format %{ %}
3708 interface(CONST_INTER);
3709 %}
3711 // Constant for decrement
3712 operand immI_M1() %{
3713 predicate(n->get_int() == -1);
3714 match(ConI);
3716 op_cost(0);
3717 format %{ %}
3718 interface(CONST_INTER);
3719 %}
3721 operand immI_MaxI() %{
3722 predicate(n->get_int() == 2147483647);
3723 match(ConI);
3725 op_cost(0);
3726 format %{ %}
3727 interface(CONST_INTER);
3728 %}
3730 // Valid scale values for addressing modes
3731 operand immI2() %{
3732 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3733 match(ConI);
3735 format %{ %}
3736 interface(CONST_INTER);
3737 %}
3739 operand immI8() %{
3740 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3741 match(ConI);
3743 op_cost(5);
3744 format %{ %}
3745 interface(CONST_INTER);
3746 %}
3748 operand immI16() %{
3749 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3750 match(ConI);
3752 op_cost(10);
3753 format %{ %}
3754 interface(CONST_INTER);
3755 %}
3757 // Constant for long shifts
3758 operand immI_32() %{
3759 predicate( n->get_int() == 32 );
3760 match(ConI);
3762 op_cost(0);
3763 format %{ %}
3764 interface(CONST_INTER);
3765 %}
3767 operand immI_63() %{
3768 predicate( n->get_int() == 63 );
3769 match(ConI);
3771 op_cost(0);
3772 format %{ %}
3773 interface(CONST_INTER);
3774 %}
3776 operand immI_0_31() %{
3777 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3778 match(ConI);
3780 op_cost(0);
3781 format %{ %}
3782 interface(CONST_INTER);
3783 %}
// Operand for non-negative integer mask
3786 operand immI_nonneg_mask() %{
3787 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3788 match(ConI);
3790 op_cost(0);
3791 format %{ %}
3792 interface(CONST_INTER);
3793 %}
3795 operand immI_32_63() %{
3796 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3797 match(ConI);
3798 op_cost(0);
3800 format %{ %}
3801 interface(CONST_INTER);
3802 %}
3804 operand immI16_sub() %{
3805 predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
3806 match(ConI);
3808 op_cost(10);
3809 format %{ %}
3810 interface(CONST_INTER);
3811 %}
3813 operand immI_0_32767() %{
3814 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3815 match(ConI);
3816 op_cost(0);
3818 format %{ %}
3819 interface(CONST_INTER);
3820 %}
3822 operand immI_0_65535() %{
3823 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3824 match(ConI);
3825 op_cost(0);
3827 format %{ %}
3828 interface(CONST_INTER);
3829 %}
3831 operand immI_1() %{
3832 predicate( n->get_int() == 1 );
3833 match(ConI);
3835 op_cost(0);
3836 format %{ %}
3837 interface(CONST_INTER);
3838 %}
3840 operand immI_2() %{
3841 predicate( n->get_int() == 2 );
3842 match(ConI);
3844 op_cost(0);
3845 format %{ %}
3846 interface(CONST_INTER);
3847 %}
3849 operand immI_3() %{
3850 predicate( n->get_int() == 3 );
3851 match(ConI);
3853 op_cost(0);
3854 format %{ %}
3855 interface(CONST_INTER);
3856 %}
3858 operand immI_7() %{
3859 predicate( n->get_int() == 7 );
3860 match(ConI);
3862 format %{ %}
3863 interface(CONST_INTER);
3864 %}
3866 // Immediates for special shifts (sign extend)
3868 // Constants for increment
3869 operand immI_16() %{
3870 predicate( n->get_int() == 16 );
3871 match(ConI);
3873 format %{ %}
3874 interface(CONST_INTER);
3875 %}
3877 operand immI_24() %{
3878 predicate( n->get_int() == 24 );
3879 match(ConI);
3881 format %{ %}
3882 interface(CONST_INTER);
3883 %}
3885 // Constant for byte-wide masking
3886 operand immI_255() %{
3887 predicate( n->get_int() == 255 );
3888 match(ConI);
3890 op_cost(0);
3891 format %{ %}
3892 interface(CONST_INTER);
3893 %}
3895 operand immI_65535() %{
3896 predicate( n->get_int() == 65535 );
3897 match(ConI);
3899 op_cost(5);
3900 format %{ %}
3901 interface(CONST_INTER);
3902 %}
3904 operand immI_65536() %{
3905 predicate( n->get_int() == 65536 );
3906 match(ConI);
3908 op_cost(5);
3909 format %{ %}
3910 interface(CONST_INTER);
3911 %}
3913 operand immI_M65536() %{
3914 predicate( n->get_int() == -65536 );
3915 match(ConI);
3917 op_cost(5);
3918 format %{ %}
3919 interface(CONST_INTER);
3920 %}
3922 // Pointer Immediate
3923 operand immP() %{
3924 match(ConP);
3926 op_cost(10);
3927 format %{ %}
3928 interface(CONST_INTER);
3929 %}
3931 // NULL Pointer Immediate
3932 operand immP0() %{
3933 predicate( n->get_ptr() == 0 );
3934 match(ConP);
3935 op_cost(0);
3937 format %{ %}
3938 interface(CONST_INTER);
3939 %}
3941 // Pointer Immediate: 64-bit
3942 operand immP_set() %{
3943 match(ConP);
3945 op_cost(5);
3946 // formats are generated automatically for constants and base registers
3947 format %{ %}
3948 interface(CONST_INTER);
3949 %}
3951 // Pointer Immediate: 64-bit
3952 operand immP_load() %{
3953 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3954 match(ConP);
3956 op_cost(5);
3957 // formats are generated automatically for constants and base registers
3958 format %{ %}
3959 interface(CONST_INTER);
3960 %}
3962 // Pointer Immediate: 64-bit
3963 operand immP_no_oop_cheap() %{
3964 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3965 match(ConP);
3967 op_cost(5);
3968 // formats are generated automatically for constants and base registers
3969 format %{ %}
3970 interface(CONST_INTER);
3971 %}
3973 // Pointer for polling page
3974 operand immP_poll() %{
3975 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3976 match(ConP);
3977 op_cost(5);
3979 format %{ %}
3980 interface(CONST_INTER);
3981 %}
3983 // Pointer Immediate
3984 operand immN() %{
3985 match(ConN);
3987 op_cost(10);
3988 format %{ %}
3989 interface(CONST_INTER);
3990 %}
3992 operand immNKlass() %{
3993 match(ConNKlass);
3995 op_cost(10);
3996 format %{ %}
3997 interface(CONST_INTER);
3998 %}
4000 // NULL Pointer Immediate
4001 operand immN0() %{
4002 predicate(n->get_narrowcon() == 0);
4003 match(ConN);
4005 op_cost(5);
4006 format %{ %}
4007 interface(CONST_INTER);
4008 %}
4010 // Long Immediate
4011 operand immL() %{
4012 match(ConL);
4014 op_cost(20);
4015 format %{ %}
4016 interface(CONST_INTER);
4017 %}
4019 // Long Immediate zero
4020 operand immL0() %{
4021 predicate( n->get_long() == 0L );
4022 match(ConL);
4023 op_cost(0);
4025 format %{ %}
4026 interface(CONST_INTER);
4027 %}
4029 operand immL7() %{
4030 predicate( n->get_long() == 7L );
4031 match(ConL);
4032 op_cost(0);
4034 format %{ %}
4035 interface(CONST_INTER);
4036 %}
4038 operand immL_M1() %{
4039 predicate( n->get_long() == -1L );
4040 match(ConL);
4041 op_cost(0);
4043 format %{ %}
4044 interface(CONST_INTER);
4045 %}
4047 // bit 0..2 zero
4048 operand immL_M8() %{
4049 predicate( n->get_long() == -8L );
4050 match(ConL);
4051 op_cost(0);
4053 format %{ %}
4054 interface(CONST_INTER);
4055 %}
4057 // bit 2 zero
4058 operand immL_M5() %{
4059 predicate( n->get_long() == -5L );
4060 match(ConL);
4061 op_cost(0);
4063 format %{ %}
4064 interface(CONST_INTER);
4065 %}
4067 // bit 1..2 zero
4068 operand immL_M7() %{
4069 predicate( n->get_long() == -7L );
4070 match(ConL);
4071 op_cost(0);
4073 format %{ %}
4074 interface(CONST_INTER);
4075 %}
4077 // bit 0..1 zero
4078 operand immL_M4() %{
4079 predicate( n->get_long() == -4L );
4080 match(ConL);
4081 op_cost(0);
4083 format %{ %}
4084 interface(CONST_INTER);
4085 %}
4087 // bit 3..6 zero
4088 operand immL_M121() %{
4089 predicate( n->get_long() == -121L );
4090 match(ConL);
4091 op_cost(0);
4093 format %{ %}
4094 interface(CONST_INTER);
4095 %}
4097 // Long immediate from 0 to 127.
4098 // Used for a shorter form of long mul by 10.
4099 operand immL_127() %{
4100 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4101 match(ConL);
4102 op_cost(0);
4104 format %{ %}
4105 interface(CONST_INTER);
4106 %}
// Operand for non-negative long mask
4109 operand immL_nonneg_mask() %{
4110 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
4111 match(ConL);
4113 op_cost(0);
4114 format %{ %}
4115 interface(CONST_INTER);
4116 %}
4118 operand immL_0_65535() %{
4119 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4120 match(ConL);
4121 op_cost(0);
4123 format %{ %}
4124 interface(CONST_INTER);
4125 %}
4127 // Long Immediate: cheap (materialize in <= 3 instructions)
4128 operand immL_cheap() %{
4129 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
4130 match(ConL);
4131 op_cost(0);
4133 format %{ %}
4134 interface(CONST_INTER);
4135 %}
4137 // Long Immediate: expensive (materialize in > 3 instructions)
4138 operand immL_expensive() %{
4139 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
4140 match(ConL);
4141 op_cost(0);
4143 format %{ %}
4144 interface(CONST_INTER);
4145 %}
4147 operand immL16() %{
4148 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4149 match(ConL);
4151 op_cost(10);
4152 format %{ %}
4153 interface(CONST_INTER);
4154 %}
4156 operand immL16_sub() %{
4157 predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
4158 match(ConL);
4160 op_cost(10);
4161 format %{ %}
4162 interface(CONST_INTER);
4163 %}
4165 // Long Immediate: low 32-bit mask
4166 operand immL_32bits() %{
4167 predicate(n->get_long() == 0xFFFFFFFFL);
4168 match(ConL);
4169 op_cost(20);
4171 format %{ %}
4172 interface(CONST_INTER);
4173 %}
4175 // Long Immediate 32-bit signed
4176 operand immL32()
4177 %{
4178 predicate(n->get_long() == (int) (n->get_long()));
4179 match(ConL);
4181 op_cost(15);
4182 format %{ %}
4183 interface(CONST_INTER);
4184 %}
4187 //single-precision floating-point zero
4188 operand immF0() %{
4189 predicate(jint_cast(n->getf()) == 0);
4190 match(ConF);
4192 op_cost(5);
4193 format %{ %}
4194 interface(CONST_INTER);
4195 %}
4197 //single-precision floating-point immediate
4198 operand immF() %{
4199 match(ConF);
4201 op_cost(20);
4202 format %{ %}
4203 interface(CONST_INTER);
4204 %}
4206 //double-precision floating-point zero
4207 operand immD0() %{
4208 predicate(jlong_cast(n->getd()) == 0);
4209 match(ConD);
4211 op_cost(5);
4212 format %{ %}
4213 interface(CONST_INTER);
4214 %}
4216 //double-precision floating-point immediate
4217 operand immD() %{
4218 match(ConD);
4220 op_cost(20);
4221 format %{ %}
4222 interface(CONST_INTER);
4223 %}
4225 // Register Operands
4226 // Integer Register
4227 operand mRegI() %{
4228 constraint(ALLOC_IN_RC(int_reg));
4229 match(RegI);
4231 format %{ %}
4232 interface(REG_INTER);
4233 %}
4235 operand no_Ax_mRegI() %{
4236 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4237 match(RegI);
4238 match(mRegI);
4240 format %{ %}
4241 interface(REG_INTER);
4242 %}
4244 operand mS0RegI() %{
4245 constraint(ALLOC_IN_RC(s0_reg));
4246 match(RegI);
4247 match(mRegI);
4249 format %{ "S0" %}
4250 interface(REG_INTER);
4251 %}
4253 operand mS1RegI() %{
4254 constraint(ALLOC_IN_RC(s1_reg));
4255 match(RegI);
4256 match(mRegI);
4258 format %{ "S1" %}
4259 interface(REG_INTER);
4260 %}
4262 operand mS2RegI() %{
4263 constraint(ALLOC_IN_RC(s2_reg));
4264 match(RegI);
4265 match(mRegI);
4267 format %{ "S2" %}
4268 interface(REG_INTER);
4269 %}
4271 operand mS3RegI() %{
4272 constraint(ALLOC_IN_RC(s3_reg));
4273 match(RegI);
4274 match(mRegI);
4276 format %{ "S3" %}
4277 interface(REG_INTER);
4278 %}
4280 operand mS4RegI() %{
4281 constraint(ALLOC_IN_RC(s4_reg));
4282 match(RegI);
4283 match(mRegI);
4285 format %{ "S4" %}
4286 interface(REG_INTER);
4287 %}
4289 operand mS5RegI() %{
4290 constraint(ALLOC_IN_RC(s5_reg));
4291 match(RegI);
4292 match(mRegI);
4294 format %{ "S5" %}
4295 interface(REG_INTER);
4296 %}
4298 operand mS6RegI() %{
4299 constraint(ALLOC_IN_RC(s6_reg));
4300 match(RegI);
4301 match(mRegI);
4303 format %{ "S6" %}
4304 interface(REG_INTER);
4305 %}
4307 operand mS7RegI() %{
4308 constraint(ALLOC_IN_RC(s7_reg));
4309 match(RegI);
4310 match(mRegI);
4312 format %{ "S7" %}
4313 interface(REG_INTER);
4314 %}
4317 operand mT0RegI() %{
4318 constraint(ALLOC_IN_RC(t0_reg));
4319 match(RegI);
4320 match(mRegI);
4322 format %{ "T0" %}
4323 interface(REG_INTER);
4324 %}
4326 operand mT1RegI() %{
4327 constraint(ALLOC_IN_RC(t1_reg));
4328 match(RegI);
4329 match(mRegI);
4331 format %{ "T1" %}
4332 interface(REG_INTER);
4333 %}
4335 operand mT2RegI() %{
4336 constraint(ALLOC_IN_RC(t2_reg));
4337 match(RegI);
4338 match(mRegI);
4340 format %{ "T2" %}
4341 interface(REG_INTER);
4342 %}
4344 operand mT3RegI() %{
4345 constraint(ALLOC_IN_RC(t3_reg));
4346 match(RegI);
4347 match(mRegI);
4349 format %{ "T3" %}
4350 interface(REG_INTER);
4351 %}
4353 operand mT8RegI() %{
4354 constraint(ALLOC_IN_RC(t8_reg));
4355 match(RegI);
4356 match(mRegI);
4358 format %{ "T8" %}
4359 interface(REG_INTER);
4360 %}
4362 operand mT9RegI() %{
4363 constraint(ALLOC_IN_RC(t9_reg));
4364 match(RegI);
4365 match(mRegI);
4367 format %{ "T9" %}
4368 interface(REG_INTER);
4369 %}
// Integer operands pinned to the argument registers A0-A7
// (MIPS N64 calling convention passes the first eight integer
// arguments in these registers).
operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}
// Integer operands pinned to the return-value registers V0/V1.
operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Generic narrow-oop (compressed pointer, RegN) operand:
// any register of the int_reg class may hold a 32-bit compressed oop.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}
// Narrow-oop (RegN) operands pinned to individual registers.
// Same pattern throughout: match RegN/mRegN, constrain to the
// single-register class named in ALLOC_IN_RC.
operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
4685 // Pointer Register
// Generic pointer (RegP) operand: any register of the p_reg class.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer operand that excludes T8, for instructs that clobber T8
// internally (the no_T8_p_reg class is p_reg minus T8).
operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer operands pinned to the callee-saved registers S0-S7.
// Each also matches no_T8_mRegP (none of these is T8), so they remain
// usable by instructs that demand a non-T8 pointer.
operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer operands pinned to the temporaries T0-T3, T8, T9.
operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Note: t8_RegP intentionally does NOT match no_T8_mRegP — T8 is the
// register that class excludes, so a T8-pinned operand must not be a
// legal substitute for it.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer operands pinned to the argument registers A0-A7.
operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer operands pinned to the return-value registers V0/V1.
operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4967 /*
4968 operand mSPRegP(mRegP reg) %{
4969 constraint(ALLOC_IN_RC(sp_reg));
4970 match(reg);
4972 format %{ "SP" %}
4973 interface(REG_INTER);
4974 %}
4976 operand mFPRegP(mRegP reg) %{
4977 constraint(ALLOC_IN_RC(fp_reg));
4978 match(reg);
4980 format %{ "FP" %}
4981 interface(REG_INTER);
4982 %}
4983 */
// Generic long (RegL) operand: any register of the long_reg class.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}
// Long operands pinned to individual registers.
// NOTE(review): s5RegL/s6RegL are absent from this list — presumably
// S5/S6 are reserved for VM use on this port; confirm against the
// register definitions before adding them.
operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

// NOTE(review): a0RegL is the only long operand here with a non-empty
// format string ("A0"); all siblings use an empty format. Harmless
// (format only affects debug printing) but inconsistent.
operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
5182 // Floating register operands
// Single-precision floating-point register operand.
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double-precision floating-point register operand.
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
5200 //----------Memory Operands----------------------------------------------------
5201 // Indirect Memory Operand
// Memory addressing-mode operands. Each matches an ideal address
// sub-tree (AddP chains) and exposes base/index/scale/disp through
// MEMORY_INTER for the encoders. op_cost steers the matcher toward
// the cheaper/simpler forms.
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand: [reg + off8]
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  op_cost(10);
  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register: [reg + lreg << scale]
operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset], index sign-extended from int
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Same as above with an int index sign-extended to long.
operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
5328 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Indirect Memory Times Scale Plus Index Register Plus Offset Operand,
// with a compressed-oop base. Only legal when narrow_oop_shift() == 0,
// i.e. the compressed oop IS the address (zero-based, unscaled).
operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
// [base + index<<scale + offset] for compressed Oops
// [base + index<<scale + offset] with a compressed-oop base; requires
// an unshifted narrow-oop encoding (see note on the retained predicate).
operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  predicate(Universe::narrow_oop_shift() == 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
5363 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5364 // Indirect Memory Plus Long Offset Operand
// Indirect Memory Plus Long Offset Operand: [reg + off32].
// Higher op_cost than indOffset8 — a 32-bit displacement needs extra
// instructions to materialize on MIPS (16-bit immediate fields).
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register: [addr + index]
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}
// Addressing modes rooted at a compressed-klass pointer (DecodeNKlass).
// All require narrow_klass_shift() == 0 so the narrow value can serve
// directly as the base address.
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
5474 // Indirect Memory Operand
// Addressing modes rooted at a compressed oop (DecodeN); legal only
// when narrow_oop_shift() == 0.
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8Narrow(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}
5525 //----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use its address expression again after loading
5527 // the first word of the long. If the load-long destination overlaps with
5528 // registers used in the addressing expression, the 2nd half will be loaded
5529 // from a clobbered address. Fix this by requiring that load-long use
5530 // address registers that do not overlap with the load-long target.
5532 // load-long support
// load-long support: a pointer operand with a high op_cost so the
// allocator only uses it for load-long addressing, keeping the address
// registers disjoint from the load-long destination (see comment above).
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand
// NOTE(review): unlike its siblings this operand declares no
// ALLOC_IN_RC constraint — confirm that is intentional.
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
5569 //----------Conditional Branch Operands----------------------------------------
5570 // Comparison Op - This is the operation of the comparison, and is limited to
5571 // the following set of codes:
5572 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5573 //
5574 // Other attributes of the comparison, such as unsignedness, are specified
5575 // by the comparison instruction that sets a condition code flags register.
5576 // That result is represented by a flags operand whose subtype is appropriate
5577 // to the unsignedness (etc.) of the comparison.
5578 //
5579 // Later, the instruction which matches both the Comparison Op (a Bool) and
5580 // the flags (produced by the Cmp) specifies the coding of the comparison op
5581 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Comparison Code — condition encodings consumed by the branch/cmove
// instructs (1=eq, 2=ne, 3=gt, 4=ge, 5=lt, 6=le, 7=ov, 8=no-ov).
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}


// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// On this port cmpOpU uses the same encodings as cmpOp; the signed vs.
// unsigned distinction is handled by the instruct that consumes it.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
5621 /*
5622 // Comparison Code, unsigned compare. Used by FP also, with
5623 // C2 (unordered) turned into GT or LT already. The other bits
5624 // C0 and C3 are turned into Carry & Zero flags.
5625 operand cmpOpU() %{
5626 match(Bool);
5628 format %{ "" %}
5629 interface(COND_INTER) %{
5630 equal(0x4);
5631 not_equal(0x5);
5632 less(0x2);
5633 greater_equal(0x3);
5634 less_equal(0x6);
5635 greater(0x7);
5636 %}
5637 %}
5638 */
5639 /*
5640 // Comparison Code for FP conditional move
5641 operand cmpOp_fcmov() %{
5642 match(Bool);
5644 format %{ "" %}
5645 interface(COND_INTER) %{
5646 equal (0x01);
5647 not_equal (0x02);
5648 greater (0x03);
5649 greater_equal(0x04);
5650 less (0x05);
5651 less_equal (0x06);
5652 %}
5653 %}
5655 // Comparision Code used in long compares
5656 operand cmpOp_commute() %{
5657 match(Bool);
5659 format %{ "" %}
5660 interface(COND_INTER) %{
5661 equal(0x4);
5662 not_equal(0x5);
5663 less(0xF);
5664 greater_equal(0xE);
5665 less_equal(0xD);
5666 greater(0xC);
5667 %}
5668 %}
5669 */
5671 //----------Special Memory Operands--------------------------------------------
5672 // Stack Slot Operand - This operand is used for loading and storing temporary
5673 // values on the stack where a match requires a value to
5674 // flow through memory.
// Stack-slot operands: SP-relative memory for values that must flow
// through the stack during matching. base(0x1d) is the encoding of SP;
// the operand's register mask supplies the displacement.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
5741 //------------------------OPERAND CLASSES--------------------------------------
5742 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// The single "memory" opclass: any instruct taking a memory operand
// accepts every addressing mode listed here.
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5746 //----------PIPELINE-----------------------------------------------------------
5747 // Rules which define the behavior of the target architectures pipeline.
5749 pipeline %{
5751 //----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;           // Fixed size instructions
  branch_has_delay_slot;             // branches have a delay slot on gs2
  max_instructions_per_bundle = 1;   // 1 instruction per bundle
  max_bundles_per_cycle = 4;         // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}
5766 //----------RESOURCES----------------------------------------------------------
5767 // Resources are the functional units available to the machine
// Functional units: 4 decoders, 2 integer ALUs, 2 FPUs, one memory
// port and one branch unit.
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);
5784 //----------PIPELINE CLASSES---------------------------------------------------
5785 // Pipeline Classes describe the stages in which input and output are
5786 // referenced by the hardware pipeline.
//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+1;
  DECODE : ID;
  ALU : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+5;   // multiply result available 5 cycles later
  DECODE : ID;
  ALU2 : CA;           // only the second ALU does multiplies
%}

// Long multiply: longer latency than the 32-bit multiply above.
pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);  // div + move-from-HI
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}
//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;   // load-use latency
  DECODE : ID;
  MEM : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}
5871 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5872 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5873 single_instruction;
5874 src : RD(read);
5875 dst : WB(write);
5876 DECODE : ID;
5877 ALU : CA;
5878 %}
5880 //No.3 Integer move operation : dst <-- reg
5881 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5882 src : RD(read);
5883 dst : WB(write);
5884 DECODE : ID;
5885 ALU : CA;
5886 %}
5888 //No.4 No instructions : do nothing
5889 pipe_class empty( ) %{
5890 instruction_count(0);
5891 %}
5893 //No.5 UnConditional branch :
5894 pipe_class pipe_jump( label labl ) %{
5895 multiple_bundles;
5896 DECODE : ID;
5897 BR : RD;
5898 %}
5900 //No.6 ALU Conditional branch :
5901 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5902 multiple_bundles;
5903 src1 : RD(read);
5904 src2 : RD(read);
5905 DECODE : ID;
5906 BR : RD;
5907 %}
5909 //no.7 load integer from memory :
// "+3" models the load-to-use latency of an integer load.
5910 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5911 mem : RD(read);
5912 dst : WB(write)+3;
5913 DECODE : ID;
5914 MEM : RD;
5915 %}
5917 //No.8 Store Integer to Memory :
5918 pipe_class ialu_storeI(mRegI src, memory mem) %{
5919 mem : RD(read);
5920 src : RD(read);
5921 DECODE : ID;
5922 MEM : RD;
5923 %}
5926 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5927 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5928 src1 : RD(read);
5929 src2 : RD(read);
5930 dst : WB(write);
5931 DECODE : ID;
5932 FPU : CA;
5933 %}
5935 //No.22 Floating div operation : dst <-- reg1 div reg2
// Pinned to FPU2 (not the aggregate FPU resource).
5936 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5937 src1 : RD(read);
5938 src2 : RD(read);
5939 dst : WB(write);
5940 DECODE : ID;
5941 FPU2 : CA;
5942 %}
// int -> double conversion; uses FPU1 only.
5944 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5945 src : RD(read);
5946 dst : WB(write);
5947 DECODE : ID;
5948 FPU1 : CA;
5949 %}
// double -> int conversion; uses FPU1 only.
5951 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5952 src : RD(read);
5953 dst : WB(write);
5954 DECODE : ID;
5955 FPU1 : CA;
5956 %}
// FPR -> GPR move (mfc1); modeled as going through the memory port.
5958 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5959 src : RD(read);
5960 dst : WB(write);
5961 DECODE : ID;
5962 MEM : RD;
5963 %}
// GPR -> FPR move (mtc1); occupies the memory port for 5 cycles.
5965 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5966 src : RD(read);
5967 dst : WB(write);
5968 DECODE : ID;
5969 MEM : RD(5);
5970 %}
5972 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5973 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5974 multiple_bundles;
5975 src1 : RD(read);
5976 src2 : RD(read);
5977 dst : WB(write);
5978 DECODE : ID;
5979 FPU2 : CA;
5980 %}
5982 //No.11 Load Floating from Memory :
5983 pipe_class fpu_loadF(regF dst, memory mem) %{
5984 instruction_count(1);
5985 mem : RD(read);
5986 dst : WB(write)+3;
5987 DECODE : ID;
5988 MEM : RD;
5989 %}
5991 //No.12 Store Floating to Memory :
5992 pipe_class fpu_storeF(regF src, memory mem) %{
5993 instruction_count(1);
5994 mem : RD(read);
5995 src : RD(read);
5996 DECODE : ID;
5997 MEM : RD;
5998 %}
6000 //No.13 FPU Conditional branch :
6001 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
6002 multiple_bundles;
6003 src1 : RD(read);
6004 src2 : RD(read);
6005 DECODE : ID;
6006 BR : RD;
6007 %}
6009 //No.14 Floating FPU reg operation : dst <-- op reg
6010 pipe_class fpu1_regF(regF dst, regF src) %{
6011 src : RD(read);
6012 dst : WB(write);
6013 DECODE : ID;
6014 FPU : CA;
6015 %}
// Serializing multi-instruction memory sequence; conservative fixed latency.
6017 pipe_class long_memory_op() %{
6018 instruction_count(10); multiple_bundles; force_serialization;
6019 fixed_latency(30);
6020 %}
// Call that occupies the branch unit; large fixed latency keeps the
// scheduler from packing work around it.
6022 pipe_class simple_call() %{
6023 instruction_count(10); multiple_bundles; force_serialization;
6024 fixed_latency(200);
6025 BR : RD;
6026 %}
// Like simple_call but without claiming the branch unit.
6028 pipe_class call() %{
6029 instruction_count(10); multiple_bundles; force_serialization;
6030 fixed_latency(200);
6031 %}
6033 //FIXME:
6034 //No.9 Piple slow : for multi-instructions
6035 pipe_class pipe_slow( ) %{
6036 instruction_count(20);
6037 force_serialization;
6038 multiple_bundles;
6039 fixed_latency(50);
6040 %}
6042 %}
6046 //----------INSTRUCTIONS-------------------------------------------------------
6047 //
6048 // match -- States which machine-independent subtree may be replaced
6049 // by this instruction.
6050 // ins_cost -- The estimated cost of this instruction is used by instruction
6051 // selection to identify a minimum cost tree of machine
6052 // instructions that matches a tree of machine-independent
6053 // instructions.
6054 // format -- A string providing the disassembly for this instruction.
6055 // The value of an instruction's operand may be inserted
6056 // by referring to it with a '$' prefix.
6057 // opcode -- Three instruction opcodes may be provided. These are referred
6058 // to within an encode class as $primary, $secondary, and $tertiary
6059 // respectively. The primary opcode is commonly used to
6060 // indicate the type of machine instruction, while secondary
6061 // and tertiary are often used for prefix options or addressing
6062 // modes.
6063 // ins_encode -- A list of encode classes with parameters. The encode class
6064 // name must have been defined in an 'enc_class' specification
6065 // in the encode section of the architecture description.
6068 // Load Integer
6069 instruct loadI(mRegI dst, memory mem) %{
6070 match(Set dst (LoadI mem));
6072 ins_cost(125);
6073 format %{ "lw $dst, $mem #@loadI" %}
6074 ins_encode (load_I_enc(dst, mem));
6075 ins_pipe( ialu_loadI );
6076 %}
// Fold ConvI2L into the load: the same lw encoding is emitted, so the
// widening costs no extra instruction.
6078 instruct loadI_convI2L(mRegL dst, memory mem) %{
6079 match(Set dst (ConvI2L (LoadI mem)));
6081 ins_cost(125);
6082 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6083 ins_encode (load_I_enc(dst, mem));
6084 ins_pipe( ialu_loadI );
6085 %}
6087 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches the shift-left-24/shift-right-24 idiom and folds it into lb.
6088 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6089 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6091 ins_cost(125);
6092 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6093 ins_encode(load_B_enc(dst, mem));
6094 ins_pipe(ialu_loadI);
6095 %}
6097 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
6098 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6099 match(Set dst (AndI (LoadI mem) mask));
6101 ins_cost(125);
6102 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6103 ins_encode(load_UB_enc(dst, mem));
6104 ins_pipe(ialu_loadI);
6105 %}
6107 // Load Integer (32 bit signed) to Short (16 bit signed)
6108 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6109 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6111 ins_cost(125);
6112 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6113 ins_encode(load_S_enc(dst, mem));
6114 ins_pipe(ialu_loadI);
6115 %}
6117 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
6118 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6119 match(Set dst (AndI (LoadI mem) mask));
6121 ins_cost(125);
6122 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6123 ins_encode(load_C_enc(dst, mem));
6124 ins_pipe(ialu_loadI);
6125 %}
6127 // Load Long.
6128 instruct loadL(mRegL dst, memory mem) %{
6129 // predicate(!((LoadLNode*)n)->require_atomic_access());
6130 match(Set dst (LoadL mem));
6132 ins_cost(250);
6133 format %{ "ld $dst, $mem #@loadL" %}
6134 ins_encode(load_L_enc(dst, mem));
6135 ins_pipe( ialu_loadL );
6136 %}
6138 // Load Long - UNaligned
// Same encoding as loadL; the higher cost discourages selection.
6139 instruct loadL_unaligned(mRegL dst, memory mem) %{
6140 match(Set dst (LoadL_unaligned mem));
6142 // FIXME: Jin: Need more effective ldl/ldr
6143 ins_cost(450);
6144 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6145 ins_encode(load_L_enc(dst, mem));
6146 ins_pipe( ialu_loadL );
6147 %}
6149 // Store Long
6150 instruct storeL_reg(memory mem, mRegL src) %{
6151 match(Set mem (StoreL mem src));
6153 ins_cost(200);
6154 format %{ "sd $mem, $src #@storeL_reg\n" %}
6155 ins_encode(store_L_reg_enc(mem, src));
6156 ins_pipe( ialu_storeL );
6157 %}
// Store of long zero: cheaper variant that stores R0 directly.
6160 instruct storeL_immL0(memory mem, immL0 zero) %{
6161 match(Set mem (StoreL mem zero));
6163 ins_cost(180);
6164 format %{ "sd $mem, zero #@storeL_immL0" %}
6165 ins_encode(store_L_immL0_enc(mem, zero));
6166 ins_pipe( ialu_storeL );
6167 %}
6169 // Load Compressed Pointer
6170 instruct loadN(mRegN dst, memory mem)
6171 %{
6172 match(Set dst (LoadN mem));
6174 ins_cost(125); // XXX
6175 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6176 ins_encode (load_N_enc(dst, mem));
6177 ins_pipe( ialu_loadI ); // XXX
6178 %}
// Load + DecodeN folded: when the narrow-oop base is NULL and the shift
// is 0, decoding is the identity, so a plain lwu yields the full pointer.
6180 instruct loadN2P(mRegP dst, memory mem)
6181 %{
6182 match(Set dst (DecodeN (LoadN mem)));
6183 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6185 ins_cost(125); // XXX
6186 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6187 ins_encode (load_N_enc(dst, mem));
6188 ins_pipe( ialu_loadI ); // XXX
6189 %}
6191 // Load Pointer
6192 instruct loadP(mRegP dst, memory mem) %{
6193 match(Set dst (LoadP mem));
6195 ins_cost(125);
6196 format %{ "ld $dst, $mem #@loadP" %}
6197 ins_encode (load_P_enc(dst, mem));
6198 ins_pipe( ialu_loadI );
6199 %}
6201 // Load Klass Pointer
6202 instruct loadKlass(mRegP dst, memory mem) %{
6203 match(Set dst (LoadKlass mem));
6205 ins_cost(125);
6206 format %{ "MOV $dst,$mem @ loadKlass" %}
6207 ins_encode (load_P_enc(dst, mem));
6208 ins_pipe( ialu_loadI );
6209 %}
6211 // Load narrow Klass Pointer
6212 instruct loadNKlass(mRegN dst, memory mem)
6213 %{
6214 match(Set dst (LoadNKlass mem));
6216 ins_cost(125); // XXX
6217 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6218 ins_encode (load_N_enc(dst, mem));
6219 ins_pipe( ialu_loadI ); // XXX
6220 %}
// Load + DecodeNKlass folded under the same identity-decode condition
// (no klass base, no shift).
6222 instruct loadN2PKlass(mRegP dst, memory mem)
6223 %{
6224 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6225 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6227 ins_cost(125); // XXX
6228 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6229 ins_encode (load_N_enc(dst, mem));
6230 ins_pipe( ialu_loadI ); // XXX
6231 %}
6233 // Load Constant
6234 instruct loadConI(mRegI dst, immI src) %{
6235 match(Set dst src);
6237 ins_cost(150);
6238 format %{ "mov $dst, $src #@loadConI" %}
6239 ins_encode %{
6240 Register dst = $dst$$Register;
6241 int value = $src$$constant;
6242 __ move(dst, value);
6243 %}
6244 ins_pipe( ialu_regI_regI );
6245 %}
// Materialize an arbitrary 64-bit constant via the set64 macro.
6248 instruct loadConL_set64(mRegL dst, immL src) %{
6249 match(Set dst src);
6250 ins_cost(120);
6251 format %{ "li $dst, $src @ loadConL_set64" %}
6252 ins_encode %{
6253 __ set64($dst$$Register, $src$$constant);
6254 %}
6255 ins_pipe(ialu_regL_regL);
6256 %}
6258 /*
6259 // Load long value from constant table (predicated by immL_expensive).
6260 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6261 match(Set dst src);
6262 ins_cost(150);
6263 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6264 ins_encode %{
6265 int con_offset = $constantoffset($src);
6267 if (Assembler::is_simm16(con_offset)) {
6268 __ ld($dst$$Register, $constanttablebase, con_offset);
6269 } else {
6270 __ set64(AT, con_offset);
6271 if (UseLoongsonISA) {
6272 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6273 } else {
6274 __ daddu(AT, $constanttablebase, AT);
6275 __ ld($dst$$Register, AT, 0);
6276 }
6277 }
6278 %}
6279 ins_pipe(ialu_loadI);
6280 %}
6281 */
// Long constant that fits in a signed 16-bit immediate: single daddiu from R0.
6283 instruct loadConL16(mRegL dst, immL16 src) %{
6284 match(Set dst src);
6285 ins_cost(105);
6286 format %{ "mov $dst, $src #@loadConL16" %}
6287 ins_encode %{
6288 Register dst_reg = as_Register($dst$$reg);
6289 int value = $src$$constant;
6290 __ daddiu(dst_reg, R0, value);
6291 %}
6292 ins_pipe( ialu_regL_regL );
6293 %}
// Long zero: cheapest form, R0 + R0.
6296 instruct loadConL0(mRegL dst, immL0 src) %{
6297 match(Set dst src);
6298 ins_cost(100);
6299 format %{ "mov $dst, zero #@loadConL0" %}
6300 ins_encode %{
6301 Register dst_reg = as_Register($dst$$reg);
6302 __ daddu(dst_reg, R0, R0);
6303 %}
6304 ins_pipe( ialu_regL_regL );
6305 %}
6307 // Load Range
// Array length load; same encoding as an ordinary int load.
6308 instruct loadRange(mRegI dst, memory mem) %{
6309 match(Set dst (LoadRange mem));
6311 ins_cost(125);
6312 format %{ "MOV $dst,$mem @ loadRange" %}
6313 ins_encode(load_I_enc(dst, mem));
6314 ins_pipe( ialu_loadI );
6315 %}
// Store Pointer
6318 instruct storeP(memory mem, mRegP src ) %{
6319 match(Set mem (StoreP mem src));
6321 ins_cost(125);
6322 format %{ "sd $src, $mem #@storeP" %}
6323 ins_encode(store_P_reg_enc(mem, src));
6324 ins_pipe( ialu_storeI );
6325 %}
6327 // Store NULL Pointer, mark word, or other simple pointer constant.
6328 instruct storeImmP0(memory mem, immP0 zero) %{
6329 match(Set mem (StoreP mem zero));
6331 ins_cost(125);
6332 format %{ "mov $mem, $zero #@storeImmP0" %}
6333 ins_encode(store_P_immP0_enc(mem));
6334 ins_pipe( ialu_storeI );
6335 %}
6337 // Store Byte Immediate
6338 instruct storeImmB(memory mem, immI8 src) %{
6339 match(Set mem (StoreB mem src));
6341 ins_cost(150);
6342 format %{ "movb $mem, $src #@storeImmB" %}
6343 ins_encode(store_B_immI_enc(mem, src));
6344 ins_pipe( ialu_storeI );
6345 %}
6347 // Store Compressed Pointer
6348 instruct storeN(memory mem, mRegN src)
6349 %{
6350 match(Set mem (StoreN mem src));
6352 ins_cost(125); // XXX
6353 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6354 ins_encode(store_N_reg_enc(mem, src));
6355 ins_pipe( ialu_storeI );
6356 %}
// EncodeP + store folded: when the narrow-oop base is NULL and the shift
// is 0, encoding is the identity, so the low 32 bits are stored directly.
6358 instruct storeP2N(memory mem, mRegP src)
6359 %{
6360 match(Set mem (StoreN mem (EncodeP src)));
6361 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6363 ins_cost(125); // XXX
6364 format %{ "sw $mem, $src\t# @ storeP2N" %}
6365 ins_encode(store_N_reg_enc(mem, src));
6366 ins_pipe( ialu_storeI );
6367 %}
// Store compressed klass pointer.
6369 instruct storeNKlass(memory mem, mRegN src)
6370 %{
6371 match(Set mem (StoreNKlass mem src));
6373 ins_cost(125); // XXX
6374 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6375 ins_encode(store_N_reg_enc(mem, src));
6376 ins_pipe( ialu_storeI );
6377 %}
// EncodePKlass + store folded under the same identity-encode condition.
6379 instruct storeP2NKlass(memory mem, mRegP src)
6380 %{
6381 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6382 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6384 ins_cost(125); // XXX
6385 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6386 ins_encode(store_N_reg_enc(mem, src));
6387 ins_pipe( ialu_storeI );
6388 %}
// Store compressed null.
6390 instruct storeImmN0(memory mem, immN0 zero)
6391 %{
6392 match(Set mem (StoreN mem zero));
6394 ins_cost(125); // XXX
6395 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6396 ins_encode(storeImmN0_enc(mem, zero));
6397 ins_pipe( ialu_storeI );
6398 %}
6400 // Store Byte
6401 instruct storeB(memory mem, mRegI src) %{
6402 match(Set mem (StoreB mem src));
6404 ins_cost(125);
6405 format %{ "sb $src, $mem #@storeB" %}
6406 ins_encode(store_B_reg_enc(mem, src));
6407 ins_pipe( ialu_storeI );
6408 %}
// ConvL2I + byte store folded: the low byte is unchanged by the
// narrowing, so the same sb encoding is used.
6410 instruct storeB_convL2I(memory mem, mRegL src) %{
6411 match(Set mem (StoreB mem (ConvL2I src)));
6413 ins_cost(125);
6414 format %{ "sb $src, $mem #@storeB_convL2I" %}
6415 ins_encode(store_B_reg_enc(mem, src));
6416 ins_pipe( ialu_storeI );
6417 %}
6419 // Load Byte (8bit signed)
6420 instruct loadB(mRegI dst, memory mem) %{
6421 match(Set dst (LoadB mem));
6423 ins_cost(125);
6424 format %{ "lb $dst, $mem #@loadB" %}
6425 ins_encode(load_B_enc(dst, mem));
6426 ins_pipe( ialu_loadI );
6427 %}
// ConvI2L folded into the byte load (same lb encoding).
6429 instruct loadB_convI2L(mRegL dst, memory mem) %{
6430 match(Set dst (ConvI2L (LoadB mem)));
6432 ins_cost(125);
6433 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6434 ins_encode(load_B_enc(dst, mem));
6435 ins_pipe( ialu_loadI );
6436 %}
6438 // Load Byte (8bit UNsigned)
6439 instruct loadUB(mRegI dst, memory mem) %{
6440 match(Set dst (LoadUB mem));
6442 ins_cost(125);
6443 format %{ "lbu $dst, $mem #@loadUB" %}
6444 ins_encode(load_UB_enc(dst, mem));
6445 ins_pipe( ialu_loadI );
6446 %}
6448 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6449 match(Set dst (ConvI2L (LoadUB mem)));
6451 ins_cost(125);
6452 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6453 ins_encode(load_UB_enc(dst, mem));
6454 ins_pipe( ialu_loadI );
6455 %}
6457 // Load Short (16bit signed)
6458 instruct loadS(mRegI dst, memory mem) %{
6459 match(Set dst (LoadS mem));
6461 ins_cost(125);
6462 format %{ "lh $dst, $mem #@loadS" %}
6463 ins_encode(load_S_enc(dst, mem));
6464 ins_pipe( ialu_loadI );
6465 %}
6467 // Load Short (16 bit signed) to Byte (8 bit signed)
6468 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6469 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6471 ins_cost(125);
6472 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6473 ins_encode(load_B_enc(dst, mem));
6474 ins_pipe(ialu_loadI);
6475 %}
6477 instruct loadS_convI2L(mRegL dst, memory mem) %{
6478 match(Set dst (ConvI2L (LoadS mem)));
6480 ins_cost(125);
6481 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6482 ins_encode(load_S_enc(dst, mem));
6483 ins_pipe( ialu_loadI );
6484 %}
6486 // Store Integer Immediate
6487 instruct storeImmI(memory mem, immI src) %{
6488 match(Set mem (StoreI mem src));
6490 ins_cost(150);
6491 format %{ "mov $mem, $src #@storeImmI" %}
6492 ins_encode(store_I_immI_enc(mem, src));
6493 ins_pipe( ialu_storeI );
6494 %}
6496 // Store Integer
6497 instruct storeI(memory mem, mRegI src) %{
6498 match(Set mem (StoreI mem src));
6500 ins_cost(125);
6501 format %{ "sw $mem, $src #@storeI" %}
6502 ins_encode(store_I_reg_enc(mem, src));
6503 ins_pipe( ialu_storeI );
6504 %}
// ConvL2I + int store folded: low 32 bits stored directly with sw.
6506 instruct storeI_convL2I(memory mem, mRegL src) %{
6507 match(Set mem (StoreI mem (ConvL2I src)));
6509 ins_cost(125);
6510 format %{ "sw $mem, $src #@storeI_convL2I" %}
6511 ins_encode(store_I_reg_enc(mem, src));
6512 ins_pipe( ialu_storeI );
6513 %}
6515 // Load Float
6516 instruct loadF(regF dst, memory mem) %{
6517 match(Set dst (LoadF mem));
6519 ins_cost(150);
6520 format %{ "loadF $dst, $mem #@loadF" %}
6521 ins_encode(load_F_enc(dst, mem));
6522 ins_pipe( ialu_loadI );
6523 %}
// Materialize a pointer constant.
// Oop and metadata (Klass*) constants are recorded with the oop recorder
// and emitted with relocation info plus a fixed-length patchable 48-bit
// load sequence, so the embedded value can be patched later; constants
// with no relocation use the ordinary set64 macro.
6525 instruct loadConP_general(mRegP dst, immP src) %{
6526 match(Set dst src);
6528 ins_cost(120);
6529 format %{ "li $dst, $src #@loadConP_general" %}
6531 ins_encode %{
6532 Register dst = $dst$$Register;
6533 long* value = (long*)$src$$constant;
6535 if($src->constant_reloc() == relocInfo::metadata_type){
6536 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6537 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6539 __ relocate(rspec);
6540 __ patchable_set48(dst, (long)value);
6541 }else if($src->constant_reloc() == relocInfo::oop_type){
6542 int oop_index = __ oop_recorder()->find_index((jobject)value);
6543 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6545 __ relocate(rspec);
6546 __ patchable_set48(dst, (long)value);
6547 } else if ($src->constant_reloc() == relocInfo::none) {
6548 __ set64(dst, (long)value);
6549 }
6550 %}
6552 ins_pipe( ialu_regI_regI );
6553 %}
6555 /*
6556 instruct loadConP_load(mRegP dst, immP_load src) %{
6557 match(Set dst src);
6559 ins_cost(100);
6560 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6562 ins_encode %{
6564 int con_offset = $constantoffset($src);
6566 if (Assembler::is_simm16(con_offset)) {
6567 __ ld($dst$$Register, $constanttablebase, con_offset);
6568 } else {
6569 __ set64(AT, con_offset);
6570 if (UseLoongsonISA) {
6571 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6572 } else {
6573 __ daddu(AT, $constanttablebase, AT);
6574 __ ld($dst$$Register, AT, 0);
6575 }
6576 }
6577 %}
6579 ins_pipe(ialu_loadI);
6580 %}
6581 */
// Non-oop pointer constant that is cheap to materialize inline.
6583 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6584 match(Set dst src);
6586 ins_cost(80);
6587 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6589 ins_encode %{
6590 __ set64($dst$$Register, $src$$constant);
6591 %}
6593 ins_pipe(ialu_regI_regI);
6594 %}
// Safepoint polling page address constant.
6597 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6598 match(Set dst src);
6600 ins_cost(50);
6601 format %{ "li $dst, $src #@loadConP_poll" %}
6603 ins_encode %{
6604 Register dst = $dst$$Register;
6605 intptr_t value = (intptr_t)$src$$constant;
6607 __ set64(dst, (jlong)value);
6608 %}
6610 ins_pipe( ialu_regI_regI );
6611 %}
// Null pointer constant: zero the register via R0 + R0.
6613 instruct loadConP0(mRegP dst, immP0 src)
6614 %{
6615 match(Set dst src);
6617 ins_cost(50);
6618 format %{ "mov $dst, R0\t# ptr" %}
6619 ins_encode %{
6620 Register dst_reg = $dst$$Register;
6621 __ daddu(dst_reg, R0, R0);
6622 %}
6623 ins_pipe( ialu_regI_regI );
6624 %}
6626 instruct loadConN0(mRegN dst, immN0 src) %{
6627 match(Set dst src);
6628 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6629 ins_encode %{
6630 __ move($dst$$Register, R0);
6631 %}
6632 ins_pipe( ialu_regI_regI );
6633 %}
// Compressed oop constant; set_narrow_oop handles encoding + relocation.
6635 instruct loadConN(mRegN dst, immN src) %{
6636 match(Set dst src);
6638 ins_cost(125);
6639 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6640 ins_encode %{
6641 Register dst = $dst$$Register;
6642 __ set_narrow_oop(dst, (jobject)$src$$constant);
6643 %}
6644 ins_pipe( ialu_regI_regI ); // XXX
6645 %}
// Compressed klass constant; set_narrow_klass handles encoding + relocation.
6647 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6648 match(Set dst src);
6650 ins_cost(125);
6651 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6652 ins_encode %{
6653 Register dst = $dst$$Register;
6654 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6655 %}
6656 ins_pipe( ialu_regI_regI ); // XXX
6657 %}
6659 //FIXME
6660 // Tail Call; Jump from runtime stub to Java code.
6661 // Also known as an 'interprocedural jump'.
6662 // Target of jump will eventually return to caller.
6663 // TailJump below removes the return address.
// The method oop is handed over in S3; the trailing nop fills the
// jr branch delay slot.
6664 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6665 match(TailCall jump_target method_oop );
6666 ins_cost(300);
6667 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6669 ins_encode %{
6670 Register target = $jump_target$$Register;
6671 Register oop = $method_oop$$Register;
6673 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6674 __ push(RA);
6676 __ move(S3, oop);
6677 __ jr(target);
6678 __ nop();
6679 %}
6681 ins_pipe( pipe_jump );
6682 %}
6684 // Create exception oop: created by stack-crawling runtime code.
6685 // Created exception is now available to this handler, and is setup
6686 // just prior to jumping to this handler. No code emitted.
6687 instruct CreateException( a0_RegP ex_oop )
6688 %{
6689 match(Set ex_oop (CreateEx));
6691 // use the following format syntax
6692 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6693 ins_encode %{
6694 /* Jin: X86 leaves this function empty */
6695 __ block_comment("CreateException is empty in X86/MIPS");
6696 %}
6697 ins_pipe( empty );
6698 // ins_pipe( pipe_jump );
6699 %}
6702 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6704 - Common try/catch:
6705 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6706 |- V0, V1 are created
6707 |- T9 <= SharedRuntime::exception_handler_for_return_address
6708 `- jr T9
6709 `- the caller's exception_handler
6710 `- jr OptoRuntime::exception_blob
6711 `- here
6712 - Rethrow(e.g. 'unwind'):
6713 * The callee:
6714 |- an exception is triggered during execution
6715 `- exits the callee method through RethrowException node
6716 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6717 `- The callee jumps to OptoRuntime::rethrow_stub()
6718 * In OptoRuntime::rethrow_stub:
6719 |- The VM calls _rethrow_Java to determine the return address in the caller method
6720 `- exits the stub with tailjmpInd
6721 |- pops exception_oop(V0) and exception_pc(V1)
6722 `- jumps to the return address(usually an exception_handler)
6723 * The caller:
6724 `- continues processing the exception_blob with V0/V1
6725 */
6727 /*
6728 Disassembling OptoRuntime::rethrow_stub()
6730 ; locals
6731 0x2d3bf320: addiu sp, sp, 0xfffffff8
6732 0x2d3bf324: sw ra, 0x4(sp)
6733 0x2d3bf328: sw fp, 0x0(sp)
6734 0x2d3bf32c: addu fp, sp, zero
6735 0x2d3bf330: addiu sp, sp, 0xfffffff0
6736 0x2d3bf334: sw ra, 0x8(sp)
6737 0x2d3bf338: sw t0, 0x4(sp)
6738 0x2d3bf33c: sw sp, 0x0(sp)
6740 ; get_thread(S2)
6741 0x2d3bf340: addu s2, sp, zero
6742 0x2d3bf344: srl s2, s2, 12
6743 0x2d3bf348: sll s2, s2, 2
6744 0x2d3bf34c: lui at, 0x2c85
6745 0x2d3bf350: addu at, at, s2
6746 0x2d3bf354: lw s2, 0xffffcc80(at)
6748 0x2d3bf358: lw s0, 0x0(sp)
6749 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6750 0x2d3bf360: sw s2, 0xc(sp)
6752 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6753 0x2d3bf364: lw a0, 0x4(sp)
6754 0x2d3bf368: lw a1, 0xc(sp)
6755 0x2d3bf36c: lw a2, 0x8(sp)
6756 ;; Java_To_Runtime
6757 0x2d3bf370: lui t9, 0x2c34
6758 0x2d3bf374: addiu t9, t9, 0xffff8a48
6759 0x2d3bf378: jalr t9
6760 0x2d3bf37c: nop
6762 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6764 0x2d3bf384: lw s0, 0xc(sp)
6765 0x2d3bf388: sw zero, 0x118(s0)
6766 0x2d3bf38c: sw zero, 0x11c(s0)
6767 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6768 0x2d3bf394: addu s2, s0, zero
6769 0x2d3bf398: sw zero, 0x144(s2)
6770 0x2d3bf39c: lw s0, 0x4(s2)
6771 0x2d3bf3a0: addiu s4, zero, 0x0
6772 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6773 0x2d3bf3a8: nop
6774 0x2d3bf3ac: addiu sp, sp, 0x10
6775 0x2d3bf3b0: addiu sp, sp, 0x8
6776 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6777 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6778 0x2d3bf3bc: lui at, 0x2b48
6779 0x2d3bf3c0: lw at, 0x100(at)
6781 ; tailjmpInd: Restores exception_oop & exception_pc
6782 0x2d3bf3c4: addu v1, ra, zero
6783 0x2d3bf3c8: addu v0, s1, zero
6784 0x2d3bf3cc: jr s3
6785 0x2d3bf3d0: nop
6786 ; Exception:
6787 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6788 0x2d3bf3d8: addiu s1, s1, 0x40
6789 0x2d3bf3dc: addiu s2, zero, 0x0
6790 0x2d3bf3e0: addiu sp, sp, 0x10
6791 0x2d3bf3e4: addiu sp, sp, 0x8
6792 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6793 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6794 0x2d3bf3f0: lui at, 0x2b48
6795 0x2d3bf3f4: lw at, 0x100(at)
6796 ; TailCalljmpInd
6797 __ push(RA); ; to be used in generate_forward_exception()
6798 0x2d3bf3f8: addu t7, s2, zero
6799 0x2d3bf3fc: jr s1
6800 0x2d3bf400: nop
6801 */
6802 // Rethrow exception:
6803 // The exception oop will come in the first argument position.
6804 // Then JUMP (not call) to the rethrow stub code.
// The relocation mark is set just before loading the stub address so
// the runtime-call relocation covers the patchable 48-bit sequence.
6805 instruct RethrowException()
6806 %{
6807 match(Rethrow);
6809 // use the following format syntax
6810 format %{ "JMP rethrow_stub #@RethrowException" %}
6811 ins_encode %{
6812 __ block_comment("@ RethrowException");
6814 cbuf.set_insts_mark();
6815 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6817 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6818 __ patchable_set48(T9, (jlong)OptoRuntime::rethrow_stub());
6819 __ jr(T9);
6820 __ nop();
6821 %}
6822 ins_pipe( pipe_jump );
6823 %}
// Pointer compare against null with conditional branch.
// Only eq/ne are legal for a null compare; the unsigned orderings are
// intentionally commented out below.
6825 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6826 match(If cmp (CmpP op1 zero));
6827 effect(USE labl);
6829 ins_cost(180);
6830 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6832 ins_encode %{
6833 Register op1 = $op1$$Register;
6834 Register op2 = R0;
6835 Label &L = *($labl$$label);
6836 int flag = $cmp$$cmpcode;
// NOTE(review): "&L" is the address of a C++ reference and is always
// non-null, so the "(int)0" else-arms appear unreachable -- confirm
// before relying on them.
6838 switch(flag)
6839 {
6840 case 0x01: //equal
6841 if (&L)
6842 __ beq(op1, op2, L);
6843 else
6844 __ beq(op1, op2, (int)0);
6845 break;
6846 case 0x02: //not_equal
6847 if (&L)
6848 __ bne(op1, op2, L);
6849 else
6850 __ bne(op1, op2, (int)0);
6851 break;
6852 /*
6853 case 0x03: //above
6854 __ sltu(AT, op2, op1);
6855 if(&L)
6856 __ bne(R0, AT, L);
6857 else
6858 __ bne(R0, AT, (int)0);
6859 break;
6860 case 0x04: //above_equal
6861 __ sltu(AT, op1, op2);
6862 if(&L)
6863 __ beq(AT, R0, L);
6864 else
6865 __ beq(AT, R0, (int)0);
6866 break;
6867 case 0x05: //below
6868 __ sltu(AT, op1, op2);
6869 if(&L)
6870 __ bne(R0, AT, L);
6871 else
6872 __ bne(R0, AT, (int)0);
6873 break;
6874 case 0x06: //below_equal
6875 __ sltu(AT, op2, op1);
6876 if(&L)
6877 __ beq(AT, R0, L);
6878 else
6879 __ beq(AT, R0, (int)0);
6880 break;
6881 */
6882 default:
6883 Unimplemented();
6884 }
6885 __ nop();
6886 %}
6888 ins_pc_relative(1);
6889 ins_pipe( pipe_alu_branch );
6890 %}
// Null-check branch on a compressed oop when DecodeN is the identity
// (no heap base, no shift): compare the narrow register against R0
// directly, without decoding first.
6892 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
6893 match(If cmp (CmpP (DecodeN op1) zero));
6894 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6895 effect(USE labl);
6897 ins_cost(180);
6898 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
6900 ins_encode %{
6901 Register op1 = $op1$$Register;
6902 Register op2 = R0;
6903 Label &L = *($labl$$label);
6904 int flag = $cmp$$cmpcode;
// NOTE(review): "&L" is always non-null (address of a reference); the
// "(int)0" arms look dead -- confirm before reuse.
6906 switch(flag)
6907 {
6908 case 0x01: //equal
6909 if (&L)
6910 __ beq(op1, op2, L);
6911 else
6912 __ beq(op1, op2, (int)0);
6913 break;
6914 case 0x02: //not_equal
6915 if (&L)
6916 __ bne(op1, op2, L);
6917 else
6918 __ bne(op1, op2, (int)0);
6919 break;
6920 default:
6921 Unimplemented();
6922 }
6923 __ nop();
6924 %}
6926 ins_pc_relative(1);
6927 ins_pipe( pipe_alu_branch );
6928 %}
// Unsigned pointer compare between two registers with conditional branch.
// Orderings (above/below/...) are synthesized with sltu into AT followed
// by a beq/bne against R0.
6931 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6932 match(If cmp (CmpP op1 op2));
6933 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6934 effect(USE labl);
6936 ins_cost(200);
6937 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6939 ins_encode %{
6940 Register op1 = $op1$$Register;
6941 Register op2 = $op2$$Register;
6942 Label &L = *($labl$$label);
6943 int flag = $cmp$$cmpcode;
// NOTE(review): "&L" is always non-null (address of a reference); the
// "(int)0" arms look dead -- confirm before reuse.
6945 switch(flag)
6946 {
6947 case 0x01: //equal
6948 if (&L)
6949 __ beq(op1, op2, L);
6950 else
6951 __ beq(op1, op2, (int)0);
6952 break;
6953 case 0x02: //not_equal
6954 if (&L)
6955 __ bne(op1, op2, L);
6956 else
6957 __ bne(op1, op2, (int)0);
6958 break;
6959 case 0x03: //above
6960 __ sltu(AT, op2, op1);
6961 if(&L)
6962 __ bne(R0, AT, L);
6963 else
6964 __ bne(R0, AT, (int)0);
6965 break;
6966 case 0x04: //above_equal
6967 __ sltu(AT, op1, op2);
6968 if(&L)
6969 __ beq(AT, R0, L);
6970 else
6971 __ beq(AT, R0, (int)0);
6972 break;
6973 case 0x05: //below
6974 __ sltu(AT, op1, op2);
6975 if(&L)
6976 __ bne(R0, AT, L);
6977 else
6978 __ bne(R0, AT, (int)0);
6979 break;
6980 case 0x06: //below_equal
6981 __ sltu(AT, op2, op1);
6982 if(&L)
6983 __ beq(AT, R0, L);
6984 else
6985 __ beq(AT, R0, (int)0);
6986 break;
6987 default:
6988 Unimplemented();
6989 }
6990 __ nop();
6991 %}
6993 ins_pc_relative(1);
6994 ins_pipe( pipe_alu_branch );
6995 %}
// Compressed-oop compare against null with conditional branch.
// Only eq/ne are meaningful against null.
6997 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6998 match(If cmp (CmpN op1 null));
6999 effect(USE labl);
7001 ins_cost(180);
7002 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7003 "BP$cmp $labl @ cmpN_null_branch" %}
7004 ins_encode %{
7005 Register op1 = $op1$$Register;
7006 Register op2 = R0;
7007 Label &L = *($labl$$label);
7008 int flag = $cmp$$cmpcode;
// NOTE(review): "&L" is always non-null (address of a reference); the
// "(int)0" arms look dead -- confirm before reuse.
7010 switch(flag)
7011 {
7012 case 0x01: //equal
7013 if (&L)
7014 __ beq(op1, op2, L);
7015 else
7016 __ beq(op1, op2, (int)0);
7017 break;
7018 case 0x02: //not_equal
7019 if (&L)
7020 __ bne(op1, op2, L);
7021 else
7022 __ bne(op1, op2, (int)0);
7023 break;
7024 default:
7025 Unimplemented();
7026 }
7027 __ nop();
7028 %}
7029 //TODO: pipe_branchP or create pipe_branchN LEE
7030 ins_pc_relative(1);
7031 ins_pipe( pipe_alu_branch );
7032 %}
// Compressed-oop compare between two registers with conditional branch.
// Orderings are synthesized with sltu into AT, as in branchConP.
7034 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7035 match(If cmp (CmpN op1 op2));
7036 effect(USE labl);
7038 ins_cost(180);
7039 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7040 "BP$cmp $labl" %}
7041 ins_encode %{
7042 Register op1_reg = $op1$$Register;
7043 Register op2_reg = $op2$$Register;
7044 Label &L = *($labl$$label);
7045 int flag = $cmp$$cmpcode;
// NOTE(review): "&L" is always non-null (address of a reference); the
// "(int)0" arms look dead -- confirm before reuse.
7047 switch(flag)
7048 {
7049 case 0x01: //equal
7050 if (&L)
7051 __ beq(op1_reg, op2_reg, L);
7052 else
7053 __ beq(op1_reg, op2_reg, (int)0);
7054 break;
7055 case 0x02: //not_equal
7056 if (&L)
7057 __ bne(op1_reg, op2_reg, L);
7058 else
7059 __ bne(op1_reg, op2_reg, (int)0);
7060 break;
7061 case 0x03: //above
7062 __ sltu(AT, op2_reg, op1_reg);
7063 if(&L)
7064 __ bne(R0, AT, L);
7065 else
7066 __ bne(R0, AT, (int)0);
7067 break;
7068 case 0x04: //above_equal
7069 __ sltu(AT, op1_reg, op2_reg);
7070 if(&L)
7071 __ beq(AT, R0, L);
7072 else
7073 __ beq(AT, R0, (int)0);
7074 break;
7075 case 0x05: //below
7076 __ sltu(AT, op1_reg, op2_reg);
7077 if(&L)
7078 __ bne(R0, AT, L);
7079 else
7080 __ bne(R0, AT, (int)0);
7081 break;
7082 case 0x06: //below_equal
7083 __ sltu(AT, op2_reg, op1_reg);
7084 if(&L)
7085 __ beq(AT, R0, L);
7086 else
7087 __ beq(AT, R0, (int)0);
7088 break;
7089 default:
7090 Unimplemented();
7091 }
7092 __ nop();
7093 %}
7094 ins_pc_relative(1);
7095 ins_pipe( pipe_alu_branch );
7096 %}
// Branch on unsigned int comparison of two registers (CmpU).
// Relational cases materialize the condition into AT via sltu.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned int comparison of a register against an arbitrary
// 32-bit immediate.  The immediate is first loaded into AT; relational
// cases then overwrite AT with the sltu result.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);  // materialize the immediate before dispatch
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed int comparison of two registers (CmpI).
// NOTE(review): the case comments say above/below, but slt is the signed
// set-on-less-than, so these are really greater/less etc.  (Unsigned
// variants use sltu in branchConIU_reg_reg.)
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed int comparison against zero.  Cheaper than the general
// immediate form (cost 170 vs 200): MIPS has dedicated compare-with-zero
// branches (bgtz/bgez/bltz/blez), so no AT scratch work is needed.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // int val = $src2$$constant;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    //__ move(AT, val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed int comparison of a register against an arbitrary
// 32-bit immediate: load immediate into AT, then compare with slt.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);  // materialize the immediate before dispatch
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned comparison of a register against zero.  Unsigned
// ordering against 0 degenerates: above_equal is always true (emitted as
// the unconditional beq(R0,R0)), below is always false (nothing emitted;
// the early 'return' also skips the trailing delay-slot nop, which is
// fine since no branch was emitted), above is simply != 0, and
// below_equal is == 0.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        return;
        break;
      case 0x06: //below_equal
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned comparison of a register against a 16-bit immediate.
// above_equal/below can use sltiu with the immediate directly; the other
// cases must first materialize the value into AT with move.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long comparison of two registers.  Unlike the int
// variants (which emit a single trailing nop), each case here fills the
// branch delay slot explicitly via delayed()->nop().
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);
    // NOTE(review): '&target' is always non-null (reference); else arms dead.
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long comparison against an immediate whose negation
// fits in 16 bits (immL16_sub): compute AT = src1 - val with one daddiu,
// then use the compare-with-zero branches on AT.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);  // AT = src1 - val; sign of AT decides
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Int counterpart of branchConL_reg_immL16_sub: AT = src1 - val via a
// single addiu32, then branch on AT's sign/zero.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);  // AT = src1 - val (32-bit)
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long comparison against zero using the dedicated
// compare-with-zero branches where available.  'less' goes through slt
// rather than bltz; the delay slot is filled once after the switch with
// delayed()->nop().
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    // NOTE(review): '&target' is always non-null (reference); else arms dead.
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Branch on single-precision float comparison.  The FP condition flag is
// set with a c.cond.s compare and tested with bc1t/bc1f.  Relational
// cases use the unordered compares (c_ule_s/c_ult_s) negated via bc1f so
// that an unordered (NaN) result falls through, i.e. the branch is NOT
// taken when either operand is NaN.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on double-precision float comparison; mirrors branchConF_reg_reg
// but with the .d compare forms.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // NOTE(review): '&L' is always non-null (reference); else arms are dead.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime; code emission is delegated to the
// Java_To_Runtime encoding (defined elsewhere in this file).
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);  // align the call site to a 16-byte boundary
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors
// Barriers that are subsumed by surrounding instructions (e.g. the CAS in
// FastLock) are emitted as zero-size empty encodings; the rest emit a
// full 'sync'.

// Acquire barrier: empty — ordinary loads/stores already provide the
// required ordering here (size(0), no code emitted).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}

// LoadFence: full hardware barrier via sync.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}

// Acquire as part of locking: the CAS in the prior FastLock already
// provides acquire semantics, so nothing is emitted.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}

// Release barrier: empty, same reasoning as membar_acquire.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}

// StoreFence: full hardware barrier via sync.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}

// Release as part of unlocking: FastUnlock provides the release, so empty.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}

// Volatile barrier: full sync, skipped entirely on uniprocessor systems.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}

// Volatile barrier proven redundant by a following store-load barrier
// (see Matcher::post_store_load_barrier) — emits nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}

// StoreStore barrier: empty — presumably stores are not reordered with
// stores on this target; TODO confirm against the processor manual.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------

// CastX2P: reinterpret a machine word (long) as a pointer — a plain
// register move, elided when source and destination coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}

// CastP2X: reinterpret a pointer as a machine word — same register move.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}

// Raw bit move: float register -> int register (mfc1, no conversion).
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}

// Raw bit move: int register -> float register (mtc1, no conversion).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}

// Raw bit move: double register -> long register (dmfc1, no conversion).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}

// Raw bit move: long register -> double register (dmtc1, no conversion).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// dst = src iff (tmp1 <cop> tmp2), signed int compare.  The condition is
// materialized into AT (subu32 for eq/ne, slt for relational) and the
// move is performed branch-free with movz/movn on AT.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);   // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);   // move if AT != 0
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = src iff (tmp1 <cop> tmp2), unsigned pointer compare.
// NOTE(review): eq/ne use 'subu' here while the narrow-oop variants use
// 'subu32' — assumes 'subu' is the full-width (64-bit) subtract in this
// port's assembler, which pointer equality requires; verify against the
// macro-assembler definition.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);   // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);   // move if AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst = src iff (tmp1 <cop> tmp2), unsigned compare of two compressed
// (narrow) oops; 32-bit subtract (subu32) for eq/ne, sltu for relational.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);   // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);   // move if AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned narrow-oop
// compare; same AT/movz/movn scheme as cmovI_cmpN_reg_reg.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);   // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);   // move if AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned full-pointer
// compare ('subu' here, vs subu32 in the narrow-narrow variants —
// assumed full-width; see note on cmovI_cmpP_reg_reg).
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);   // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);   // move if AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a double compare: set the FP
// condition flag with c.cond.d, then movt/movf on that flag.
// NOTE(review): the mix of ordered (c_ole/c_olt) and unordered
// (c_ult/c_ule) compares determines NaN behavior per condition —
// presumably chosen to make NaN fail the relational move; verify against
// the matching branchConD encodings.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);   // move if FP flag set
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);   // move if FP flag clear
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned narrow-oop
// compare (32-bit subu32 for eq/ne, sltu for relational).
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);   // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);   // move if AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int: dst = src iff (tmp1 cop tmp2), unsigned compare (cmpOpU).
// AT is scratch; movz/movn select on AT == 0 / AT != 0.
// NOTE(review): eq/ne use 'subu' here while the other 32-bit cmov patterns use
// 'subu32' — presumably equivalent for zero-testing; confirm against MacroAssembler.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int keyed on a signed long compare:
// dst = src iff (tmp1 cop tmp2). AT is scratch; slt is a full-register
// signed compare. ('subu' is presumably the 64-bit subtract in this
// MacroAssembler — cf. 'subu32' used by the 32-bit patterns; verify.)
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on a signed long compare:
// dst = src iff (tmp1 cop tmp2). AT is scratch.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int keyed on a double compare.
// Uses c.cond.d to set the FP condition flag, then movt/movf to move
// on flag true/false. The c_ole/c_olt "ordered" variants combined with
// movf implement greater/greater_equal so that NaN compares move nothing.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change: test c_eq and move on flag false.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on an unsigned pointer compare
// (cmpOpU): dst = src iff (tmp1 cop tmp2). AT is scratch.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on a SIGNED int compare (cmpOp, slt).
// (The case labels previously read above/below — misleading, since those
// names denote unsigned conditions; relabeled to the signed names.)
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop keyed on a SIGNED int compare (cmpOp, slt).
// (Case labels previously said above/below — those are unsigned names;
// relabeled to the signed conditions actually implemented.)
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a signed int compare:
// dst = src iff (tmp1 cop tmp2). AT is scratch.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a signed long compare:
// dst = src iff (tmp1 cop tmp2). AT is scratch.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on an unsigned narrow-oop compare
// (cmpOpU): dst = src iff (tmp1 cop tmp2). AT is scratch.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a double compare.
// c.cond.d sets the FP condition flag; movt/movf select on flag
// true/false. Ordered compares (c_ole/c_olt) plus movf keep NaN
// from satisfying greater/greater_equal.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a double compare.
// FP registers have no movt/movf-style conditional move here, so each
// case branches around a mov_d: compare, branch past the move when the
// condition is false (bc1f/bc1t), nop for the delay slot, then mov_d.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change: test c_eq and skip the move when the flag is true.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a signed int compare.
// No conditional FP move is available, so each case branches around a
// mov_s when the condition does NOT hold; delay slots are filled with nop.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a signed int compare.
// Branch-around-mov_d scheme; delay slots filled with nop.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a pointer compare.
// Branch-around-mov_d scheme; delay slots filled with nop.
// NOTE(review): the ordered cases use SIGNED slt on pointer values —
// presumably only eq/ne are ever generated for CmpP here; confirm, since
// an ordered pointer compare would normally be unsigned (sltu).
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9384 //FIXME
// Conditional move of an int keyed on a float compare.
// c.cond.s sets the FP condition flag; movt/movf select on it. Ordered
// compares (c_ole/c_olt) plus movf keep NaN out of greater/greater_equal.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
          Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a float compare.
// FP destination, so branch around a mov_s when the condition is false
// (bc1f/bc1t on the FP condition flag); nop fills each delay slot.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9500 // Manifest a CmpL result in an integer register. Very painful.
9501 // This is the test to avoid.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    Label Done;

    // dst = -1 if src1 < src2, 0 if equal, 1 if src1 > src2.
    __ subu(AT, opr1, opr2);
    __ bltz(AT, Done);
    __ delayed()->daddiu(dst, R0, -1);  // delay slot: tentatively dst = -1

    // Not less-than: dst = 1, then overwrite with 0 when AT == 0 (equal).
    __ move(dst, 1);
    __ movz(dst, R0, AT);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
9525 //
9526 // less_rsult = -1
9527 // greater_result = 1
9528 // equal_result = 0
9529 // nan_result = -1
9530 //
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is unordered-or-less-than: NaN takes this path, giving -1.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);  // delay slot: tentatively dst = -1

    // Ordered and not less-than: dst = 1, overwritten with 0 on equality.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Same scheme as cmpF3_reg_reg, for doubles: -1 on less-than or NaN,
// 0 on equality, 1 otherwise.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is unordered-or-less-than: NaN takes this path, giving -1.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);  // delay slot: tentatively dst = -1

    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt is the number of DOUBLEWORDS (8-byte units) to zero, and
    // $base points to the starting address of the array.
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);   // nothing to clear
    __ nop();
    __ move(AT, base);      // AT walks the array

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));       // store one zero doubleword
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two char[] regions.
// result < 0 / == 0 / > 0 like String.compareTo. Delay slots are used to
// overlap loads and pointer bumps with the branches; cnt2 is reused as a
// temp once the shorter length has been selected into cnt1.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);               // ran out of chars: lengths decide
    __ delayed()->lhu(AT, str1, 0);;      // delay slot: load current char of str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);         // chars differ
    __ delayed()->addi(str1, str1, 2);    // delay slot: advance str1
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // delay slot: decrement count; Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);            // result = char difference

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9651 // intrinsic optimization
// intrinsic optimization
// Char-by-char equality of two char[] regions of length $cnt.
// result = 1 when equal (or same array), 0 otherwise. Delay slots carry
// the result assignment / pointer bumps.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt    = $cnt$$Register;
    Register tmp    = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1); // delay slot: result = 1

    __ bind(Loop);                       // Loop begin
    __ beq(cnt, R0, done);               // all chars matched
    __ daddiu(result, R0, 1);            // count == 0 (delay slot: result = 1)

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);               // mismatch
    __ delayed()->daddi(result, R0, 0);  // delay slot: result = 0
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);    // delay slot: decrement; Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9692 //----------Arithmetic Instructions-------------------------------------------
9693 //----------Addition Instructions---------------------------------------------
9694 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9695 match(Set dst (AddI src1 src2));
9697 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9698 ins_encode %{
9699 Register dst = $dst$$Register;
9700 Register src1 = $src1$$Register;
9701 Register src2 = $src2$$Register;
9702 __ addu32(dst, src1, src2);
9703 %}
9704 ins_pipe( ialu_regI_regI );
9705 %}
9707 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9708 match(Set dst (AddI src1 src2));
9710 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9711 ins_encode %{
9712 Register dst = $dst$$Register;
9713 Register src1 = $src1$$Register;
9714 int imm = $src2$$constant;
9716 if(Assembler::is_simm16(imm)) {
9717 __ addiu32(dst, src1, imm);
9718 } else {
9719 __ move(AT, imm);
9720 __ addu32(dst, src1, AT);
9721 }
9722 %}
9723 ins_pipe( ialu_regI_regI );
9724 %}
9726 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9727 match(Set dst (AddP src1 src2));
9729 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9731 ins_encode %{
9732 Register dst = $dst$$Register;
9733 Register src1 = $src1$$Register;
9734 Register src2 = $src2$$Register;
9735 __ daddu(dst, src1, src2);
9736 %}
9738 ins_pipe( ialu_regI_regI );
9739 %}
9741 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9742 match(Set dst (AddP src1 (ConvI2L src2)));
9744 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9746 ins_encode %{
9747 Register dst = $dst$$Register;
9748 Register src1 = $src1$$Register;
9749 Register src2 = $src2$$Register;
9750 __ daddu(dst, src1, src2);
9751 %}
9753 ins_pipe( ialu_regI_regI );
9754 %}
9756 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9757 match(Set dst (AddP src1 src2));
9759 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9760 ins_encode %{
9761 Register src1 = $src1$$Register;
9762 long src2 = $src2$$constant;
9763 Register dst = $dst$$Register;
9765 if(Assembler::is_simm16(src2)) {
9766 __ daddiu(dst, src1, src2);
9767 } else {
9768 __ set64(AT, src2);
9769 __ daddu(dst, src1, AT);
9770 }
9771 %}
9772 ins_pipe( ialu_regI_imm16 );
9773 %}
// --- 64-bit integer addition ---------------------------------------------
// Variants below cover reg+reg, reg+imm16, and the fused (ConvI2L x) forms
// so the matcher can fold an int->long conversion into the add (daddu
// operates on the sign-extended 64-bit register contents directly).
9775 // Add Long Register with Register
9776 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9777 match(Set dst (AddL src1 src2));
9778 ins_cost(200);
9779 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9781 ins_encode %{
9782 Register dst_reg = as_Register($dst$$reg);
9783 Register src1_reg = as_Register($src1$$reg);
9784 Register src2_reg = as_Register($src2$$reg);
9786 __ daddu(dst_reg, src1_reg, src2_reg);
9787 %}
9789 ins_pipe( ialu_regL_regL );
9790 %}
// Long + 16-bit immediate: single daddiu (immL16 guarantees simm16 range).
9792 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9793 %{
9794 match(Set dst (AddL src1 src2));
9796 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9797 ins_encode %{
9798 Register dst_reg = as_Register($dst$$reg);
9799 Register src1_reg = as_Register($src1$$reg);
9800 int src2_imm = $src2$$constant;
9802 __ daddiu(dst_reg, src1_reg, src2_imm);
9803 %}
9805 ins_pipe( ialu_regL_regL );
9806 %}
// (ConvI2L src1) + imm16: the I2L is free because int values are kept
// sign-extended in 64-bit registers on this port.
9808 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9809 %{
9810 match(Set dst (AddL (ConvI2L src1) src2));
9812 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9813 ins_encode %{
9814 Register dst_reg = as_Register($dst$$reg);
9815 Register src1_reg = as_Register($src1$$reg);
9816 int src2_imm = $src2$$constant;
9818 __ daddiu(dst_reg, src1_reg, src2_imm);
9819 %}
9821 ins_pipe( ialu_regL_regL );
9822 %}
9824 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9825 match(Set dst (AddL (ConvI2L src1) src2));
9826 ins_cost(200);
9827 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9829 ins_encode %{
9830 Register dst_reg = as_Register($dst$$reg);
9831 Register src1_reg = as_Register($src1$$reg);
9832 Register src2_reg = as_Register($src2$$reg);
9834 __ daddu(dst_reg, src1_reg, src2_reg);
9835 %}
9837 ins_pipe( ialu_regL_regL );
9838 %}
9840 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9841 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9842 ins_cost(200);
9843 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9845 ins_encode %{
9846 Register dst_reg = as_Register($dst$$reg);
9847 Register src1_reg = as_Register($src1$$reg);
9848 Register src2_reg = as_Register($src2$$reg);
9850 __ daddu(dst_reg, src1_reg, src2_reg);
9851 %}
9853 ins_pipe( ialu_regL_regL );
9854 %}
9856 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9857 match(Set dst (AddL src1 (ConvI2L src2)));
9858 ins_cost(200);
9859 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9861 ins_encode %{
9862 Register dst_reg = as_Register($dst$$reg);
9863 Register src1_reg = as_Register($src1$$reg);
9864 Register src2_reg = as_Register($src2$$reg);
9866 __ daddu(dst_reg, src1_reg, src2_reg);
9867 %}
9869 ins_pipe( ialu_regL_regL );
9870 %}
9872 //----------Subtraction Instructions-------------------------------------------
9873 // Integer Subtraction Instructions
// 32-bit subtract: subu32 keeps the result properly sign-extended in the
// 64-bit register, as required for int values on this port.
9874 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9875 match(Set dst (SubI src1 src2));
9876 ins_cost(100);
9878 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9879 ins_encode %{
9880 Register dst = $dst$$Register;
9881 Register src1 = $src1$$Register;
9882 Register src2 = $src2$$Register;
9883 __ subu32(dst, src1, src2);
9884 %}
9885 ins_pipe( ialu_regI_regI );
9886 %}
// Subtract-immediate folded into an add of the negated constant
// (immI16_sub presumably guarantees -src2 fits in addiu's simm16 field —
// its definition is outside this chunk).
9888 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9889 match(Set dst (SubI src1 src2));
9890 ins_cost(80);
9892 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9893 ins_encode %{
9894 Register dst = $dst$$Register;
9895 Register src1 = $src1$$Register;
9896 __ addiu32(dst, src1, -1 * $src2$$constant);
9897 %}
9898 ins_pipe( ialu_regI_regI );
9899 %}
// Negation matched as 0 - src (int and long forms).
9901 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9902 match(Set dst (SubI zero src));
9903 ins_cost(80);
9905 format %{ "neg $dst, $src #@negI_Reg" %}
9906 ins_encode %{
9907 Register dst = $dst$$Register;
9908 Register src = $src$$Register;
9909 __ subu32(dst, R0, src);
9910 %}
9911 ins_pipe( ialu_regI_regI );
9912 %}
9914 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9915 match(Set dst (SubL zero src));
9916 ins_cost(80);
9918 format %{ "neg $dst, $src #@negL_Reg" %}
9919 ins_encode %{
9920 Register dst = $dst$$Register;
9921 Register src = $src$$Register;
9922 __ subu(dst, R0, src);
9923 %}
9924 ins_pipe( ialu_regI_regI );
9925 %}
// Long subtract-immediate, same negate-and-add trick as the int version.
9927 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9928 match(Set dst (SubL src1 src2));
9929 ins_cost(80);
9931 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9932 ins_encode %{
9933 Register dst = $dst$$Register;
9934 Register src1 = $src1$$Register;
9935 __ daddiu(dst, src1, -1 * $src2$$constant);
9936 %}
9937 ins_pipe( ialu_regI_regI );
9938 %}
9940 // Subtract Long Register with Register.
// As with the addL family, (ConvI2L x) operands are folded for free since
// int values are kept sign-extended in 64-bit registers.
9941 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9942 match(Set dst (SubL src1 src2));
9943 ins_cost(100);
9944 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9945 ins_encode %{
9946 Register dst = as_Register($dst$$reg);
9947 Register src1 = as_Register($src1$$reg);
9948 Register src2 = as_Register($src2$$reg);
9950 __ subu(dst, src1, src2);
9951 %}
9952 ins_pipe( ialu_regL_regL );
9953 %}
9955 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9956 match(Set dst (SubL src1 (ConvI2L src2)));
9957 ins_cost(100);
9958 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9959 ins_encode %{
9960 Register dst = as_Register($dst$$reg);
9961 Register src1 = as_Register($src1$$reg);
9962 Register src2 = as_Register($src2$$reg);
9964 __ subu(dst, src1, src2);
9965 %}
9966 ins_pipe( ialu_regL_regL );
9967 %}
9969 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9970 match(Set dst (SubL (ConvI2L src1) src2));
9971 ins_cost(200);
9972 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9973 ins_encode %{
9974 Register dst = as_Register($dst$$reg);
9975 Register src1 = as_Register($src1$$reg);
9976 Register src2 = as_Register($src2$$reg);
9978 __ subu(dst, src1, src2);
9979 %}
9980 ins_pipe( ialu_regL_regL );
9981 %}
9983 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9984 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9985 ins_cost(200);
9986 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9987 ins_encode %{
9988 Register dst = as_Register($dst$$reg);
9989 Register src1 = as_Register($src1$$reg);
9990 Register src2 = as_Register($src2$$reg);
9992 __ subu(dst, src1, src2);
9993 %}
9994 ins_pipe( ialu_regL_regL );
9995 %}
9997 // Integer MOD with Register
// Int remainder. The Loongson gsmod path is intentionally disabled
// (if (0)): measurements from 2016.08.10 showed div+mfhi to be faster.
9998 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9999 match(Set dst (ModI src1 src2));
10000 ins_cost(300);
10001 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
10002 ins_encode %{
10003 Register dst = $dst$$Register;
10004 Register src1 = $src1$$Register;
10005 Register src2 = $src2$$Register;
10007 //if (UseLoongsonISA) {
10008 if (0) {
10009 // 2016.08.10
10010 // Experiments show that gsmod is slower that div+mfhi.
10011 // So I just disable it here.
10012 __ gsmod(dst, src1, src2);
10013 } else {
// Remainder of a MIPS div lands in the HI register.
10014 __ div(src1, src2);
10015 __ mfhi(dst);
10016 }
10017 %}
10019 //ins_pipe( ialu_mod );
10020 ins_pipe( ialu_regI_regI );
10021 %}
// Long remainder; here the Loongson gsdmod IS used when available.
10023 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10024 match(Set dst (ModL src1 src2));
10025 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
10027 ins_encode %{
10028 Register dst = as_Register($dst$$reg);
10029 Register op1 = as_Register($src1$$reg);
10030 Register op2 = as_Register($src2$$reg);
10032 if (UseLoongsonISA) {
10033 __ gsdmod(dst, op1, op2);
10034 } else {
10035 __ ddiv(op1, op2);
10036 __ mfhi(dst);
10037 }
10038 %}
10039 ins_pipe( pipe_slow );
10040 %}
// 32-bit multiply: the three-operand mul writes dst directly.
10042 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10043 match(Set dst (MulI src1 src2));
10045 ins_cost(300);
10046 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10047 ins_encode %{
10048 Register src1 = $src1$$Register;
10049 Register src2 = $src2$$Register;
10050 Register dst = $dst$$Register;
10052 __ mul(dst, src1, src2);
10053 %}
10054 ins_pipe( ialu_mult );
10055 %}
// Fused multiply-add: seed LO with src3, madd accumulates src1*src2 into
// HI/LO, then the low word is read back. High cost discourages selection.
10057 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
10058 match(Set dst (AddI (MulI src1 src2) src3));
10060 ins_cost(999);
10061 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
10062 ins_encode %{
10063 Register src1 = $src1$$Register;
10064 Register src2 = $src2$$Register;
10065 Register src3 = $src3$$Register;
10066 Register dst = $dst$$Register;
10068 __ mtlo(src3);
10069 __ madd(src1, src2);
10070 __ mflo(dst);
10071 %}
10072 ins_pipe( ialu_mult );
10073 %}
// Int division. teq raises a trap when src2 == 0 so Java's
// ArithmeticException path can be taken (MIPS div itself never faults).
10075 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10076 match(Set dst (DivI src1 src2));
10078 ins_cost(300);
10079 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10080 ins_encode %{
10081 Register src1 = $src1$$Register;
10082 Register src2 = $src2$$Register;
10083 Register dst = $dst$$Register;
10085 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10086 We must trap an exception manually. */
10087 __ teq(R0, src2, 0x7);
10089 if (UseLoongsonISA) {
10090 __ gsdiv(dst, src1, src2);
10091 } else {
10092 __ div(src1, src2);
// nops keep a safe distance between div and mflo — TODO confirm this is
// the pipeline hazard being worked around here.
10094 __ nop();
10095 __ nop();
10096 __ mflo(dst);
10097 }
10098 %}
10099 ins_pipe( ialu_mod );
10100 %}
// Float/double division. No manual trap is emitted: IEEE FP division
// produces Inf/NaN rather than a hardware exception (the original author's
// question in the comment notwithstanding).
10102 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10103 match(Set dst (DivF src1 src2));
10105 ins_cost(300);
10106 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10107 ins_encode %{
10108 FloatRegister src1 = $src1$$FloatRegister;
10109 FloatRegister src2 = $src2$$FloatRegister;
10110 FloatRegister dst = $dst$$FloatRegister;
10112 /* Here do we need to trap an exception manually ? */
10113 __ div_s(dst, src1, src2);
10114 %}
10115 ins_pipe( pipe_slow );
10116 %}
10118 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10119 match(Set dst (DivD src1 src2));
10121 ins_cost(300);
10122 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10123 ins_encode %{
10124 FloatRegister src1 = $src1$$FloatRegister;
10125 FloatRegister src2 = $src2$$FloatRegister;
10126 FloatRegister dst = $dst$$FloatRegister;
10128 /* Here do we need to trap an exception manually ? */
10129 __ div_d(dst, src1, src2);
10130 %}
10131 ins_pipe( pipe_slow );
10132 %}
// 64-bit multiply/divide. Loongson's three-operand gsdmult/gsddiv avoid
// the HI/LO round-trip; the generic MIPS fallback uses dmult/ddiv + mflo.
10134 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10135 match(Set dst (MulL src1 src2));
10136 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10137 ins_encode %{
10138 Register dst = as_Register($dst$$reg);
10139 Register op1 = as_Register($src1$$reg);
10140 Register op2 = as_Register($src2$$reg);
10142 if (UseLoongsonISA) {
10143 __ gsdmult(dst, op1, op2);
10144 } else {
10145 __ dmult(op1, op2);
10146 __ mflo(dst);
10147 }
10148 %}
10149 ins_pipe( pipe_slow );
10150 %}
// Same, with an int operand folded through ConvI2L (already sign-extended).
10152 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10153 match(Set dst (MulL src1 (ConvI2L src2)));
10154 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10155 ins_encode %{
10156 Register dst = as_Register($dst$$reg);
10157 Register op1 = as_Register($src1$$reg);
10158 Register op2 = as_Register($src2$$reg);
10160 if (UseLoongsonISA) {
10161 __ gsdmult(dst, op1, op2);
10162 } else {
10163 __ dmult(op1, op2);
10164 __ mflo(dst);
10165 }
10166 %}
10167 ins_pipe( pipe_slow );
10168 %}
// NOTE(review): unlike divI_Reg_Reg above, no teq zero-check is emitted
// here — presumably the zero-divisor trap is handled elsewhere for DivL;
// verify against the platform's exception handling.
10170 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10171 match(Set dst (DivL src1 src2));
10172 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10174 ins_encode %{
10175 Register dst = as_Register($dst$$reg);
10176 Register op1 = as_Register($src1$$reg);
10177 Register op2 = as_Register($src2$$reg);
10179 if (UseLoongsonISA) {
10180 __ gsddiv(dst, op1, op2);
10181 } else {
10182 __ ddiv(op1, op2);
10183 __ mflo(dst);
10184 }
10185 %}
10186 ins_pipe( pipe_slow );
10187 %}
// --- Floating-point add / subtract / negate -------------------------------
// Straight one-instruction mappings: *_s = single precision, *_d = double.
10189 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10190 match(Set dst (AddF src1 src2));
10191 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10192 ins_encode %{
10193 FloatRegister src1 = as_FloatRegister($src1$$reg);
10194 FloatRegister src2 = as_FloatRegister($src2$$reg);
10195 FloatRegister dst = as_FloatRegister($dst$$reg);
10197 __ add_s(dst, src1, src2);
10198 %}
10199 ins_pipe( fpu_regF_regF );
10200 %}
10202 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10203 match(Set dst (SubF src1 src2));
10204 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10205 ins_encode %{
10206 FloatRegister src1 = as_FloatRegister($src1$$reg);
10207 FloatRegister src2 = as_FloatRegister($src2$$reg);
10208 FloatRegister dst = as_FloatRegister($dst$$reg);
10210 __ sub_s(dst, src1, src2);
10211 %}
10212 ins_pipe( fpu_regF_regF );
10213 %}
10214 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10215 match(Set dst (AddD src1 src2));
10216 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10217 ins_encode %{
10218 FloatRegister src1 = as_FloatRegister($src1$$reg);
10219 FloatRegister src2 = as_FloatRegister($src2$$reg);
10220 FloatRegister dst = as_FloatRegister($dst$$reg);
10222 __ add_d(dst, src1, src2);
10223 %}
10224 ins_pipe( fpu_regF_regF );
10225 %}
10227 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10228 match(Set dst (SubD src1 src2));
10229 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10230 ins_encode %{
10231 FloatRegister src1 = as_FloatRegister($src1$$reg);
10232 FloatRegister src2 = as_FloatRegister($src2$$reg);
10233 FloatRegister dst = as_FloatRegister($dst$$reg);
10235 __ sub_d(dst, src1, src2);
10236 %}
10237 ins_pipe( fpu_regF_regF );
10238 %}
10240 instruct negF_reg(regF dst, regF src) %{
10241 match(Set dst (NegF src));
10242 format %{ "negF $dst, $src @negF_reg" %}
10243 ins_encode %{
10244 FloatRegister src = as_FloatRegister($src$$reg);
10245 FloatRegister dst = as_FloatRegister($dst$$reg);
10247 __ neg_s(dst, src);
10248 %}
10249 ins_pipe( fpu_regF_regF );
10250 %}
10252 instruct negD_reg(regD dst, regD src) %{
10253 match(Set dst (NegD src));
10254 format %{ "negD $dst, $src @negD_reg" %}
10255 ins_encode %{
10256 FloatRegister src = as_FloatRegister($src$$reg);
10257 FloatRegister dst = as_FloatRegister($dst$$reg);
10259 __ neg_d(dst, src);
10260 %}
10261 ins_pipe( fpu_regF_regF );
10262 %}
// --- Floating-point multiply and fused multiply-add -----------------------
10265 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10266 match(Set dst (MulF src1 src2));
10267 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10268 ins_encode %{
10269 FloatRegister src1 = $src1$$FloatRegister;
10270 FloatRegister src2 = $src2$$FloatRegister;
10271 FloatRegister dst = $dst$$FloatRegister;
10273 __ mul_s(dst, src1, src2);
10274 %}
10275 ins_pipe( fpu_regF_regF );
10276 %}
// Fused madd changes rounding vs separate mul+add; effectively disabled
// via a prohibitive cost so results stay compatible across platforms.
10278 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10279 match(Set dst (AddF (MulF src1 src2) src3));
10280 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10281 ins_cost(44444);
10282 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10283 ins_encode %{
10284 FloatRegister src1 = $src1$$FloatRegister;
10285 FloatRegister src2 = $src2$$FloatRegister;
10286 FloatRegister src3 = $src3$$FloatRegister;
10287 FloatRegister dst = $dst$$FloatRegister;
10289 __ madd_s(dst, src1, src2, src3);
10290 %}
10291 ins_pipe( fpu_regF_regF );
10292 %}
10294 // Mul two double precision floating piont number
10295 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10296 match(Set dst (MulD src1 src2));
10297 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10298 ins_encode %{
10299 FloatRegister src1 = $src1$$FloatRegister;
10300 FloatRegister src2 = $src2$$FloatRegister;
10301 FloatRegister dst = $dst$$FloatRegister;
10303 __ mul_d(dst, src1, src2);
10304 %}
10305 ins_pipe( fpu_regF_regF );
10306 %}
10308 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10309 match(Set dst (AddD (MulD src1 src2) src3));
10310 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10311 ins_cost(44444);
10312 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10313 ins_encode %{
10314 FloatRegister src1 = $src1$$FloatRegister;
10315 FloatRegister src2 = $src2$$FloatRegister;
10316 FloatRegister src3 = $src3$$FloatRegister;
10317 FloatRegister dst = $dst$$FloatRegister;
10319 __ madd_d(dst, src1, src2, src3);
10320 %}
10321 ins_pipe( fpu_regF_regF );
10322 %}
// --- abs / sqrt intrinsics ------------------------------------------------
10324 instruct absF_reg(regF dst, regF src) %{
10325 match(Set dst (AbsF src));
10326 ins_cost(100);
10327 format %{ "absF $dst, $src @absF_reg" %}
10328 ins_encode %{
10329 FloatRegister src = as_FloatRegister($src$$reg);
10330 FloatRegister dst = as_FloatRegister($dst$$reg);
10332 __ abs_s(dst, src);
10333 %}
10334 ins_pipe( fpu_regF_regF );
10335 %}
10338 // intrinsics for math_native.
10339 // AbsD SqrtD CosD SinD TanD LogD Log10D
10341 instruct absD_reg(regD dst, regD src) %{
10342 match(Set dst (AbsD src));
10343 ins_cost(100);
10344 format %{ "absD $dst, $src @absD_reg" %}
10345 ins_encode %{
10346 FloatRegister src = as_FloatRegister($src$$reg);
10347 FloatRegister dst = as_FloatRegister($dst$$reg);
10349 __ abs_d(dst, src);
10350 %}
10351 ins_pipe( fpu_regF_regF );
10352 %}
10354 instruct sqrtD_reg(regD dst, regD src) %{
10355 match(Set dst (SqrtD src));
10356 ins_cost(100);
10357 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10358 ins_encode %{
10359 FloatRegister src = as_FloatRegister($src$$reg);
10360 FloatRegister dst = as_FloatRegister($dst$$reg);
10362 __ sqrt_d(dst, src);
10363 %}
10364 ins_pipe( fpu_regF_regF );
10365 %}
// Float sqrt is matched as the (ConvD2F (SqrtD (ConvF2D x))) idiom the
// ideal graph produces, and collapsed to a single sqrt_s.
10367 instruct sqrtF_reg(regF dst, regF src) %{
10368 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10369 ins_cost(100);
10370 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10371 ins_encode %{
10372 FloatRegister src = as_FloatRegister($src$$reg);
10373 FloatRegister dst = as_FloatRegister($dst$$reg);
10375 __ sqrt_s(dst, src);
10376 %}
10377 ins_pipe( fpu_regF_regF );
10378 %}
10379 //----------------------------------Logical Instructions----------------------
10380 //__________________________________Integer Logical Instructions-------------
10382 //And Instuctions
10383 // And Register with Immediate
// General case: constant does not fit andi's zero-extended 16-bit field,
// so it is loaded into AT first.
10384 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10385 match(Set dst (AndI src1 src2));
10387 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10388 ins_encode %{
10389 Register dst = $dst$$Register;
10390 Register src = $src1$$Register;
10391 int val = $src2$$constant;
10393 __ move(AT, val);
10394 __ andr(dst, src, AT);
10395 %}
10396 ins_pipe( ialu_regI_regI );
10397 %}
// Cheaper variant when the constant fits andi's unsigned 16-bit immediate.
10399 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10400 match(Set dst (AndI src1 src2));
10401 ins_cost(60);
10403 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10404 ins_encode %{
10405 Register dst = $dst$$Register;
10406 Register src = $src1$$Register;
10407 int val = $src2$$constant;
10409 __ andi(dst, src, val);
10410 %}
10411 ins_pipe( ialu_regI_regI );
10412 %}
// Mask of the form (1<<k)-1: implemented as a bit-field extract (ext/dext)
// of the low `size` bits, where is_int_mask/is_jlong_mask return the width.
10414 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10415 match(Set dst (AndI src1 mask));
10416 ins_cost(60);
10418 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10419 ins_encode %{
10420 Register dst = $dst$$Register;
10421 Register src = $src1$$Register;
10422 int size = Assembler::is_int_mask($mask$$constant);
10424 __ ext(dst, src, 0, size);
10425 %}
10426 ins_pipe( ialu_regI_regI );
10427 %}
10429 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10430 match(Set dst (AndL src1 mask));
10431 ins_cost(60);
10433 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10434 ins_encode %{
10435 Register dst = $dst$$Register;
10436 Register src = $src1$$Register;
10437 int size = Assembler::is_jlong_mask($mask$$constant);
10439 __ dext(dst, src, 0, size);
10440 %}
10441 ins_pipe( ialu_regI_regI );
10442 %}
// XOR with a 16-bit unsigned immediate: single xori.
10444 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10445 match(Set dst (XorI src1 src2));
10446 ins_cost(60);
10448 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10449 ins_encode %{
10450 Register dst = $dst$$Register;
10451 Register src = $src1$$Register;
10452 int val = $src2$$constant;
10454 __ xori(dst, src, val);
10455 %}
10456 ins_pipe( ialu_regI_regI );
10457 %}
// x ^ -1 == ~x: done with Loongson's gsorn (or-not), dst = R0 | ~src.
// Gated on UseLoongsonISA && Use3A2000 since gsorn is a Loongson extension.
10459 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10460 match(Set dst (XorI src1 M1));
10461 predicate(UseLoongsonISA && Use3A2000);
10462 ins_cost(60);
10464 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10465 ins_encode %{
10466 Register dst = $dst$$Register;
10467 Register src = $src1$$Register;
10469 __ gsorn(dst, R0, src);
10470 %}
10471 ins_pipe( ialu_regI_regI );
10472 %}
// Same ~x idiom with the operand arriving through ConvL2I.
10474 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10475 match(Set dst (XorI (ConvL2I src1) M1));
10476 predicate(UseLoongsonISA && Use3A2000);
10477 ins_cost(60);
10479 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10480 ins_encode %{
10481 Register dst = $dst$$Register;
10482 Register src = $src1$$Register;
10484 __ gsorn(dst, R0, src);
10485 %}
10486 ins_pipe( ialu_regI_regI );
10487 %}
10489 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10490 match(Set dst (XorL src1 src2));
10491 ins_cost(60);
10493 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10494 ins_encode %{
10495 Register dst = $dst$$Register;
10496 Register src = $src1$$Register;
10497 int val = $src2$$constant;
10499 __ xori(dst, src, val);
10500 %}
10501 ins_pipe( ialu_regI_regI );
10502 %}
10504 /*
10505 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10506 match(Set dst (XorL src1 M1));
10507 predicate(UseLoongsonISA);
10508 ins_cost(60);
10510 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10511 ins_encode %{
10512 Register dst = $dst$$Register;
10513 Register src = $src1$$Register;
10515 __ gsorn(dst, R0, src);
10516 %}
10517 ins_pipe( ialu_regI_regI );
10518 %}
10519 */
10521 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10522 match(Set dst (AndI mask (LoadB mem)));
10523 ins_cost(60);
10525 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10526 ins_encode(load_UB_enc(dst, mem));
10527 ins_pipe( ialu_loadI );
10528 %}
10530 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10531 match(Set dst (AndI (LoadB mem) mask));
10532 ins_cost(60);
10534 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10535 ins_encode(load_UB_enc(dst, mem));
10536 ins_pipe( ialu_loadI );
10537 %}
10539 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10540 match(Set dst (AndI src1 src2));
10542 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10543 ins_encode %{
10544 Register dst = $dst$$Register;
10545 Register src1 = $src1$$Register;
10546 Register src2 = $src2$$Register;
10547 __ andr(dst, src1, src2);
10548 %}
10549 ins_pipe( ialu_regI_regI );
10550 %}
// Loongson and-not / or-not fusions: x OP (y ^ -1) == x OP ~y becomes a
// single gsandn/gsorn. The mirrored *_nReg_Reg forms handle the inverted
// operand appearing on the left, swapping operands accordingly.
10552 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10553 match(Set dst (AndI src1 (XorI src2 M1)));
10554 predicate(UseLoongsonISA && Use3A2000);
10556 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10557 ins_encode %{
10558 Register dst = $dst$$Register;
10559 Register src1 = $src1$$Register;
10560 Register src2 = $src2$$Register;
10562 __ gsandn(dst, src1, src2);
10563 %}
10564 ins_pipe( ialu_regI_regI );
10565 %}
10567 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10568 match(Set dst (OrI src1 (XorI src2 M1)));
10569 predicate(UseLoongsonISA && Use3A2000);
10571 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10572 ins_encode %{
10573 Register dst = $dst$$Register;
10574 Register src1 = $src1$$Register;
10575 Register src2 = $src2$$Register;
10577 __ gsorn(dst, src1, src2);
10578 %}
10579 ins_pipe( ialu_regI_regI );
10580 %}
10582 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10583 match(Set dst (AndI (XorI src1 M1) src2));
10584 predicate(UseLoongsonISA && Use3A2000);
10586 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10587 ins_encode %{
10588 Register dst = $dst$$Register;
10589 Register src1 = $src1$$Register;
10590 Register src2 = $src2$$Register;
10592 __ gsandn(dst, src2, src1);
10593 %}
10594 ins_pipe( ialu_regI_regI );
10595 %}
10597 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10598 match(Set dst (OrI (XorI src1 M1) src2));
10599 predicate(UseLoongsonISA && Use3A2000);
10601 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10602 ins_encode %{
10603 Register dst = $dst$$Register;
10604 Register src1 = $src1$$Register;
10605 Register src2 = $src2$$Register;
10607 __ gsorn(dst, src2, src1);
10608 %}
10609 ins_pipe( ialu_regI_regI );
10610 %}
10612 // And Long Register with Register
10613 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10614 match(Set dst (AndL src1 src2));
10615 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10616 ins_encode %{
10617 Register dst_reg = as_Register($dst$$reg);
10618 Register src1_reg = as_Register($src1$$reg);
10619 Register src2_reg = as_Register($src2$$reg);
10621 __ andr(dst_reg, src1_reg, src2_reg);
10622 %}
10623 ins_pipe( ialu_regL_regL );
10624 %}
10626 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10627 match(Set dst (AndL src1 (ConvI2L src2)));
10628 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10629 ins_encode %{
10630 Register dst_reg = as_Register($dst$$reg);
10631 Register src1_reg = as_Register($src1$$reg);
10632 Register src2_reg = as_Register($src2$$reg);
10634 __ andr(dst_reg, src1_reg, src2_reg);
10635 %}
10636 ins_pipe( ialu_regL_regL );
10637 %}
// Long AND with a 16-bit unsigned immediate: andi zero-extends the
// immediate, so the result fits the long semantics directly.
10639 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10640 match(Set dst (AndL src1 src2));
10641 ins_cost(60);
10643 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10644 ins_encode %{
10645 Register dst = $dst$$Register;
10646 Register src = $src1$$Register;
10647 long val = $src2$$constant;
10649 __ andi(dst, src, val);
10650 %}
10651 ins_pipe( ialu_regI_regI );
10652 %}
// (long & mask) narrowed to int: the masked value is at most 16 bits, so
// the ConvL2I is a no-op and a single andi suffices.
10654 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10655 match(Set dst (ConvL2I (AndL src1 src2)));
10656 ins_cost(60);
10658 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10659 ins_encode %{
10660 Register dst = $dst$$Register;
10661 Register src = $src1$$Register;
10662 long val = $src2$$constant;
10664 __ andi(dst, src, val);
10665 %}
10666 ins_pipe( ialu_regI_regI );
10667 %}
10669 /*
10670 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10671 match(Set dst (AndL src1 (XorL src2 M1)));
10672 predicate(UseLoongsonISA);
10674 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10675 ins_encode %{
10676 Register dst = $dst$$Register;
10677 Register src1 = $src1$$Register;
10678 Register src2 = $src2$$Register;
10680 __ gsandn(dst, src1, src2);
10681 %}
10682 ins_pipe( ialu_regI_regI );
10683 %}
10684 */
10686 /*
10687 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10688 match(Set dst (OrL src1 (XorL src2 M1)));
10689 predicate(UseLoongsonISA);
10691 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10692 ins_encode %{
10693 Register dst = $dst$$Register;
10694 Register src1 = $src1$$Register;
10695 Register src2 = $src2$$Register;
10697 __ gsorn(dst, src1, src2);
10698 %}
10699 ins_pipe( ialu_regI_regI );
10700 %}
10701 */
10703 /*
10704 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10705 match(Set dst (AndL (XorL src1 M1) src2));
10706 predicate(UseLoongsonISA);
10708 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10709 ins_encode %{
10710 Register dst = $dst$$Register;
10711 Register src1 = $src1$$Register;
10712 Register src2 = $src2$$Register;
10714 __ gsandn(dst, src2, src1);
10715 %}
10716 ins_pipe( ialu_regI_regI );
10717 %}
10718 */
10720 /*
10721 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10722 match(Set dst (OrL (XorL src1 M1) src2));
10723 predicate(UseLoongsonISA);
10725 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10726 ins_encode %{
10727 Register dst = $dst$$Register;
10728 Register src1 = $src1$$Register;
10729 Register src2 = $src2$$Register;
10731 __ gsorn(dst, src2, src1);
10732 %}
10733 ins_pipe( ialu_regI_regI );
10734 %}
10735 */
// AND-with-negative-constant peepholes: x & ~bits is implemented by
// inserting zeros into the cleared bit range with dins(dst, R0, pos, size).
//   M8   = -8   -> clear bits [0..2]
//   M5   = -5   -> clear bit  [2]
//   M7   = -7   -> clear bits [1..2]
//   M4   = -4   -> clear bits [0..1]
//   M121 = -121 -> clear bits [3..6]
// These are read-modify-write on dst (matched as (AndL dst imm)).
10737 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10738 match(Set dst (AndL dst M8));
10739 ins_cost(60);
10741 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10742 ins_encode %{
10743 Register dst = $dst$$Register;
10745 __ dins(dst, R0, 0, 3);
10746 %}
10747 ins_pipe( ialu_regI_regI );
10748 %}
10750 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10751 match(Set dst (AndL dst M5));
10752 ins_cost(60);
10754 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10755 ins_encode %{
10756 Register dst = $dst$$Register;
10758 __ dins(dst, R0, 2, 1);
10759 %}
10760 ins_pipe( ialu_regI_regI );
10761 %}
10763 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10764 match(Set dst (AndL dst M7));
10765 ins_cost(60);
10767 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10768 ins_encode %{
10769 Register dst = $dst$$Register;
10771 __ dins(dst, R0, 1, 2);
10772 %}
10773 ins_pipe( ialu_regI_regI );
10774 %}
10776 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10777 match(Set dst (AndL dst M4));
10778 ins_cost(60);
10780 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10781 ins_encode %{
10782 Register dst = $dst$$Register;
10784 __ dins(dst, R0, 0, 2);
10785 %}
10786 ins_pipe( ialu_regI_regI );
10787 %}
10789 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10790 match(Set dst (AndL dst M121));
10791 ins_cost(60);
10793 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10794 ins_encode %{
10795 Register dst = $dst$$Register;
10797 __ dins(dst, R0, 3, 4);
10798 %}
10799 ins_pipe( ialu_regI_regI );
10800 %}
10802 // Or Long Register with Register
10803 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10804 match(Set dst (OrL src1 src2));
10805 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10806 ins_encode %{
10807 Register dst_reg = $dst$$Register;
10808 Register src1_reg = $src1$$Register;
10809 Register src2_reg = $src2$$Register;
10811 __ orr(dst_reg, src1_reg, src2_reg);
10812 %}
10813 ins_pipe( ialu_regL_regL );
10814 %}
// OR where the left operand is a pointer reinterpreted as bits (CastP2X
// is a no-op at the register level).
10816 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10817 match(Set dst (OrL (CastP2X src1) src2));
10818 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10819 ins_encode %{
10820 Register dst_reg = $dst$$Register;
10821 Register src1_reg = $src1$$Register;
10822 Register src2_reg = $src2$$Register;
10824 __ orr(dst_reg, src1_reg, src2_reg);
10825 %}
10826 ins_pipe( ialu_regL_regL );
10827 %}
10829 // Xor Long Register with Register
10830 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10831 match(Set dst (XorL src1 src2));
10832 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10833 ins_encode %{
10834 Register dst_reg = as_Register($dst$$reg);
10835 Register src1_reg = as_Register($src1$$reg);
10836 Register src2_reg = as_Register($src2$$reg);
10838 __ xorr(dst_reg, src1_reg, src2_reg);
10839 %}
10840 ins_pipe( ialu_regL_regL );
10841 %}
10843 // Shift Left by 8-bit immediate
// Int left shifts. sll takes only the low 5 bits of the shift amount,
// matching Java's `shift & 31` semantics for int shifts.
10844 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10845 match(Set dst (LShiftI src shift));
10847 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10848 ins_encode %{
10849 Register src = $src$$Register;
10850 Register dst = $dst$$Register;
10851 int shamt = $shift$$constant;
10853 __ sll(dst, src, shamt);
10854 %}
10855 ins_pipe( ialu_regI_regI );
10856 %}
// (ConvL2I src) << imm: L2I folds away since sll produces a sign-extended
// 32-bit result regardless of the upper source bits.
10858 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10859 match(Set dst (LShiftI (ConvL2I src) shift));
10861 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10862 ins_encode %{
10863 Register src = $src$$Register;
10864 Register dst = $dst$$Register;
10865 int shamt = $shift$$constant;
10867 __ sll(dst, src, shamt);
10868 %}
10869 ins_pipe( ialu_regI_regI );
10870 %}
// (src << 16) & 0xFFFF0000: the mask is implied by the shift, so only the
// sll is emitted.
10872 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10873 match(Set dst (AndI (LShiftI src shift) mask));
10875 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10876 ins_encode %{
10877 Register src = $src$$Register;
10878 Register dst = $dst$$Register;
10880 __ sll(dst, src, 16);
10881 %}
10882 ins_pipe( ialu_regI_regI );
10883 %}
// ((int)(src & 7) << 16) >> 16: value is at most 3 bits, so the
// shift-pair sign-extension idiom reduces to a plain andi.
10885 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10886 %{
10887 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10889 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10890 ins_encode %{
10891 Register src = $src$$Register;
10892 Register dst = $dst$$Register;
10894 __ andi(dst, src, 7);
10895 %}
10896 ins_pipe(ialu_regI_regI);
10897 %}
// ((src1 | imm) << 16) >> 16 with imm in [0, 32767]: the i2s round-trip
// cannot change the value, so only the ori remains.
10899 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10900 %{
10901 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10903 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10904 ins_encode %{
10905 Register src = $src1$$Register;
10906 int val = $src2$$constant;
10907 Register dst = $dst$$Register;
10909 __ ori(dst, src, val);
10910 %}
10911 ins_pipe(ialu_regI_regI);
10912 %}
// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler for the i2s bytecode.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    // seh = sign-extend halfword, the single-instruction form of <<16 >>16.
    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    // seb = sign-extend byte, the single-instruction form of <<24 >>24.
    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Shift-left of the int truncation of a long, by constant amount.
// NOTE(review): identical match rule to salL2I_Reg_imm above — presumably
// one of the two is redundant; TODO confirm.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Shift Left by 8-bit immediate
// (Despite the stale comment above, this rule takes the shift amount in a
// register; the hardware sllv uses the low 5 bits, matching Java's & 31.)
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dsll encodes a 5-bit sa field; for larger counts split into the
    // dsll/dsll32 pair after masking to the architectural 6 bits.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long shift-left of a sign-extended int.
// NOTE(review): identical match rule to salL_convI2L_Reg_imm below —
// presumably one of the two is redundant; TODO confirm.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Same 5-bit / 6-bit split as salL_Reg_imm. The int source is assumed
    // already sign-extended in its 64-bit register (the port's convention
    // for mRegI values) — TODO confirm no explicit ConvI2L is needed here.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    // dsllv uses the low 6 bits of the shift register, matching Java's & 63.
    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long shift-left of a sign-extended int, constant count.
// NOTE(review): identical match rule to salL_RegI2L_imm above — presumably
// one of the two is redundant; TODO confirm.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Same 5-bit immediate / dsll32 split as salL_Reg_imm.
    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Mask to the architectural 6-bit shift range up front.
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      // dsra's sa field is 5 bits; use dsra32 for counts >= 32.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >> shift) for shift in 32..63: a single dsra32 produces the
// already-sign-extended int result.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long arithmetically
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    // dsrav uses the low 6 bits of the shift register, matching Java's & 63.
    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    // dsrlv uses the low 6 bits of the shift register, matching Java's & 63.
    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical long right shift by constant 0..31: fits dsrl's 5-bit sa field.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// ((int)(src >>> shift)) & 0x7fffffff: a 31-bit field extract starting at
// the shift position — one dext instruction.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Same as slrL_Reg_immI_0_31 but the long comes from a pointer-to-integer
// cast (CastP2X), e.g. in hashing/card-table address arithmetic.
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical long right shift by constant 32..63: dsrl32 supplies the +32.
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shift) for shift > 32: the result has at most 31
// significant bits, so the dsrl32 result is already a valid int.
// (The predicate excludes shift == 32, where the high bit of the 32-bit
// result could be set and explicit sign-extension would be required.)
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// CastP2X variant of slrL_Reg_immI_32_63.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    // Re-normalize to a sign-extended 32-bit value after the 64-bit xor.
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}

// Or Instructions
// Or Register with Register
// NOTE(review): unlike xorI_Reg_Reg there is no sll(dst, dst, 0) here —
// presumably OR of two sign-extended ints is always sign-extended (it is,
// bitwise), so no re-normalization is needed.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// equivalent to rotating right by 1 then shifting right by rshift - 1,
// because the rotate parks bit 0 at bit 31 and the final srl moves both
// pieces into place.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    // Skip the srl entirely when rshift == 1 (shift by zero).
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}

// OR of a long with a pointer reinterpreted as an integer (CastP2X).
// Plain 64-bit or; no truncation needed for long operands.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src >>> shift) & mask where mask is a contiguous low-bit mask:
// a single ext (extract bit field) covers both operations.
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    // is_int_mask returns the width (number of one bits) of the mask.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 32, encoded as a rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-left long, right-amount 0..31: drotr handles sa < 32.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-left long, right-amount 32..63: drotr32 supplies the +32.
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right int: mirror of rolI_Reg_immI_0_31 with the Or operands in
// the opposite order (URShift first), same single rotr encoding.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-right long, amount 0..31.
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-right long, amount 32..63: drotr32 supplies the +32.
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    // srlv uses the low 5 bits of the shift register, matching Java's & 31.
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic shift right int by constant.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic shift right int by register.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    // srav uses the low 5 bits of the shift register, matching Java's & 31.
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// dst = (src != 0) ? 1 : 0, via movz (conditional move when src == 0).
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash the test value in AT before overwriting it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}

// Sign-extend int to long. mRegI values are kept sign-extended in their
// 64-bit registers, so a same-register conversion is a no-op; otherwise
// sll with sa=0 copies with 32->64-bit sign extension.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Truncate long to int: sll with sa=0 keeps the low 32 bits and
// sign-extends, establishing the mRegI register convention.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (long)(int)src: the round trip is exactly a 32-bit sign extension.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// long -> double: move the bits to an FPU register and convert.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}

// double -> long without a runtime call. trunc_l_d returns max_long for
// out-of-range/NaN inputs on this hardware; the fix-up below distinguishes
// the three Java-mandated results: max_long (too big), min_long (too
// small), 0 (NaN).
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff built as (-1) >>> 1.
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Result != max_long means the truncation was exact enough — done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // src < 0.0 ? The answer should have been min_long, not max_long.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    // min_long = -1 - max_long.
    // NOTE(review): subu is the 32-bit subtract; for a 64-bit min_long
    // result a dsubu seems required (compare subu32 in the int-sized
    // convD2I_reg_reg_fast) — TODO confirm what subu maps to in this
    // port's MacroAssembler.
    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
11570 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11571 match(Set dst (ConvD2L src));
11572 ins_cost(250);
11573 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11574 ins_encode %{
11575 Register dst = as_Register($dst$$reg);
11576 FloatRegister src = as_FloatRegister($src$$reg);
11578 Label L;
11580 __ c_un_d(src, src); //NaN?
11581 __ bc1t(L);
11582 __ delayed();
11583 __ move(dst, R0);
11585 __ trunc_l_d(F30, src);
11586 __ cfc1(AT, 31);
11587 __ li(T9, 0x10000);
11588 __ andr(AT, AT, T9);
11589 __ beq(AT, R0, L);
11590 __ delayed()->dmfc1(dst, F30);
11592 __ mov_d(F12, src);
11593 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11594 __ move(dst, V0);
11595 __ bind(L);
11596 %}
11598 ins_pipe( pipe_slow );
11599 %}
// float -> int, fast variant: hardware truncation, then force 0 if the
// input was NaN (movt on the c_un_s condition).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}

// float -> int, slow variant: hardware truncation with a runtime-helper
// fallback when FCSR flags an invalid operation (out-of-range input).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // NaN converts to 0 without touching the FPU truncation path.
    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    // FCSR (control reg 31) cause bit 16 = invalid operation.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, fast variant: hardware truncation, then force 0 if the
// input was NaN (movt on the c_un_s condition).
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
11680 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11681 match(Set dst (ConvF2L src));
11682 ins_cost(250);
11683 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11684 ins_encode %{
11685 Register dst = as_Register($dst$$reg);
11686 FloatRegister fval = $src$$FloatRegister;
11687 Label L;
11689 __ c_un_s(fval, fval); //NaN?
11690 __ bc1t(L);
11691 __ delayed();
11692 __ move(dst, R0);
11694 __ trunc_l_s(F30, fval);
11695 __ cfc1(AT, 31);
11696 __ li(T9, 0x10000);
11697 __ andr(AT, AT, T9);
11698 __ beq(AT, R0, L);
11699 __ delayed()->dmfc1(dst, F30);
11701 __ mov_s(F12, fval);
11702 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11703 __ move(dst, V0);
11704 __ bind(L);
11705 %}
11707 ins_pipe( pipe_slow );
11708 %}
11710 instruct convL2F_reg( regF dst, mRegL src ) %{
11711 match(Set dst (ConvL2F src));
11712 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11713 ins_encode %{
11714 FloatRegister dst = $dst$$FloatRegister;
11715 Register src = as_Register($src$$reg);
11716 Label L;
11718 __ dmtc1(src, dst);
11719 __ cvt_s_l(dst, dst);
11720 %}
11722 ins_pipe( pipe_slow );
11723 %}
// int -> float: move the bits to an FPU register and convert from word.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask vs zero: -1 if p < 0 else 0 — exactly an arithmetic shift of
// the sign bit across the word.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}

// CmpLTMask general form: -1 if p < q else 0, built from slt (0/1) then
// negation (0 - 1 = -1).
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// dst = (src != NULL) ? 1 : 0 — same movz pattern as convI2B, applied to
// a pointer operand.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash the test value in AT before overwriting it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> double: move the bits to an FPU register and convert from word.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}

// float -> double: single FPU convert, always exact.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// double -> float: single FPU convert (rounds per current FCSR mode).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647 — the value trunc_w_d yields on overflow.
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Result != max_int means the truncation succeeded — done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // src < 0.0 ? Then the answer should have been min_int, not max_int.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    // min_int = -1 - max_int (32-bit subtract); NaN forces 0 via movt.
    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
11864 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11865 match(Set dst (ConvD2I src));
11867 ins_cost(250);
11868 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11870 ins_encode %{
11871 FloatRegister src = $src$$FloatRegister;
11872 Register dst = $dst$$Register;
11873 Label L;
11875 __ trunc_w_d(F30, src);
11876 __ cfc1(AT, 31);
11877 __ li(T9, 0x10000);
11878 __ andr(AT, AT, T9);
11879 __ beq(AT, R0, L);
11880 __ delayed()->mfc1(dst, F30);
11882 __ mov_d(F12, src);
11883 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11884 __ move(dst, V0);
11885 __ bind(L);
11887 %}
11888 ins_pipe( pipe_slow );
11889 %}
// Convert oop pointer into compressed form
// (maybe-null variant; the not-null case is handled by the rule below).
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    // encode_heap_oop works in place, so copy first if regs differ.
    if (src != dst) {
      __ move(dst, src);
    }
    __ encode_heap_oop(dst);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Compress an oop known to be non-null: skips the null check/preserve.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop that may be null (constant oops are matched
// elsewhere, hence the two-part predicate).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // decode_heap_oop works in place, so copy first if regs differ.
    if (s != d) {
      __ move(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow oop known to be non-null (or constant):
// MacroAssembler provides both two-register and in-place forms.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (always non-null by construction).
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow klass pointer; two-register and in-place forms.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Materialize the current JavaThread pointer. With OPT_THREAD the thread
// is pinned in TREG and a register move suffices; otherwise fall back to
// the slower get_thread() lookup.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a compile-time type assertion only — no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}

// CastPP likewise emits nothing; size(0) tells the matcher explicitly.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII is a no-op at the machine level as well.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    // jr through RA; the nop fills the branch delay slot.
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
12033 /*
12034 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12035 instruct jumpXtnd(mRegL switch_val) %{
12036 match(Jump switch_val);
12038 ins_cost(350);
12040 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12041 "jr T9\n\t"
12042 "nop" %}
12043 ins_encode %{
12044 Register table_base = $constanttablebase;
12045 int con_offset = $constantoffset;
12046 Register switch_reg = $switch_val$$Register;
12048 if (UseLoongsonISA) {
12049 if (Assembler::is_simm(con_offset, 8)) {
12050 __ gsldx(T9, table_base, switch_reg, con_offset);
12051 } else if (Assembler::is_simm16(con_offset)) {
12052 __ daddu(T9, table_base, switch_reg);
12053 __ ld(T9, T9, con_offset);
12054 } else {
12055 __ move(T9, con_offset);
12056 __ daddu(AT, table_base, switch_reg);
12057 __ gsldx(T9, AT, T9, 0);
12058 }
12059 } else {
12060 if (Assembler::is_simm16(con_offset)) {
12061 __ daddu(T9, table_base, switch_reg);
12062 __ ld(T9, T9, con_offset);
12063 } else {
12064 __ move(T9, con_offset);
12065 __ daddu(AT, table_base, switch_reg);
12066 __ daddu(AT, T9, AT);
12067 __ ld(T9, AT, 0);
12068 }
12069 }
12071 __ jr(T9);
12072 __ nop();
12074 %}
12075 ins_pipe(pipe_jump);
12076 %}
12077 */
12079 // Jump Direct - Label defines a relative address from JMP
// Unconditional direct jump to a label. If the label has not been bound
// yet, a placeholder displacement of 0 is emitted and patched later.
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);
  ins_cost(300);
  format %{ "JMP    $labl #@jmpDir" %}
  ins_encode %{
    Label &L = *($labl$$label);
    if(&L)
      __ b(L);
    else
      __ b(int(0));  // unbound label: emit 0, patched at bind time
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12102 // Tail Jump; remove the return address; jump to target.
12103 // TailCall above leaves the return address around.
12104 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12105 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12106 // "restore" before this instruction (in Epilogue), we need to materialize it
12107 // in %i0.
12108 //FIXME
// Tail jump used by the rethrow stub (fancy_jump=2): publish the exception
// oop and the faulting pc in the registers the runtime expects, then jump.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target  ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *      [stubGenerator_mips.cpp] generate_forward_exception()
     *      [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);   // pc where the exception "returns" from
    __ move(exception_oop, oop); // exception oop expected in V0

    __ jr(target);
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
12133 // ============================================================================
12134 // Procedure Call/Return Instructions
12135 // Call Java Static Instruction
12136 // Note: If this code changes, the corresponding ret_addr_offset() and
12137 // compute_padding() functions will have to be adjusted.
// Call Java Static Instruction.
// NOTE: if this encoding changes, ret_addr_offset() and compute_padding()
// must be adjusted to match; ins_alignment(16) keeps the call site aligned
// so the return address lands where the deopt machinery expects it.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);
  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12150 // Call Java Dynamic Instruction
12151 // Note: If this code changes, the corresponding ret_addr_offset() and
12152 // compute_padding() functions will have to be adjusted.
// Call Java Dynamic Instruction: loads the inline-cache klass sentinel then
// calls; resolved/patched at run time by the IC machinery.
// NOTE: if this encoding changes, ret_addr_offset() and compute_padding()
// must be adjusted to match.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);
  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Call a runtime leaf routine that is known not to use FP state;
// no safepoint, no oop map needed.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);
  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12178 // Prefetch instructions.
12180 instruct prefetchrNTA( memory mem ) %{
12181 match(PrefetchRead mem);
12182 ins_cost(125);
12184 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12185 ins_encode %{
12186 int base = $mem$$base;
12187 int index = $mem$$index;
12188 int scale = $mem$$scale;
12189 int disp = $mem$$disp;
12191 if( index != 0 ) {
12192 if (scale == 0) {
12193 __ daddu(AT, as_Register(base), as_Register(index));
12194 } else {
12195 __ dsll(AT, as_Register(index), scale);
12196 __ daddu(AT, as_Register(base), AT);
12197 }
12198 } else {
12199 __ move(AT, as_Register(base));
12200 }
12201 if( Assembler::is_simm16(disp) ) {
12202 __ daddiu(AT, as_Register(base), disp);
12203 __ daddiu(AT, AT, disp);
12204 } else {
12205 __ move(T9, disp);
12206 __ daddu(AT, as_Register(base), T9);
12207 }
12208 __ pref(0, AT, 0); //hint: 0:load
12209 %}
12210 ins_pipe(pipe_slow);
12211 %}
12213 instruct prefetchwNTA( memory mem ) %{
12214 match(PrefetchWrite mem);
12215 ins_cost(125);
12216 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12217 ins_encode %{
12218 int base = $mem$$base;
12219 int index = $mem$$index;
12220 int scale = $mem$$scale;
12221 int disp = $mem$$disp;
12223 if( index != 0 ) {
12224 if (scale == 0) {
12225 __ daddu(AT, as_Register(base), as_Register(index));
12226 } else {
12227 __ dsll(AT, as_Register(index), scale);
12228 __ daddu(AT, as_Register(base), AT);
12229 }
12230 } else {
12231 __ move(AT, as_Register(base));
12232 }
12233 if( Assembler::is_simm16(disp) ) {
12234 __ daddiu(AT, as_Register(base), disp);
12235 __ daddiu(AT, AT, disp);
12236 } else {
12237 __ move(T9, disp);
12238 __ daddu(AT, as_Register(base), T9);
12239 }
12240 __ pref(1, AT, 0); //hint: 1:store
12241 %}
12242 ins_pipe(pipe_slow);
12243 %}
12245 // Prefetch instructions for allocation.
// Prefetch for allocation: implemented as a byte load into R0 (the result
// is discarded; the load only pulls the cache line in). Uses the fused
// Loongson gslbx (base+index+disp8 load) when available.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    Register dst = R0;  // load target is the zero register: pure prefetch

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
12310 // Call runtime without safepoint
// Call a runtime leaf routine without a safepoint.
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);
  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12323 // Load Char (16bit unsigned)
12324 instruct loadUS(mRegI dst, memory mem) %{
12325 match(Set dst (LoadUS mem));
12327 ins_cost(125);
12328 format %{ "loadUS $dst,$mem @ loadC" %}
12329 ins_encode(load_C_enc(dst, mem));
12330 ins_pipe( ialu_loadI );
12331 %}
// Load Char (16-bit unsigned) with implicit widening to long; the zero
// extension performed by the load makes a separate conversion unnecessary.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12342 // Store Char (16bit unsigned)
// Store Char (16-bit unsigned) from a register.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store Char zero: special-cased so the zero register can be stored
// without materializing the constant.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));
  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0: move the integer zero register into the FPU
// register directly, avoiding a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);
  format %{ "mov  $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the constant table
// (constanttablebase + constantoffset).
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);
  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);
    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0: 64-bit move of the integer zero register into
// the FPU register, avoiding a constant-table load.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);
  format %{ "mov  $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the constant table
// (constanttablebase + constantoffset).
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);
  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);
    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12436 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register).
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0: the bit pattern of +0.0f is all zeros, so the integer
// zero register R0 can be stored directly with sw/gsswx — no FPU involved.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));
  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx folds base+index+disp8 into one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large displacement: fold disp into the index side, keep base
          // as the gsswx base operand.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12523 // Load Double
// Load Double from memory.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12533 // Load Double - UNaligned
// Load Double - UNaligned. Currently uses the same encoding as the aligned
// load; a dedicated ldl/ldr sequence would be faster on strict-alignment
// hardware.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12543 instruct storeD_reg( memory mem, regD src) %{
12544 match(Set mem (StoreD mem src));
12546 ins_cost(50);
12547 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12548 ins_encode(store_D_reg_enc(mem, src));
12549 ins_pipe( fpu_storeF );
12550 %}
12552 instruct storeD_imm0( memory mem, immD0 zero) %{
12553 match(Set mem (StoreD mem zero));
12555 ins_cost(40);
12556 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12557 ins_encode %{
12558 int base = $mem$$base;
12559 int index = $mem$$index;
12560 int scale = $mem$$scale;
12561 int disp = $mem$$disp;
12563 __ mtc1(R0, F30);
12564 __ cvt_d_w(F30, F30);
12566 if( index != 0 ) {
12567 if ( UseLoongsonISA ) {
12568 if ( Assembler::is_simm(disp, 8) ) {
12569 if (scale == 0) {
12570 __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
12571 } else {
12572 __ dsll(T9, as_Register(index), scale);
12573 __ gssdxc1(F30, as_Register(base), T9, disp);
12574 }
12575 } else if ( Assembler::is_simm16(disp) ) {
12576 if (scale == 0) {
12577 __ daddu(AT, as_Register(base), as_Register(index));
12578 __ sdc1(F30, AT, disp);
12579 } else {
12580 __ dsll(T9, as_Register(index), scale);
12581 __ daddu(AT, as_Register(base), T9);
12582 __ sdc1(F30, AT, disp);
12583 }
12584 } else {
12585 if (scale == 0) {
12586 __ move(T9, disp);
12587 __ daddu(AT, as_Register(index), T9);
12588 __ gssdxc1(F30, as_Register(base), AT, 0);
12589 } else {
12590 __ move(T9, disp);
12591 __ dsll(AT, as_Register(index), scale);
12592 __ daddu(AT, AT, T9);
12593 __ gssdxc1(F30, as_Register(base), AT, 0);
12594 }
12595 }
12596 } else { // not use loongson isa
12597 if(scale != 0) {
12598 __ dsll(T9, as_Register(index), scale);
12599 __ daddu(AT, as_Register(base), T9);
12600 } else {
12601 __ daddu(AT, as_Register(base), as_Register(index));
12602 }
12603 if( Assembler::is_simm16(disp) ) {
12604 __ sdc1(F30, AT, disp);
12605 } else {
12606 __ move(T9, disp);
12607 __ daddu(AT, AT, T9);
12608 __ sdc1(F30, AT, 0);
12609 }
12610 }
12611 } else {// index is 0
12612 if ( UseLoongsonISA ) {
12613 if ( Assembler::is_simm16(disp) ) {
12614 __ sdc1(F30, as_Register(base), disp);
12615 } else {
12616 __ move(T9, disp);
12617 __ gssdxc1(F30, as_Register(base), T9, 0);
12618 }
12619 } else {
12620 if( Assembler::is_simm16(disp) ) {
12621 __ sdc1(F30, as_Register(base), disp);
12622 } else {
12623 __ move(T9, disp);
12624 __ daddu(AT, as_Register(base), T9);
12625 __ sdc1(F30, AT, 0);
12626 }
12627 }
12628 }
12629 %}
12630 ins_pipe( ialu_storeI );
12631 %}
// Load an int from a stack slot (SP-relative); the slot displacement must
// fit in a 16-bit immediate.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);
  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store an int to a stack slot (SP-relative).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);
  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot (SP-relative).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);
  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a long to a stack slot (SP-relative).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);
  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot (SP-relative).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);
  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a pointer to a stack slot (SP-relative).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);
  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot (SP-relative).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);
  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a float to a stack slot (SP-relative).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);
  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
12737 // Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative).
// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);
  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a double to a stack slot (SP-relative).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);
  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; the heavy lifting is in
// MacroAssembler::fast_lock. The box register is clobbered (USE_KILL).
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit; the heavy lifting is in
// MacroAssembler::fast_unlock. The box register is clobbered (USE_KILL).
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12790 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate: byte store with ordering (sync'd encoding)
// so the card mark is visible to the concurrent collector.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12801 // Die now
12802 instruct ShouldNotReachHere( )
12803 %{
12804 match(Halt);
12805 ins_cost(300);
12807 // Use the following format syntax
12808 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12809 ins_encode %{
12810 // Here we should emit illtrap !
12812 __ stop("in ShoudNotReachHere");
12814 %}
12815 ins_pipe( pipe_jump );
12816 %}
// Address computation (lea): dst = base + disp for a narrow-oop address
// with no shift (only legal when narrow_oop_shift() == 0).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);
  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;
    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);
  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);
  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
12884 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Jump Direct Conditional - counted-loop back branch on a register/register
// int compare. The "above/below" cases use signed slt plus a branch on AT;
// an unbound label emits a 0 displacement that is patched later.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);
  ins_cost(300);
  format %{ "J$cop  $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back branch against a 16-bit immediate: computes
// AT = src1 - src2 with one addiu32, then branches on AT's sign/zero.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);
  ins_cost(250);
  format %{ "J$cop  $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    __ addiu32(AT, op1, -1 * op2);  // AT = src1 - src2

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13011 /*
13012 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13013 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13014 match(CountedLoopEnd cop cmp);
13015 effect(USE labl);
13017 ins_cost(300);
13018 format %{ "J$cop,u $labl\t# Loop end" %}
13019 size(6);
13020 opcode(0x0F, 0x80);
13021 ins_encode( Jcc( cop, labl) );
13022 ins_pipe( pipe_jump );
13023 ins_pc_relative(1);
13024 %}
13026 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13027 match(CountedLoopEnd cop cmp);
13028 effect(USE labl);
13030 ins_cost(200);
13031 format %{ "J$cop,u $labl\t# Loop end" %}
13032 opcode(0x0F, 0x80);
13033 ins_encode( Jcc( cop, labl) );
13034 ins_pipe( pipe_jump );
13035 ins_pc_relative(1);
13036 %}
13037 */
13039 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// This match pattern is created for StoreIConditional since I cannot match
// IfNode without a RegFlags! fujie 2012/07/17
// The flag producer leaves its result in AT (non-zero means the condition
// held), so "equal" branches on AT != 0 and "not equal" on AT == 0.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);
  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}
  ins_encode %{
    Label &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13074 // ============================================================================
13075 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13076 // array for an instance of the superklass. Set a hidden internal cache on a
13077 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13078 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// The 2nd slow-half of a subtype check: scan the subklass's secondary
// superklass array for the superklass; zero result means hit (see
// gen_subtype_check()). T8 is excluded from sub/super since it is the tmp.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13090 // Conditional-store of an int value.
13091 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional-store of an int value, implemented with an ll/sc retry loop.
// On exit AT is non-zero (0xFF) on success and zero on compare failure;
// jmpCon_flags consumes that convention. Only base+disp addressing is
// supported (stop() otherwise); disp must be simm16 for ll/sc.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

    // int     base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);       // value changed: fail
      __ delayed()->addu(AT, R0, R0);    // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                   // AT = 1 if store succeeded
      __ beq(AT, R0, again);             // sc failed: retry
      __ delayed()->addiu(AT, R0, 0xFF); // delay slot: AT = 0xFF (success flag)
      __ bind(failure);
      __ sync();
    }
  %}
  ins_pipe( long_memory_op );
%}
13131 // Conditional-store of a long value.
13132 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13133 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13134 %{
13135 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13136 effect(KILL oldval);
13138 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13139 ins_encode%{
13140 Register oldval = $oldval$$Register;
13141 Register newval = $newval$$Register;
13142 Address addr((Register)$mem$$base, $mem$$disp);
13144 int index = $mem$$index;
13145 int scale = $mem$$scale;
13146 int disp = $mem$$disp;
13148 guarantee(Assembler::is_simm16(disp), "");
13150 if( index != 0 ) {
13151 __ stop("in storeIConditional: index != 0");
13152 } else {
13153 __ cmpxchg(newval, addr, oldval);
13154 }
13155 %}
13156 ins_pipe( long_memory_op );
13157 %}
13160 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
13161 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
13162 effect(KILL oldval);
13163 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
13164 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
13165 "MOV $res, 1 @ compareAndSwapI\n\t"
13166 "BNE AT, R0 @ compareAndSwapI\n\t"
13167 "MOV $res, 0 @ compareAndSwapI\n"
13168 "L:" %}
13169 ins_encode %{
13170 Register newval = $newval$$Register;
13171 Register oldval = $oldval$$Register;
13172 Register res = $res$$Register;
13173 Address addr($mem_ptr$$Register, 0);
13174 Label L;
13176 __ cmpxchg32(newval, addr, oldval);
13177 __ move(res, AT);
13178 %}
13179 ins_pipe( long_memory_op );
13180 %}
13182 //FIXME:
13183 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
13184 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
13185 effect(KILL oldval);
13186 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
13187 "MOV $res, AT @ compareAndSwapP\n\t"
13188 "L:" %}
13189 ins_encode %{
13190 Register newval = $newval$$Register;
13191 Register oldval = $oldval$$Register;
13192 Register res = $res$$Register;
13193 Address addr($mem_ptr$$Register, 0);
13194 Label L;
13196 __ cmpxchg(newval, addr, oldval);
13197 __ move(res, AT);
13198 %}
13199 ins_pipe( long_memory_op );
13200 %}
// CompareAndSwapN: atomic compare-and-exchange of a 32-bit narrow oop.
// res receives the success flag from AT; oldval is KILLed (sign-extended
// in place, then clobbered by the ll/sc loop).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *   Thus, we should extend oldval's sign for correct comparison.
     */
    __ sll(oldval, oldval, 0);   // sll with shamt 0 sign-extends the low 32 bits

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);   // AT holds the boolean result of cmpxchg32
  %}
  ins_pipe( long_memory_op );
%}
13226 //----------Max and Min--------------------------------------------------------
13227 // Min Instructions
13228 ////
13229 // *** Min and Max using the conditional move are slower than the
13230 // *** branch version on a Pentium III.
13231 // // Conditional move for min
13232 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13233 // effect( USE_DEF op2, USE op1, USE cr );
13234 // format %{ "CMOVlt $op2,$op1\t! min" %}
13235 // opcode(0x4C,0x0F);
13236 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13237 // ins_pipe( pipe_cmov_reg );
13238 //%}
13239 //
13240 //// Min Register with Register (P6 version)
13241 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13242 // predicate(VM_Version::supports_cmov() );
13243 // match(Set op2 (MinI op1 op2));
13244 // ins_cost(200);
13245 // expand %{
13246 // eFlagsReg cr;
13247 // compI_eReg(cr,op1,op2);
13248 // cmovI_reg_lt(op2,op1,cr);
13249 // %}
13250 //%}
13252 // Min Register with Register (generic version)
// Integer minimum computed branch-free: set AT when src is the smaller
// value, then conditionally move it into dst.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    // AT = (src < dst) ? 1 : 0; movn replaces dst with src when AT != 0.
    __ slt(AT, $src$$Register, $dst$$Register);
    __ movn($dst$$Register, $src$$Register, AT);
  %}

  ins_pipe( pipe_slow );
%}
13271 // Max Register with Register
13272 // *** Min and Max using the conditional move are slower than the
13273 // *** branch version on a Pentium III.
13274 // // Conditional move for max
13275 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13276 // effect( USE_DEF op2, USE op1, USE cr );
13277 // format %{ "CMOVgt $op2,$op1\t! max" %}
13278 // opcode(0x4F,0x0F);
13279 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13280 // ins_pipe( pipe_cmov_reg );
13281 //%}
13282 //
13283 // // Max Register with Register (P6 version)
13284 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13285 // predicate(VM_Version::supports_cmov() );
13286 // match(Set op2 (MaxI op1 op2));
13287 // ins_cost(200);
13288 // expand %{
13289 // eFlagsReg cr;
13290 // compI_eReg(cr,op1,op2);
13291 // cmovI_reg_gt(op2,op1,cr);
13292 // %}
13293 //%}
13295 // Max Register with Register (generic version)
// Integer maximum computed branch-free: set AT when dst is the smaller
// value, then conditionally move src into dst.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    // AT = (dst < src) ? 1 : 0; movn replaces dst with src when AT != 0.
    __ slt(AT, $dst$$Register, $src$$Register);
    __ movn($dst$$Register, $src$$Register, AT);
  %}

  ins_pipe( pipe_slow );
%}
// Clamp an integer at zero from below: dst = max(dst, 0), branch-free.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    // When dst is negative (dst < 0), overwrite it with zero.
    __ slt(AT, $dst$$Register, R0);
    __ movn($dst$$Register, R0, AT);
  %}

  ins_pipe( pipe_slow );
%}
// Zero the upper half of a long: dst = src & 0xFFFFFFFF.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    // dext copies bit field [0,31] of src into dst and clears bits [32,63].
    __ dext($dst$$Register, $src$$Register, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two 32-bit ints into one 64-bit long:
//   dst = (src2 << 32) | zero_extend(src1)
// i.e. src2 becomes the high word, src1 the low word.  The three cases
// below handle register aliasing between dst and the sources; their
// statement order matters, so do not reorder.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // dst already holds the low word; insert src2 into bits [32,63].
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move the high word into place first, then insert the low word.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // Disjoint registers: build the low word, then insert the high word.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// Matches ConvI2L masked with 0xFFFFFFFF, i.e. an unsigned widening.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    // Copy bits [0,31] of src into dst with the high half cleared.
    __ dext($dst$$Register, $src$$Register, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Long -> int -> long round trip under a 32-bit mask collapses to a
// plain zero-extension of the low word.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    // Copy bits [0,31] of src into dst with the high half cleared.
    __ dext($dst$$Register, $src$$Register, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// The whole pattern folds into one zero-extending 32-bit load
// (documented as lwu in the format; emitted via load_N_enc).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction

// GC safepoint poll with the polling-page address already in a register.
// predicate(false) prevents the matcher from selecting this variant;
// safePoint_poll below is the form actually used.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Record a poll relocation so the VM recognizes this load as a
    // safepoint poll, then touch the polling page.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// GC safepoint poll: materialize the polling-page address in T9, then
// load from it.  The poll_type relocation lets the VM map the resulting
// fault back to this safepoint.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat needs no code on this target: the empty encoding emits
// nothing and costs nothing.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble needs no code on this target: the empty encoding emits
// nothing and costs nothing.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// Count leading zeros of a 32-bit int with the clz instruction; only
// matched when UseCountLeadingZerosInstruction is enabled.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a 64-bit long with the dclz instruction; only
// matched when UseCountLeadingZerosInstruction is enabled.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a 32-bit int; only matched when
// UseCountTrailingZerosInstruction is enabled.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a 64-bit long; only matched when
// UseCountTrailingZerosInstruction is enabled.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Format mnemonic fixed: the emitted instruction is dctz, not "dcto".
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a vecD (double FP) register, so the
// double-precision load encoding is reused.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Mirror of loadV8: reuse the double-precision store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD using the DSP
// replv_ob instruction; guarded by Use3A2000 and preferred (lower cost)
// over the generic Repl8B below.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    // Move the replicated 64-bit pattern into the FP/vector register.
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte replication (no DSP): double the pattern width step by
// step with dins/dinsu (8 -> 16 -> 32 -> 64 bits), then move to vecD.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);      // byte -> halfword
    __ dins(AT, AT, 16, 16);    // halfword -> word
    __ dinsu(AT, AT, 32, 32);   // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes using the DSP repl_ob
// instruction; guarded by Use3A2000.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte-immediate replication (no DSP): load the constant, then
// widen the pattern with dins/dinsu (8 -> 16 -> 32 -> 64 bits).
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);      // byte -> halfword
    __ dins(AT, AT, 16, 16);    // halfword -> word
    __ dinsu(AT, AT, 32, 32);   // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero byte vector: a single move of R0 into the vecD register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones byte vector (-1 in every lane): nor(AT, R0, R0) produces
// 0xFFFFFFFFFFFFFFFF, then move it into the vecD register.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes using the DSP replv_qh
// instruction; guarded by Use3A2000.
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short replication (no DSP): widen the 16-bit pattern with
// dins/dinsu (16 -> 32 -> 64 bits), then move to vecD.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // halfword -> word
    __ dinsu(AT, AT, 32, 32);   // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes with DSP instructions;
// guarded by Use3A2000.  Small constants use repl_qh directly; larger
// ones are materialized first and replicated with replv_qh.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short-immediate replication (no DSP): load the constant, then
// widen the 16-bit pattern with dins/dinsu (16 -> 32 -> 64 bits).
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);    // halfword -> word
    __ dinsu(AT, AT, 32, 32);   // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero short vector: a single move of R0 into the vecD register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones short vector (-1 in every lane): nor(AT, R0, R0) produces
// all ones, then move it into the vecD register.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// The two dins/dinsu writes together cover all 64 bits of AT, so no
// prior value of AT leaks into the result.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);    // low word
    __ dinsu(AT, $src$$Register, 32, 32);  // high word
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// NOTE(review): tmp (A7) is KILLed but the encoding only clobbers AT —
// confirm whether the tmp operand is still needed.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  // Format fixed to show the real operands of each emitted instruction.
  format %{ "li32 AT, [$con]\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ li32(AT, val);              // low word = con
    __ dinsu(AT, AT, 32, 32);      // duplicate into the high word
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar zero to be vector
// A single move of R0 zeroes both lanes.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
// nor(AT, R0, R0) yields all ones in AT, filling both lanes with -1.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// cvt.ps packs the same single into both halves of a paired-single.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
// A single move of R0 zeroes both packed-single lanes.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Floats vector add
// Two-operand form: dst = dst + src, using paired-single add.ps.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Three-operand form of the packed-single add: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// --------------------------------- SUB --------------------------------------

// Floats vector sub
// Two-operand form: dst = dst - src, using paired-single sub.ps.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// --------------------------------- MUL --------------------------------------

// Floats vector mul
// Two-operand form: dst = dst * src, using paired-single mul.ps.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Three-operand form of the packed-single multiply: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13857 // --------------------------------- DIV --------------------------------------
13858 // MIPS do not have div.ps
13861 //----------PEEPHOLE RULES-----------------------------------------------------
13862 // These must follow all instruction definitions as they use the names
13863 // defined in the instructions definitions.
13864 //
13865 // peepmatch ( root_instr_name [preceding_instruction]* );
13866 //
13867 // peepconstraint %{
13868 // (instruction_number.operand_name relational_op instruction_number.operand_name
13869 // [, ...] );
13870 // // instruction numbers are zero-based using left to right order in peepmatch
13871 //
13872 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13873 // // provide an instruction_number.operand_name for each operand that appears
13874 // // in the replacement instruction's match rule
13875 //
13876 // ---------VM FLAGS---------------------------------------------------------
13877 //
13878 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13879 //
13880 // Each peephole rule is given an identifying number starting with zero and
13881 // increasing by one in the order seen by the parser. An individual peephole
13882 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13883 // on the command-line.
13884 //
13885 // ---------CURRENT LIMITATIONS----------------------------------------------
13886 //
13887 // Only match adjacent instructions in same basic block
13888 // Only equality constraints
13889 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13890 // Only one replacement instruction
13891 //
13892 // ---------EXAMPLE----------------------------------------------------------
13893 //
13894 // // pertinent parts of existing instructions in architecture description
13895 // instruct movI(eRegI dst, eRegI src) %{
13896 // match(Set dst (CopyI src));
13897 // %}
13898 //
13899 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13900 // match(Set dst (AddI dst src));
13901 // effect(KILL cr);
13902 // %}
13903 //
13904 // // Change (inc mov) to lea
13905 // peephole %{
13906 //   // increment preceded by register-register move
13907 // peepmatch ( incI_eReg movI );
13908 // // require that the destination register of the increment
13909 // // match the destination register of the move
13910 // peepconstraint ( 0.dst == 1.dst );
13911 // // construct a replacement instruction that sets
13912 // // the destination to ( move's source register + one )
13913 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13914 // %}
13915 //
13916 // Implementation no longer uses movX instructions since
13917 // machine-independent system no longer uses CopyX nodes.
13918 //
13919 // peephole %{
13920 // peepmatch ( incI_eReg movI );
13921 // peepconstraint ( 0.dst == 1.dst );
13922 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13923 // %}
13924 //
13925 // peephole %{
13926 // peepmatch ( decI_eReg movI );
13927 // peepconstraint ( 0.dst == 1.dst );
13928 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13929 // %}
13930 //
13931 // peephole %{
13932 // peepmatch ( addI_eReg_imm movI );
13933 // peepconstraint ( 0.dst == 1.dst );
13934 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13935 // %}
13936 //
13937 // peephole %{
13938 // peepmatch ( addP_eReg_imm movP );
13939 // peepconstraint ( 0.dst == 1.dst );
13940 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13941 // %}
13943 // // Change load of spilled value to only a spill
13944 // instruct storeI(memory mem, eRegI src) %{
13945 // match(Set mem (StoreI mem src));
13946 // %}
13947 //
13948 // instruct loadI(eRegI dst, memory mem) %{
13949 // match(Set dst (LoadI mem));
13950 // %}
13951 //
13952 //peephole %{
13953 // peepmatch ( loadI storeI );
13954 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13955 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13956 //%}
13958 //----------SMARTSPILL RULES---------------------------------------------------
13959 // These must follow all instruction definitions as they use the names
13960 // defined in the instructions definitions.