Mon, 07 Aug 2017 09:58:19 +0800
#5681,#5776: Add more instruct rules for mips.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for the heap base of compressed oops
// Allocation order for the integer/pointer register chunk.
// Callee-saved (S*) registers come first; RA/SP/FP last.
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // comma was missing here, fusing GP_H with RA in the list
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Single-precision allocatable FP registers. F30 is deliberately excluded
// (used as a temporary in D2I, see note above); comma between F17 and F18
// was missing.
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Platform hooks used by C2 to emit and size the exception- and
// deopt-handler stubs; the emit_* bodies live in the source %{ %} block.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16); // emit_exception_handler aligns to 16 bytes
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16); // emit_deopt_handler aligns to 16 bytes
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emit the exception-handler stub and return its starting offset within
// the code buffer (0 if the buffer could not be expanded).
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 // Note that the code buffer's insts_mark is always relative to insts.
540 // That's why we must use the macroassembler to generate a handler.
541 MacroAssembler _masm(&cbuf);
542 address base =
543 __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
547 __ block_comment("; emit_exception_handler");
549 cbuf.set_insts_mark();
// Patchable jump to the shared exception blob, tagged as a runtime call
// so the target survives relocation.
550 __ relocate(relocInfo::runtime_call_type);
551 __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
552 __ align(16);
553 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
554 __ end_a_stub();
555 return offset;
556 }
558 // Emit deopt handler code.
// Emit the deopt-handler stub: a patchable call into the deopt blob's
// unpack entry. Returns the stub's starting offset (0 on buffer-expand failure).
559 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
560 // Note that the code buffer's insts_mark is always relative to insts.
561 // That's why we must use the macroassembler to generate a handler.
562 MacroAssembler _masm(&cbuf);
563 address base =
564 __ start_a_stub(size_deopt_handler());
566 // FIXME
567 if (base == NULL) return 0; // CodeBuffer::expand failed
568 int offset = __ offset();
570 __ block_comment("; emit_deopt_handler");
572 cbuf.set_insts_mark();
// Unlike the exception handler this is a call, so the return address is
// available to the deoptimization machinery.
573 __ relocate(relocInfo::runtime_call_type);
574 __ patchable_call(SharedRuntime::deopt_blob()->unpack());
575 __ align(16);
576 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
577 __ end_a_stub();
578 return offset;
579 }
// Reject ideal opcodes whose AD match rules exist but are additionally
// gated on a runtime flag (count-leading/trailing-zeros instructions).
582 const bool Matcher::match_rule_supported(int opcode) {
583 if (!has_match_rule(opcode))
584 return false;
586 switch (opcode) {
587 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
588 case Op_CountLeadingZerosI:
589 case Op_CountLeadingZerosL:
590 if (!UseCountLeadingZerosInstruction)
591 return false;
592 break;
593 case Op_CountTrailingZerosI:
594 case Op_CountTrailingZerosL:
595 if (!UseCountTrailingZerosInstruction)
596 return false;
597 break;
598 }
600 return true; // Per default match rules are supported.
601 }
603 //FIXME
604 // emit call stub, compiled java to interpreter
// Emit the compiled-Java-to-interpreter call stub. On MIPS the stub
// materializes the (initially unresolved) methodOop into S3 and then
// performs a patchable jump; both are fixed up later by the IC machinery.
605 void emit_java_to_interp(CodeBuffer &cbuf ) {
606 // Stub is fixed up when the corresponding call is converted from calling
607 // compiled code to calling interpreted code.
608 // mov rbx,0   (x86 analogue of the sequence below)
609 // jmp -1
611 address mark = cbuf.insts_mark(); // get mark within main instrs section
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a stub.
615 MacroAssembler _masm(&cbuf);
617 address base =
618 __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) return; // CodeBuffer::expand failed
620 // static stub relocation stores the instruction address of the call
622 __ relocate(static_stub_Relocation::spec(mark), 0);
624 // static stub relocation also tags the methodOop in the code-stream.
625 __ patchable_set48(S3, (long)0);
626 // This is recognized as unresolved by relocs/nativeInst/ic code
628 __ relocate(relocInfo::runtime_call_type);
630 cbuf.set_insts_mark();
631 address call_pc = (address)-1; // placeholder target; patched when the call is resolved
632 __ patchable_jump(call_pc);
633 __ align(16);
634 __ end_a_stub();
635 // Update current stubs pointer and restore code_end.
636 }
638 // Size in bytes of the call stub (compiled Java to interpreter),
638 // rounded up to keep the stub 16-byte aligned.
639 uint size_java_to_interp() {
640 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
641 return round_to(size, 16);
642 }
644 // relocation entries for call stub, compiled java to interpreter
645 uint reloc_java_to_interp() {
646 return 16; // in emit_java_to_interp + in Java_Static_Call
647 }
// Return true when 'offset' can be encoded as the signed 16-bit displacement
// used by MIPS short branches. Larger reaches are not implemented for this
// port yet, so we stop hard instead of silently mis-encoding.
649 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
650 if( Assembler::is_simm16(offset) ) return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
return false; // not reached (Unimplemented() aborts); prevents falling off a non-void function
655 }
658 // No additional cost for CMOVL.
659 const int Matcher::long_cmove_cost() { return 0; }
661 // No CMOVF/CMOVD with SSE2
662 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
664 // Does the CPU require late expand (see block.cpp for description of late expand)?
665 const bool Matcher::require_postalloc_expand = false;
667 // Should the Matcher clone shifts on addressing modes, expecting them
668 // to be subsumed into complex addressing expressions or compute them
669 // into registers? True for Intel but false for most RISCs
670 const bool Matcher::clone_shift_expressions = false;
672 // Do we need to mask the count passed to shift instructions or does
673 // the cpu only look at the lower 5/6 bits anyway?
674 const bool Matcher::need_masked_shift_count = false;
676 bool Matcher::narrow_oop_use_complex_address() {
677 NOT_LP64(ShouldNotCallThis());
678 assert(UseCompressedOops, "only for compressed oops code");
679 return false;
680 }
682 bool Matcher::narrow_klass_use_complex_address() {
683 NOT_LP64(ShouldNotCallThis());
684 assert(UseCompressedClassPointers, "only for compressed klass code");
685 return false;
686 }
688 // This is UltraSparc specific, true just means we have fast l2f conversion
689 const bool Matcher::convL2FSupported(void) {
690 return true;
691 }
693 // Max vector size in bytes. 0 if not supported.
694 const int Matcher::vector_width_in_bytes(BasicType bt) {
695 if (MaxVectorSize == 0)
696 return 0;
697 assert(MaxVectorSize == 8, "");
698 return 8;
699 }
701 // Vector ideal reg
702 const int Matcher::vector_ideal_reg(int size) {
703 assert(MaxVectorSize == 8, "");
704 switch(size) {
705 case 8: return Op_VecD;
706 }
707 ShouldNotReachHere();
708 return 0;
709 }
711 // Only lowest bits of xmm reg are used for vector shift count.
712 const int Matcher::vector_shift_count_ideal_reg(int size) {
713 fatal("vector shift is not supported");
714 return Node::NotAMachineReg;
715 }
717 // Limits on vector size (number of elements) loaded into vector.
718 const int Matcher::max_vector_size(const BasicType bt) {
719 assert(is_java_primitive(bt), "only primitive type vectors");
720 return vector_width_in_bytes(bt)/type2aelembytes(bt);
721 }
723 const int Matcher::min_vector_size(const BasicType bt) {
724 return max_vector_size(bt); // Same as max.
725 }
727 // MIPS supports misaligned vectors store/load? FIXME
728 const bool Matcher::misaligned_vectors_ok() {
729 return false;
730 //return !AlignVector; // can be changed by flag
731 }
733 // Register for DIVI projection of divmodI
734 RegMask Matcher::divI_proj_mask() {
735 ShouldNotReachHere();
736 return RegMask();
737 }
739 // Register for MODI projection of divmodI
740 RegMask Matcher::modI_proj_mask() {
741 ShouldNotReachHere();
742 return RegMask();
743 }
745 // Register for DIVL projection of divmodL
746 RegMask Matcher::divL_proj_mask() {
747 ShouldNotReachHere();
748 return RegMask();
749 }
751 int Matcher::regnum_to_fpu_offset(int regnum) {
752 return regnum - 32; // The FP registers are in the second chunk
753 }
756 const bool Matcher::isSimpleConstant64(jlong value) {
757 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
758 return true;
759 }
762 // Return whether or not this register is ever used as an argument. This
763 // function is used on startup to build the trampoline stubs in generateOptoStub.
764 // Registers not mentioned will be killed by the VM call in the trampoline, and
765 // arguments in those registers not be available to the callee.
766 bool Matcher::can_be_java_arg( int reg ) {
767 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
768 if ( reg == T0_num || reg == T0_H_num
769 || reg == A0_num || reg == A0_H_num
770 || reg == A1_num || reg == A1_H_num
771 || reg == A2_num || reg == A2_H_num
772 || reg == A3_num || reg == A3_H_num
773 || reg == A4_num || reg == A4_H_num
774 || reg == A5_num || reg == A5_H_num
775 || reg == A6_num || reg == A6_H_num
776 || reg == A7_num || reg == A7_H_num )
777 return true;
779 if ( reg == F12_num || reg == F12_H_num
780 || reg == F13_num || reg == F13_H_num
781 || reg == F14_num || reg == F14_H_num
782 || reg == F15_num || reg == F15_H_num
783 || reg == F16_num || reg == F16_H_num
784 || reg == F17_num || reg == F17_H_num
785 || reg == F18_num || reg == F18_H_num
786 || reg == F19_num || reg == F19_H_num )
787 return true;
789 return false;
790 }
792 bool Matcher::is_spillable_arg( int reg ) {
793 return can_be_java_arg(reg);
794 }
796 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
797 return false;
798 }
800 // Register for MODL projection of divmodL
801 RegMask Matcher::modL_proj_mask() {
802 ShouldNotReachHere();
803 return RegMask();
804 }
806 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
807 return FP_REG_mask();
808 }
810 // MIPS doesn't support AES intrinsics
811 const bool Matcher::pass_original_key_for_aes() {
812 return false;
813 }
815 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
816 //lui
817 //ori
818 //dsll
819 //ori
821 //jalr
822 //nop
824 return round_to(current_offset, alignment_required()) - current_offset;
825 }
827 int CallLeafDirectNode::compute_padding(int current_offset) const {
828 //lui
829 //ori
830 //dsll
831 //ori
833 //jalr
834 //nop
836 return round_to(current_offset, alignment_required()) - current_offset;
837 }
839 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
840 //lui
841 //ori
842 //dsll
843 //ori
845 //jalr
846 //nop
848 return round_to(current_offset, alignment_required()) - current_offset;
849 }
851 // If CPU can load and store mis-aligned doubles directly then no fixup is
852 // needed. Else we split the double into 2 integer pieces and move it
853 // piece-by-piece. Only happens when passing doubles into C code as the
854 // Java calling convention forces doubles to be aligned.
855 const bool Matcher::misaligned_doubles_ok = false;
856 // Do floats take an entire double register or just half?
857 //const bool Matcher::float_in_double = true;
858 bool Matcher::float_in_double() { return false; }
859 // Threshold size for cleararray.
860 const int Matcher::init_array_short_size = 8 * BytesPerLong;
861 // Do ints take an entire long register or just half?
862 const bool Matcher::int_in_long = true;
863 // Is it better to copy float constants, or load them directly from memory?
864 // Intel can load a float constant from a direct address, requiring no
865 // extra registers. Most RISCs will have to materialize an address into a
866 // register first, so they would do better to copy the constant from stack.
867 const bool Matcher::rematerialize_float_constants = false;
868 // Advertise here if the CPU requires explicit rounding operations
869 // to implement the UseStrictFP mode.
870 const bool Matcher::strict_fp_requires_explicit_rounding = false;
871 // The ecx parameter to rep stos for the ClearArray node is in dwords.
872 const bool Matcher::init_array_count_is_in_bytes = false;
875 // Indicate if the safepoint node needs the polling page as an input.
876 // Since MIPS doesn't have absolute addressing it would seem to need one, yet
876 // this port returns false — verify against the safepoint-polling implementation.
877 bool SafePointNode::needs_polling_address_input() {
878 return false;
879 }
881 // !!!!! Special hack to get all type of calls to specify the byte offset
882 // from the start of the call to the point where the return address
883 // will point.
// Byte offset from the start of a static Java call to its return address:
// six 4-byte instructions (sequence listed below).
884 int MachCallStaticJavaNode::ret_addr_offset() {
885 //lui
886 //ori
887 //nop
888 //nop
889 //jalr
890 //nop
891 return 24; // 6 instructions x 4 bytes
892 }
// Byte offset from the start of a dynamic (inline-cache) Java call to its
// return address: 4-instruction IC-klass load plus 6-instruction call.
894 int MachCallDynamicJavaNode::ret_addr_offset() {
895 //lui IC_Klass,
896 //ori IC_Klass,
897 //dsll IC_Klass
898 //ori IC_Klass
900 //lui T9
901 //ori T9
902 //nop
903 //nop
904 //jalr T9
905 //nop
906 return 4 * 4 + 4 * 6; // 40 bytes total
907 }
909 //=============================================================================
911 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
912 enum RC { rc_bad, rc_int, rc_float, rc_stack };
// Classify an OptoReg for spill-copy generation: invalid -> rc_bad,
// stack slot -> rc_stack, GPR -> rc_int, otherwise must be an FPR.
913 static enum RC rc_class( OptoReg::Name reg ) {
914 if( !OptoReg::is_valid(reg) ) return rc_bad;
915 if (OptoReg::is_stack(reg)) return rc_stack;
916 VMReg r = OptoReg::as_VMReg(reg);
917 if (r->is_Register()) return rc_int;
918 assert(r->is_FloatRegister(), "must be");
919 return rc_float;
920 }
// Shared worker for MachSpillCopyNode emit()/format()/size().  Handles every
// combination of {stack, gpr, fpr} -> {stack, gpr, fpr} moves, each in a
// 64-bit flavour (an even-aligned register pair) and a 32-bit flavour.
// Modes of operation:
//   cbuf != NULL            : emit real machine code into the buffer
//   cbuf == NULL, !do_size  : print a textual form to 'st' (PRODUCT builds skip this)
//   do_size                 : only accumulate and return the size in bytes
// Every MIPS instruction is 4 bytes, which is why size is bumped in 4s/8s.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem : bounce through scratch register AT
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends for int values, lwu zero-extends otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 for int values, daddu (64-bit copy) otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          // NOTE(review): prints dst before src, unlike the dmtc1 operand
          // order actually emitted -- debug output only; confirm intent.
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unreachable: every supported class combination returns above.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Debug-only: print the spill copy without emitting code (cbuf == NULL path).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy machine code into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy (do_size pass; nothing is emitted).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1353 //=============================================================================
1354 #
#ifndef PRODUCT
// Debug-only: textual form of the breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; computed generically by re-emitting.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1372 //=============================================================================
1373 #ifndef PRODUCT
1374 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1375 Compile *C = ra_->C;
1376 int framesize = C->frame_size_in_bytes();
1378 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1380 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1381 st->cr(); st->print("\t");
1382 if (UseLoongsonISA) {
1383 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1384 } else {
1385 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1386 st->cr(); st->print("\t");
1387 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1388 }
1390 if( do_polling() && C->is_method_compilation() ) {
1391 st->print("Poll Safepoint # MachEpilogNode");
1392 }
1393 }
1394 #endif
// Emit the method epilogue: pop the frame, restore FP and RA from the
// caller-side save area, and (for method compilations that poll) read the
// polling page to trigger a safepoint trap on return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; FP/RA then sit just below the (new) SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Quad load: restores both RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Return-poll: load from the polling page; relocation marks it so the
    // VM can recognize the faulting PC as a poll_return.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson ISA, polling), so compute it generically.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries the epilogue may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1431 int MachEpilogNode::safepoint_offset() const { return 0; }
1433 //=============================================================================
1435 #ifndef PRODUCT
// Debug-only: print the lock-box address computation (reg = SP + offset).
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
1441 #endif
// One 4-byte instruction; must agree with BoxLockNode::emit().
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1448 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1449 MacroAssembler _masm(&cbuf);
1450 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1451 int reg = ra_->get_encode(this);
1453 __ addi(as_Register(reg), SP, offset);
1454 /*
1455 if( offset >= 128 ) {
1456 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1457 emit_rm(cbuf, 0x2, reg, 0x04);
1458 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1459 emit_d32(cbuf, offset);
1460 }
1461 else {
1462 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1463 emit_rm(cbuf, 0x1, reg, 0x04);
1464 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1465 emit_d8(cbuf, offset);
1466 }
1467 */
1468 }
1471 //static int sizeof_FFree_Float_Stack_All = -1;
// Byte offset from the start of a runtime call to its return address.
// The call sequence is 6 instructions (load target, jalr, delay-slot nop),
// i.e. exactly one NativeCall of 24 bytes; the assert pins that assumption.
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1489 //=============================================================================
#ifndef PRODUCT
// Debug-only: print the nop padding (4 bytes per nop).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1496 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1497 MacroAssembler _masm(&cbuf);
1498 int i = 0;
1499 for(i = 0; i < _count; i++)
1500 __ nop();
1501 }
// Padding size: 4 bytes per emitted nop.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for nop padding.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1510 //=============================================================================
1512 //=============================================================================
#ifndef PRODUCT
// Debug-only: print the unverified entry point (inline-cache check) sequence.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
1523 #endif
// Emit the Unverified Entry Point: load the receiver's klass (receiver is
// in T0 by convention here) and compare it against the inline-cache klass
// register; on mismatch, jump to the shared IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();       // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size depends on alignment padding; compute generically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1554 //=============================================================================
1556 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// Constants are addressed absolutely on this port, so the base needs no bias.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1562 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called: requires_postalloc_expand() returns false on this port.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the absolute address of the constant-table base into the
// register allocated for this node (patchable 48-bit immediate, relocated
// as internal_pc so the VM can fix it up if the code moves).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// Size of the table-base load: patchable_set48 is a fixed 4-instruction
// (16-byte) sequence.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug-only: textual form of the constant-table base load.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1599 //=============================================================================
#ifndef PRODUCT
// Debug-only: print the prologue sequence (stack bang, RA/FP save, frame
// setup).  Must stay in sync with MachPrologNode::emit().
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson quad store saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prologue: optional stack-overflow bang, save RA/FP below
// the incoming SP, establish the new FP, and allocate the frame.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (UseLoongsonISA) {
    // Quad store: saves both RA and FP in one instruction.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Prologue size varies (stack bang, Loongson ISA); compute it generically.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries the prologue may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1671 %}
1673 //----------ENCODING BLOCK-----------------------------------------------------
1674 // This block specifies the encoding classes used by the compiler to output
1675 // byte streams. Encoding classes generate functions which are called by
1676 // Machine Instruction Nodes in order to generate the bit encoding of the
1677 // instruction. Operands specify their base encoding interface with the
1678 // interface keyword. There are currently supported four interfaces,
1679 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1680 // operand to generate a function which returns its register number when
1681 // queried. CONST_INTER causes an operand to generate a function which
1682 // returns the value of the constant when queried. MEMORY_INTER causes an
1683 // operand to generate four functions which return the Base Register, the
1684 // Index Register, the Scale Value, and the Offset Value of the operand when
1685 // queried. COND_INTER causes an operand to generate six functions which
1686 // return the encoding code (ie - encoding bits for the instruction)
1687 // associated with each basic boolean condition for a conditional instruction.
1688 // Instructions specify two basic values for encoding. They use the
1689 // ins_encode keyword to specify their encoding class (which must be one of
1690 // the class names specified in the encoding block), and they use the
1691 // opcode keyword to specify, in order, their primary, secondary, and
1692 // tertiary opcode. Only the opcode sections which a particular instruction
1693 // needs for encoding need to be specified.
1694 encode %{
  // Load byte, signed (lb / Loongson gslbx).  Selects the cheapest addressing
  // sequence for base + (index << scale) + disp:
  //  - simm16 disp with an index: gslbx if available, else add then lb
  //  - large disp: materialize disp in T9 first
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: AT = base + scaled index,
        // T9 = disp, then index through T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte, unsigned (lbu).  Computes base + (index << scale) + disp via
  // AT/T9 scratch registers; no Loongson-specific form is used here.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from a register (sb / Loongson gssbx).  Note gssbx only takes
  // an 8-bit displacement, hence the three-way split on disp size.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            // gssbx adds its two register operands, so AT/base order is free.
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store an 8-bit immediate byte to memory.  A zero value is stored directly
  // from R0; any other value is first materialized in a scratch register
  // (T9 or AT).  Loongson gssbx is used where its 8-bit disp allows.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS path: AT = base + (index << scale), then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {

        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            // Large disp: fold disp into the address and gssbx with the
            // remaining register as the index.
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp, then gssbx adds base.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store Byte (8-bit immediate) with a trailing memory barrier (volatile
  // byte store).  Emits a store of the immediate to
  // [base + (index << scale) + disp], then sync() to order the store.
  // R0 is stored directly when the immediate is zero; otherwise the value is
  // materialized into AT or T9 (T9 whenever AT already holds part of the
  // address).  AT and T9 are the only scratch registers used.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssbx (Loongson indexed store-byte) only encodes an 8-bit
        // displacement, so the addressing strategy is picked by disp size.
        if ( Assembler::is_simm(disp,8) ) {
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sb's 16-bit offset: add base+index first, then sb.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large displacement: fold disp into the index side, then use the
          // indexed store with a zero offset.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Barrier: the "_sync" variant orders the store (volatile semantics).
    __ sync();
  %}
2127 // Load Short (16bit signed)
  // Load Short (16-bit signed) from [base + (index << scale) + disp] into dst.
  // On Loongson, gslhx (indexed load-halfword, 8-bit displacement) is used
  // where the displacement fits; otherwise lh with an explicitly computed
  // address.  AT and T9 are scratch.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // Displacement fits gslhx's 8-bit immediate.
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // Displacement fits lh's 16-bit offset.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large displacement: fold disp into the index operand of gslhx.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2201 // Load Char (16bit unsigned)
2202 enc_class load_C_enc (mRegI dst, memory mem) %{
2203 MacroAssembler _masm(&cbuf);
2204 int dst = $dst$$reg;
2205 int base = $mem$$base;
2206 int index = $mem$$index;
2207 int scale = $mem$$scale;
2208 int disp = $mem$$disp;
2210 if( index != 0 ) {
2211 if (scale == 0) {
2212 __ daddu(AT, as_Register(base), as_Register(index));
2213 } else {
2214 __ dsll(AT, as_Register(index), scale);
2215 __ daddu(AT, as_Register(base), AT);
2216 }
2217 if( Assembler::is_simm16(disp) ) {
2218 __ lhu(as_Register(dst), AT, disp);
2219 } else {
2220 __ move(T9, disp);
2221 __ addu(AT, AT, T9);
2222 __ lhu(as_Register(dst), AT, 0);
2223 }
2224 } else {
2225 if( Assembler::is_simm16(disp) ) {
2226 __ lhu(as_Register(dst), as_Register(base), disp);
2227 } else {
2228 __ move(T9, disp);
2229 __ daddu(AT, as_Register(base), T9);
2230 __ lhu(as_Register(dst), AT, 0);
2231 }
2232 }
2233 %}
2235 // Store Char (16bit unsigned)
2236 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2237 MacroAssembler _masm(&cbuf);
2238 int src = $src$$reg;
2239 int base = $mem$$base;
2240 int index = $mem$$index;
2241 int scale = $mem$$scale;
2242 int disp = $mem$$disp;
2244 if( index != 0 ) {
2245 if( Assembler::is_simm16(disp) ) {
2246 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2247 if (scale == 0) {
2248 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2249 } else {
2250 __ dsll(AT, as_Register(index), scale);
2251 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2252 }
2253 } else {
2254 if (scale == 0) {
2255 __ addu(AT, as_Register(base), as_Register(index));
2256 } else {
2257 __ dsll(AT, as_Register(index), scale);
2258 __ addu(AT, as_Register(base), AT);
2259 }
2260 __ sh(as_Register(src), AT, disp);
2261 }
2262 } else {
2263 if (scale == 0) {
2264 __ addu(AT, as_Register(base), as_Register(index));
2265 } else {
2266 __ dsll(AT, as_Register(index), scale);
2267 __ addu(AT, as_Register(base), AT);
2268 }
2269 __ move(T9, disp);
2270 if( UseLoongsonISA ) {
2271 __ gsshx(as_Register(src), AT, T9, 0);
2272 } else {
2273 __ addu(AT, AT, T9);
2274 __ sh(as_Register(src), AT, 0);
2275 }
2276 }
2277 } else {
2278 if( Assembler::is_simm16(disp) ) {
2279 __ sh(as_Register(src), as_Register(base), disp);
2280 } else {
2281 __ move(T9, disp);
2282 if( UseLoongsonISA ) {
2283 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2284 } else {
2285 __ addu(AT, as_Register(base), T9);
2286 __ sh(as_Register(src), AT, 0);
2287 }
2288 }
2289 }
2290 %}
2292 enc_class store_C0_enc (memory mem) %{
2293 MacroAssembler _masm(&cbuf);
2294 int base = $mem$$base;
2295 int index = $mem$$index;
2296 int scale = $mem$$scale;
2297 int disp = $mem$$disp;
2299 if( index != 0 ) {
2300 if( Assembler::is_simm16(disp) ) {
2301 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2302 if (scale == 0) {
2303 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2304 } else {
2305 __ dsll(AT, as_Register(index), scale);
2306 __ gsshx(R0, as_Register(base), AT, disp);
2307 }
2308 } else {
2309 if (scale == 0) {
2310 __ addu(AT, as_Register(base), as_Register(index));
2311 } else {
2312 __ dsll(AT, as_Register(index), scale);
2313 __ addu(AT, as_Register(base), AT);
2314 }
2315 __ sh(R0, AT, disp);
2316 }
2317 } else {
2318 if (scale == 0) {
2319 __ addu(AT, as_Register(base), as_Register(index));
2320 } else {
2321 __ dsll(AT, as_Register(index), scale);
2322 __ addu(AT, as_Register(base), AT);
2323 }
2324 __ move(T9, disp);
2325 if( UseLoongsonISA ) {
2326 __ gsshx(R0, AT, T9, 0);
2327 } else {
2328 __ addu(AT, AT, T9);
2329 __ sh(R0, AT, 0);
2330 }
2331 }
2332 } else {
2333 if( Assembler::is_simm16(disp) ) {
2334 __ sh(R0, as_Register(base), disp);
2335 } else {
2336 __ move(T9, disp);
2337 if( UseLoongsonISA ) {
2338 __ gsshx(R0, as_Register(base), T9, 0);
2339 } else {
2340 __ addu(AT, as_Register(base), T9);
2341 __ sh(R0, AT, 0);
2342 }
2343 }
2344 }
2345 %}
2347 enc_class load_I_enc (mRegI dst, memory mem) %{
2348 MacroAssembler _masm(&cbuf);
2349 int dst = $dst$$reg;
2350 int base = $mem$$base;
2351 int index = $mem$$index;
2352 int scale = $mem$$scale;
2353 int disp = $mem$$disp;
2355 if( index != 0 ) {
2356 if( Assembler::is_simm16(disp) ) {
2357 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2358 if (scale == 0) {
2359 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2360 } else {
2361 __ dsll(AT, as_Register(index), scale);
2362 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2363 }
2364 } else {
2365 if (scale == 0) {
2366 __ addu(AT, as_Register(base), as_Register(index));
2367 } else {
2368 __ dsll(AT, as_Register(index), scale);
2369 __ addu(AT, as_Register(base), AT);
2370 }
2371 __ lw(as_Register(dst), AT, disp);
2372 }
2373 } else {
2374 if (scale == 0) {
2375 __ addu(AT, as_Register(base), as_Register(index));
2376 } else {
2377 __ dsll(AT, as_Register(index), scale);
2378 __ addu(AT, as_Register(base), AT);
2379 }
2380 __ move(T9, disp);
2381 if( UseLoongsonISA ) {
2382 __ gslwx(as_Register(dst), AT, T9, 0);
2383 } else {
2384 __ addu(AT, AT, T9);
2385 __ lw(as_Register(dst), AT, 0);
2386 }
2387 }
2388 } else {
2389 if( Assembler::is_simm16(disp) ) {
2390 __ lw(as_Register(dst), as_Register(base), disp);
2391 } else {
2392 __ move(T9, disp);
2393 if( UseLoongsonISA ) {
2394 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2395 } else {
2396 __ addu(AT, as_Register(base), T9);
2397 __ lw(as_Register(dst), AT, 0);
2398 }
2399 }
2400 }
2401 %}
2403 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2404 MacroAssembler _masm(&cbuf);
2405 int src = $src$$reg;
2406 int base = $mem$$base;
2407 int index = $mem$$index;
2408 int scale = $mem$$scale;
2409 int disp = $mem$$disp;
2411 if( index != 0 ) {
2412 if( Assembler::is_simm16(disp) ) {
2413 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2414 if (scale == 0) {
2415 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2416 } else {
2417 __ dsll(AT, as_Register(index), scale);
2418 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2419 }
2420 } else {
2421 if (scale == 0) {
2422 __ addu(AT, as_Register(base), as_Register(index));
2423 } else {
2424 __ dsll(AT, as_Register(index), scale);
2425 __ addu(AT, as_Register(base), AT);
2426 }
2427 __ sw(as_Register(src), AT, disp);
2428 }
2429 } else {
2430 if (scale == 0) {
2431 __ addu(AT, as_Register(base), as_Register(index));
2432 } else {
2433 __ dsll(AT, as_Register(index), scale);
2434 __ addu(AT, as_Register(base), AT);
2435 }
2436 __ move(T9, disp);
2437 if( UseLoongsonISA ) {
2438 __ gsswx(as_Register(src), AT, T9, 0);
2439 } else {
2440 __ addu(AT, AT, T9);
2441 __ sw(as_Register(src), AT, 0);
2442 }
2443 }
2444 } else {
2445 if( Assembler::is_simm16(disp) ) {
2446 __ sw(as_Register(src), as_Register(base), disp);
2447 } else {
2448 __ move(T9, disp);
2449 if( UseLoongsonISA ) {
2450 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2451 } else {
2452 __ addu(AT, as_Register(base), T9);
2453 __ sw(as_Register(src), AT, 0);
2454 }
2455 }
2456 }
2457 %}
  // Store Int (32-bit immediate) to [base + (index << scale) + disp].
  // R0 is stored directly when the immediate is zero; otherwise the value is
  // materialized into AT or T9 (T9 whenever AT already holds part of the
  // address).  Uses Loongson gsswx when available and the displacement fits
  // its 8-bit immediate.  AT and T9 are scratch.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // Pick addressing by displacement size: gsswx (8-bit disp),
        // sw (16-bit disp), or an explicit add plus gsswx with disp 0.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large displacement: fold disp into the index side.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load narrow (compressed) oop: a 32-bit zero-extended load (lwu) from
  // [base + (index << scale) + disp] into dst.  The memory operand must not
  // carry a relocation on its displacement (asserted below).  AT and T9 are
  // scratch; set64 is used for the large-displacement case.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load Pointer (64-bit) from [base + (index << scale) + disp] into dst.
  // The memory operand must not carry a relocation on its displacement
  // (asserted below).  On Loongson, gsldx (indexed load-doubleword) is used
  // where the displacement fits its 8-bit immediate.  AT and T9 are scratch.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large displacement: fold disp into the index operand of gsldx.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit) from register src to
  // [base + (index << scale) + disp].  On Loongson, gssdx (indexed
  // store-doubleword) is used where the displacement fits its 8-bit
  // immediate.  AT and T9 are scratch.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large displacement: fold disp into the index operand of gssdx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store narrow (compressed) oop: a 32-bit store (sw / gsswx) of register
  // src to [base + (index << scale) + disp].  Mirrors store_P_reg_enc but
  // with word-sized stores since compressed oops are 32 bits.  AT and T9
  // are scratch.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large displacement: fold disp into the index operand of gsswx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store NULL Pointer: writes R0 (always zero) as a 64-bit doubleword to
  // [base + (index << scale) + disp].  On Loongson, gssdx is used when the
  // displacement fits its 8-bit immediate.  AT and T9 are scratch.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store zero narrow (compressed) oop: writes R0 as a 32-bit word to
  // [base + (index << scale) + disp].  Plain MIPS encoding only (no Loongson
  // indexed-store variant here).  AT and T9 are scratch.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit) from [base + (index << scale) + disp] into dst.
  // A probing lb from [base + 0] is emitted first so a NULL base faults at a
  // known instruction (implicit null check); the result in AT is discarded.
  // AT and T9 are scratch.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // For implicit null check
    __ lb(AT, as_Register(base), 0);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit) from register src to
  // [base + (index << scale) + disp].  Plain MIPS encoding only.  AT and T9
  // are scratch.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
3013 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3014 MacroAssembler _masm(&cbuf);
3015 int base = $mem$$base;
3016 int index = $mem$$index;
3017 int scale = $mem$$scale;
3018 int disp = $mem$$disp;
3020 if( index != 0 ) {
3021 // For implicit null check
3022 __ lb(AT, as_Register(base), 0);
3024 if (scale == 0) {
3025 __ daddu(AT, as_Register(base), as_Register(index));
3026 } else {
3027 __ dsll(AT, as_Register(index), scale);
3028 __ daddu(AT, as_Register(base), AT);
3029 }
3030 if( Assembler::is_simm16(disp) ) {
3031 __ sd(R0, AT, disp);
3032 } else {
3033 __ move(T9, disp);
3034 __ addu(AT, AT, T9);
3035 __ sd(R0, AT, 0);
3036 }
3037 } else {
3038 if( Assembler::is_simm16(disp) ) {
3039 __ sd(R0, as_Register(base), disp);
3040 } else {
3041 __ move(T9, disp);
3042 __ addu(AT, as_Register(base), T9);
3043 __ sd(R0, AT, 0);
3044 }
3045 }
3046 %}
3048 enc_class store_L_immL_enc (memory mem, immL src) %{
3049 MacroAssembler _masm(&cbuf);
3050 int base = $mem$$base;
3051 int index = $mem$$index;
3052 int scale = $mem$$scale;
3053 int disp = $mem$$disp;
3054 long imm = $src$$constant;
3056 if( index != 0 ) {
3057 if (scale == 0) {
3058 __ daddu(AT, as_Register(base), as_Register(index));
3059 } else {
3060 __ dsll(AT, as_Register(index), scale);
3061 __ daddu(AT, as_Register(base), AT);
3062 }
3063 if( Assembler::is_simm16(disp) ) {
3064 __ set64(T9, imm);
3065 __ sd(T9, AT, disp);
3066 } else {
3067 __ move(T9, disp);
3068 __ addu(AT, AT, T9);
3069 __ set64(T9, imm);
3070 __ sd(T9, AT, 0);
3071 }
3072 } else {
3073 if( Assembler::is_simm16(disp) ) {
3074 __ move(AT, as_Register(base));
3075 __ set64(T9, imm);
3076 __ sd(T9, AT, disp);
3077 } else {
3078 __ move(T9, disp);
3079 __ addu(AT, as_Register(base), T9);
3080 __ set64(T9, imm);
3081 __ sd(T9, AT, 0);
3082 }
3083 }
3084 %}
  // Load Float (32-bit) from [base + (index << scale) + disp] into FPU
  // register dst.  On Loongson, gslwxc1 (indexed FP load-word) is used where
  // the displacement fits its 8-bit immediate; otherwise lwc1 with an
  // explicitly computed address.  AT and T9 are scratch.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
  // Store Float (32-bit) from FPU register src to
  // [base + (index << scale) + disp].  On Loongson, gsswxc1 (indexed FP
  // store-word) is used where the displacement fits its 8-bit immediate;
  // otherwise swc1 with an explicitly computed address.  AT and T9 are
  // scratch.
  enc_class store_F_reg_enc (memory mem, regF src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister src = $src$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswxc1(src, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswxc1(src, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ swc1(src, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswxc1(src, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ swc1(src, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ swc1(src, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswxc1(src, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ swc1(src, AT, 0);
        }
      }
    }
  %}
3198 enc_class load_D_enc (regD dst, memory mem) %{
3199 MacroAssembler _masm(&cbuf);
3200 int base = $mem$$base;
3201 int index = $mem$$index;
3202 int scale = $mem$$scale;
3203 int disp = $mem$$disp;
3204 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3206 if( index != 0 ) {
3207 if( Assembler::is_simm16(disp) ) {
3208 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3209 if (scale == 0) {
3210 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3211 } else {
3212 __ dsll(AT, as_Register(index), scale);
3213 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3214 }
3215 } else {
3216 if (scale == 0) {
3217 __ daddu(AT, as_Register(base), as_Register(index));
3218 } else {
3219 __ dsll(AT, as_Register(index), scale);
3220 __ daddu(AT, as_Register(base), AT);
3221 }
3222 __ ldc1(dst_reg, AT, disp);
3223 }
3224 } else {
3225 if (scale == 0) {
3226 __ daddu(AT, as_Register(base), as_Register(index));
3227 } else {
3228 __ dsll(AT, as_Register(index), scale);
3229 __ daddu(AT, as_Register(base), AT);
3230 }
3231 __ move(T9, disp);
3232 if( UseLoongsonISA ) {
3233 __ gsldxc1(dst_reg, AT, T9, 0);
3234 } else {
3235 __ addu(AT, AT, T9);
3236 __ ldc1(dst_reg, AT, 0);
3237 }
3238 }
3239 } else {
3240 if( Assembler::is_simm16(disp) ) {
3241 __ ldc1(dst_reg, as_Register(base), disp);
3242 } else {
3243 __ move(T9, disp);
3244 if( UseLoongsonISA ) {
3245 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3246 } else {
3247 __ addu(AT, as_Register(base), T9);
3248 __ ldc1(dst_reg, AT, 0);
3249 }
3250 }
3251 }
3252 %}
3254 enc_class store_D_reg_enc (memory mem, regD src) %{
3255 MacroAssembler _masm(&cbuf);
3256 int base = $mem$$base;
3257 int index = $mem$$index;
3258 int scale = $mem$$scale;
3259 int disp = $mem$$disp;
3260 FloatRegister src_reg = as_FloatRegister($src$$reg);
3262 if( index != 0 ) {
3263 if( Assembler::is_simm16(disp) ) {
3264 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3265 if (scale == 0) {
3266 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3267 } else {
3268 __ dsll(AT, as_Register(index), scale);
3269 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3270 }
3271 } else {
3272 if (scale == 0) {
3273 __ daddu(AT, as_Register(base), as_Register(index));
3274 } else {
3275 __ dsll(AT, as_Register(index), scale);
3276 __ daddu(AT, as_Register(base), AT);
3277 }
3278 __ sdc1(src_reg, AT, disp);
3279 }
3280 } else {
3281 if (scale == 0) {
3282 __ daddu(AT, as_Register(base), as_Register(index));
3283 } else {
3284 __ dsll(AT, as_Register(index), scale);
3285 __ daddu(AT, as_Register(base), AT);
3286 }
3287 __ move(T9, disp);
3288 if( UseLoongsonISA ) {
3289 __ gssdxc1(src_reg, AT, T9, 0);
3290 } else {
3291 __ addu(AT, AT, T9);
3292 __ sdc1(src_reg, AT, 0);
3293 }
3294 }
3295 } else {
3296 if( Assembler::is_simm16(disp) ) {
3297 __ sdc1(src_reg, as_Register(base), disp);
3298 } else {
3299 __ move(T9, disp);
3300 if( UseLoongsonISA ) {
3301 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3302 } else {
3303 __ addu(AT, as_Register(base), T9);
3304 __ sdc1(src_reg, AT, 0);
3305 }
3306 }
3307 }
3308 %}
  // Call from compiled Java code into the VM runtime (used by both
  // Java_To_Runtime and Java_To_Runtime_Leaf call nodes).
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info:
    // the mark must be set before the relocation is recorded.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);
    __ patchable_call((address)$meth$$method);
  %}
  // Direct (static) Java call.  Initially targets a fixup routine which
  // resolves the real callee, then the call site is patched.
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Pick the relocation type: no _method means a runtime stub call;
    // otherwise distinguish optimized-virtual from plain static calls.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
3341 /*
3342 * [Ref: LIR_Assembler::ic_call() ]
3343 */
3344 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
3345 MacroAssembler _masm(&cbuf);
3346 __ block_comment("Java_Dynamic_Call");
3347 __ ic_call((address)$meth$$method);
3348 %}
  // Convert the result of a fast lock/unlock into a flags value:
  // flags = 0 if AT == 0, else 0xFFFFFFFF.
  // NOTE(review): assumes the preceding fast-lock/unlock sequence left its
  // result in AT — confirm against the matching instruct rules.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);     // flags = 0
    __ beq(AT, R0, L);          // AT == 0 -> keep flags == 0
    __ delayed()->nop();        // branch delay slot
    __ move(flags, 0xFFFFFFFF);
    __ bind(L);
  %}
  // Slow-path partial subtype check: result = 0 when `sub` is a subtype of
  // `super`, 1 on a miss.  `length` ($tmp) and T9 are scratch registers.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     *  47c   B40: #	B21 B41 <- B20  Freq: 0.155379
     *  47c   	partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     *  4bc   	mov   S2, NULL #@loadConP
     *  4c0   	beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0);     // hit: result = 0
    __ b(done);
    __ nop();               // branch delay slot

    __ bind(miss);
    __ move(result, 1);     // miss: result = 1
    __ bind(done);
  %}
3393 %}
3396 //---------MIPS FRAME--------------------------------------------------------------
3397 // Definition of frame structure and management information.
3398 //
3399 // S T A C K L A Y O U T Allocators stack-slot number
3400 // | (to get allocators register number
3401 // G Owned by | | v add SharedInfo::stack0)
3402 // r CALLER | |
3403 // o | +--------+ pad to even-align allocators stack-slot
3404 // w V | pad0 | numbers; owned by CALLER
3405 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3406 // h ^ | in | 5
3407 // | | args | 4 Holes in incoming args owned by SELF
3408 // | | old | | 3
3409 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3410 // v | | ret | 3 return address
3411 // Owned by +--------+
3412 // Self | pad2 | 2 pad to align old SP
3413 // | +--------+ 1
3414 // | | locks | 0
3415 // | +--------+----> SharedInfo::stack0, even aligned
3416 // | | pad1 | 11 pad to align new SP
3417 // | +--------+
3418 // | | | 10
3419 // | | spills | 9 spills
3420 // V | | 8 (pad0 slot for callee)
3421 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3422 // ^ | out | 7
3423 // | | args | 6 Holes in outgoing args owned by CALLEE
3424 // Owned by new | |
3425 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3426 // | |
3427 //
3428 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3429 // known from SELF's arguments and the Java calling convention.
3430 // Region 6-7 is determined per call site.
3431 // Note 2: If the calling convention leaves holes in the incoming argument
3432 // area, those holes are owned by SELF. Holes in the outgoing area
//            are owned by the CALLEE.  Holes should not be necessary in the
3434 // incoming area, as the Java calling convention is completely under
3435 // the control of the AD file. Doubles can be sorted and packed to
//            avoid holes.  Holes in the outgoing arguments may be necessary for
3437 // varargs C calling conventions.
3438 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3439 // even aligned with pad0 as needed.
3440 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3441 // region 6-11 is even aligned; it may be padded out more so that
3442 // the region from SP to FP meets the minimum stack alignment.
3443 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3444 // alignment. Region 11, pad1, may be dynamically extended so that
3445 // SP meets the minimum alignment.
// Frame definition for the MIPS port: stack direction, reserved registers,
// alignment, and the Java/C calling and return-value conventions.
frame %{

  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information.

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this.
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Same as above, but for calls into C code.
  // SEE CallRuntimeNode::calling_convention for more information.
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
                                       /* -- ,  -- ,       Op_RegN,       Op_RegI,       Op_RegP,       Op_RegF,       Op_RegD,       Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
                                       /* -- ,  -- ,       Op_RegN,       Op_RegI,       Op_RegP,       Op_RegF,       Op_RegD,       Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);      // Required cost attribute
ins_attrib ins_size(32);       // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);   // Required alignment attribute (must be a power of 2)
                               // specifies the alignment that some part of the instruction (not
                               // necessarily the start) requires.  If > 1, a compute_padding()
                               // function must be provided for the instruction
3579 //----------OPERANDS-----------------------------------------------------------
3580 // Operand definitions must precede instruction definitions for correct parsing
3581 // in the ADLC because operands constitute user defined types which are used in
3582 // instruction definitions.
// Vectors
// 64-bit vector operand, allocated in the double FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate (any 32-bit constant)
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest signed 32-bit value (Integer.MAX_VALUE)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 8-bit immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit immediate (fits MIPS I-type instructions)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift amounts
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
// NOTE(review): is_int_mask presumably returns -1 when the value is not a
// contiguous low-bit mask — confirm against Assembler::is_int_mask.
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amounts for the upper long word
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Range shifted by one so that (sub x, imm) can be emitted as (add x, -imm)
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized with an immediate-set sequence
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, loaded from the constant table
// (oops, or constants expensive to synthesize: more than 3 instructions)
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop, cheap to synthesize (<= 3 instructions)
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-oop Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-klass Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow-oop Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate (any 64-bit constant)
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit long immediate
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Range shifted by one so that (sub x, imm) can be emitted as (add x, -imm)
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero (bit-pattern compare, so -0.0f differs)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero (bit-pattern compare, so -0.0 differs)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register (any register in the int_reg class)
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the A-registers (argument registers)
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Pinned single-register integer operands (one operand per physical
// register, used when an instruct rule needs a specific register).
operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow-oop (compressed pointer) register operand.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Pinned single-register narrow-oop operands, one per physical register.
operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
4603 operand v0_RegN() %{
4604 constraint(ALLOC_IN_RC(v0_reg));
4605 match(RegN);
4606 match(mRegN);
4608 format %{ %}
4609 interface(REG_INTER);
4610 %}
4612 operand v1_RegN() %{
4613 constraint(ALLOC_IN_RC(v1_reg));
4614 match(RegN);
4615 match(mRegN);
4617 format %{ %}
4618 interface(REG_INTER);
4619 %}
// Pointer Register
// General pointer register operand: any register in the p_reg class.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register operand excluding T8 (no_T8_p_reg class); used by
// instructions that clobber T8 internally.
operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Single-register pointer (RegP) operands. Each pins allocation to one
// register class and chains from mRegP and — except for T8 itself — from
// no_T8_mRegP.
operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 is deliberately NOT chained from no_T8_mRegP: T8 is the register
// that class excludes.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4903 /*
4904 operand mSPRegP(mRegP reg) %{
4905 constraint(ALLOC_IN_RC(sp_reg));
4906 match(reg);
4908 format %{ "SP" %}
4909 interface(REG_INTER);
4910 %}
4912 operand mFPRegP(mRegP reg) %{
4913 constraint(ALLOC_IN_RC(fp_reg));
4914 match(reg);
4916 format %{ "FP" %}
4917 interface(REG_INTER);
4918 %}
4919 */
// General long register operand: any register in the long_reg class.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}
// Single-register long (RegL) operands, chained from mRegL.
// NOTE(review): no s5RegL/s6RegL/t9RegL variants are defined in this
// section — presumably none of the instruct rules need them; confirm if
// adding rules that pin a long to those registers.
operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands
// Single-precision float register operand (flt_reg class).
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double Precision Floating register operands
// Double-precision float register operand (dbl_reg class).
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Each memory operand describes one addressing form to the matcher via
// MEMORY_INTER: base register, index register, scale (log2) and displacement.

// Indirect Memory Operand: [reg]
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand: [reg + off8]
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  op_cost(10);
  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register: [reg + lreg << scale]
operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset] with the index produced by an int-to-long
// conversion (ConvI2L) folded into the addressing expression.
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Same as indIndexScaleOffset8 but with a ConvI2L-converted int index.
operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
// Indirect Memory Times Scale Plus Index Register Plus Offset Operand,
// with a compressed-oop base (DecodeN). Only valid when the narrow oop
// shift is zero, i.e. decoding is just a base-register add.
operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset] for compressed Oops
operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  predicate(Universe::narrow_oop_shift() == 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
//FIXME: I think it's better to limit the immI to be 16-bit at most!
// Indirect Memory Plus Long Offset Operand: [reg + off32]
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register: [addr + index]
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}
// Memory operands whose base is a compressed klass pointer (DecodeNKlass).
// All require narrow_klass_shift() == 0 so the decode is a plain add.

// [reg] with narrow klass base
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// [reg + off8] with narrow klass base
operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + off32] with narrow klass base
operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + lreg + off32] with narrow klass base
operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

// [reg + lreg] with narrow klass base
operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
// Indirect Memory Operand with a compressed-oop base (DecodeN); requires
// narrow_oop_shift() == 0 so the decode is a plain add.
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand, compressed-oop base
operand indOffset8Narrow(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand, compressed-oop base
operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}
//----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use its address expression again after loading
// the first word of the long. If the load-long destination overlaps with
// registers used in the addressing expression, the 2nd half will be loaded
// from a clobbered address. Fix this by requiring that load-long use
// address registers that do not overlap with the load-long target.

// load-long support: pointer operand with a high op_cost so the allocator
// prefers the normal pointer operands elsewhere.
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long: [reg]
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand: [reg + off32]
// NOTE(review): unlike its siblings this operand declares no
// ALLOC_IN_RC constraint and no op_cost — looks intentional since the
// base operand already constrains allocation, but confirm.
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code (signed); the numeric codes below are the encodings
// instruct rules read back via $cop.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}


// Comparison Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
//------------------------OPERAND CLASSES--------------------------------------
//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// Aggregate "memory" operand class: an instruct rule taking `memory` accepts
// any one of the addressing forms listed here.
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;          // Fixed size instructions
  branch_has_delay_slot;            // branches have a delay slot in gs2
  max_instructions_per_bundle = 1;  // 1 instruction per bundle
  max_bundles_per_cycle = 4;        // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;        // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;      // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);


//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+1;
  DECODE : ID;
  ALU : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  ALU2 : CA;
%}

pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  MEM : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//No.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}


//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

// int -> double conversion
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// double -> int conversion
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// move from FPU to GPR (mfc1)
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD;
%}

// move from GPR to FPU (mtc1)
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

// catch-all for multi-instruction memory sequences (e.g. atomics)
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipeline slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
5933 //----------INSTRUCTIONS-------------------------------------------------------
5934 //
5935 // match -- States which machine-independent subtree may be replaced
5936 // by this instruction.
5937 // ins_cost -- The estimated cost of this instruction is used by instruction
5938 // selection to identify a minimum cost tree of machine
5939 // instructions that matches a tree of machine-independent
5940 // instructions.
5941 // format -- A string providing the disassembly for this instruction.
5942 // The value of an instruction's operand may be inserted
5943 // by referring to it with a '$' prefix.
5944 // opcode -- Three instruction opcodes may be provided. These are referred
5945 // to within an encode class as $primary, $secondary, and $tertiary
5946 // respectively. The primary opcode is commonly used to
5947 // indicate the type of machine instruction, while secondary
5948 // and tertiary are often used for prefix options or addressing
5949 // modes.
5950 // ins_encode -- A list of encode classes with parameters. The encode class
5951 // name must have been defined in an 'enc_class' specification
5952 // in the encode section of the architecture description.
5955 // Load Integer
5956 instruct loadI(mRegI dst, memory mem) %{
5957 match(Set dst (LoadI mem));
5959 ins_cost(125);
5960 format %{ "lw $dst, $mem #@loadI" %}
5961 ins_encode (load_I_enc(dst, mem));
5962 ins_pipe( ialu_loadI );
5963 %}
// Load Integer and sign-extend to Long: lw sign-extends on MIPS64,
// so the ConvI2L is folded into the load for free.
5965 instruct loadI_convI2L(mRegL dst, memory mem) %{
5966 match(Set dst (ConvI2L (LoadI mem)));
5968 ins_cost(125);
5969 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5970 ins_encode (load_I_enc(dst, mem));
5971 ins_pipe( ialu_loadI );
5972 %}
5974 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches (x << 24) >> 24 of a loaded int, i.e. a sign-extending byte load.
5975 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5976 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5978 ins_cost(125);
5979 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5980 ins_encode(load_B_enc(dst, mem));
5981 ins_pipe(ialu_loadI);
5982 %}
5984 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5985 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5986 match(Set dst (AndI (LoadI mem) mask));
5988 ins_cost(125);
5989 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5990 ins_encode(load_UB_enc(dst, mem));
5991 ins_pipe(ialu_loadI);
5992 %}
5994 // Load Integer (32 bit signed) to Short (16 bit signed)
5995 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5996 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5998 ins_cost(125);
5999 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6000 ins_encode(load_S_enc(dst, mem));
6001 ins_pipe(ialu_loadI);
6002 %}
6004 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
6005 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6006 match(Set dst (AndI (LoadI mem) mask));
6008 ins_cost(125);
6009 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6010 ins_encode(load_C_enc(dst, mem));
6011 ins_pipe(ialu_loadI);
6012 %}
6014 // Load Long.
6015 instruct loadL(mRegL dst, memory mem) %{
6016 // predicate(!((LoadLNode*)n)->require_atomic_access());
6017 match(Set dst (LoadL mem));
6019 ins_cost(250);
6020 format %{ "ld $dst, $mem #@loadL" %}
6021 ins_encode(load_L_enc(dst, mem));
6022 ins_pipe( ialu_loadL );
6023 %}
6025 // Load Long - UNaligned
// NOTE(review): currently emits a plain ld like loadL; cost raised to 450 to
// discourage selection. Presumably safe only if unaligned ld traps are handled
// elsewhere — the FIXME below asks for ldl/ldr instead.
6026 instruct loadL_unaligned(mRegL dst, memory mem) %{
6027 match(Set dst (LoadL_unaligned mem));
6029 // FIXME: Jin: Need more effective ldl/ldr
6030 ins_cost(450);
6031 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6032 ins_encode(load_L_enc(dst, mem));
6033 ins_pipe( ialu_loadL );
6034 %}
6036 // Store Long
6037 instruct storeL_reg(memory mem, mRegL src) %{
6038 match(Set mem (StoreL mem src));
6040 ins_cost(200);
6041 format %{ "sd $mem, $src #@storeL_reg\n" %}
6042 ins_encode(store_L_reg_enc(mem, src));
6043 ins_pipe( ialu_storeL );
6044 %}
// Store long zero: uses the zero register, cheaper than materializing a constant.
6046 instruct storeL_immL0(memory mem, immL0 zero) %{
6047 match(Set mem (StoreL mem zero));
6049 ins_cost(180);
6050 format %{ "sd zero, $mem #@storeL_immL0" %}
6051 ins_encode(store_L_immL0_enc(mem, zero));
6052 ins_pipe( ialu_storeL );
6053 %}
6055 instruct storeL_imm(memory mem, immL src) %{
6056 match(Set mem (StoreL mem src));
6058 ins_cost(200);
6059 format %{ "sd $src, $mem #@storeL_imm" %}
6060 ins_encode(store_L_immL_enc(mem, src));
6061 ins_pipe( ialu_storeL );
6062 %}
6064 // Load Compressed Pointer
6065 instruct loadN(mRegN dst, memory mem)
6066 %{
6067 match(Set dst (LoadN mem));
6069 ins_cost(125); // XXX
6070 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6071 ins_encode (load_N_enc(dst, mem));
6072 ins_pipe( ialu_loadI ); // XXX
6073 %}
// Load + decode of a compressed oop in one instruction: valid only when the
// heap base is NULL and shift is 0, so the narrow value IS the raw pointer.
6075 instruct loadN2P(mRegP dst, memory mem)
6076 %{
6077 match(Set dst (DecodeN (LoadN mem)));
6078 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6080 ins_cost(125); // XXX
6081 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6082 ins_encode (load_N_enc(dst, mem));
6083 ins_pipe( ialu_loadI ); // XXX
6084 %}
6086 // Load Pointer
6087 instruct loadP(mRegP dst, memory mem) %{
6088 match(Set dst (LoadP mem));
6090 ins_cost(125);
6091 format %{ "ld $dst, $mem #@loadP" %}
6092 ins_encode (load_P_enc(dst, mem));
6093 ins_pipe( ialu_loadI );
6094 %}
6096 // Load Klass Pointer
6097 instruct loadKlass(mRegP dst, memory mem) %{
6098 match(Set dst (LoadKlass mem));
6100 ins_cost(125);
6101 format %{ "MOV $dst,$mem @ loadKlass" %}
6102 ins_encode (load_P_enc(dst, mem));
6103 ins_pipe( ialu_loadI );
6104 %}
6106 // Load narrow Klass Pointer
6107 instruct loadNKlass(mRegN dst, memory mem)
6108 %{
6109 match(Set dst (LoadNKlass mem));
6111 ins_cost(125); // XXX
6112 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6113 ins_encode (load_N_enc(dst, mem));
6114 ins_pipe( ialu_loadI ); // XXX
6115 %}
// Load + decode of a compressed klass pointer; same zero-base/zero-shift
// restriction as loadN2P above, but for the klass encoding.
6117 instruct loadN2PKlass(mRegP dst, memory mem)
6118 %{
6119 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6120 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6122 ins_cost(125); // XXX
6123 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6124 ins_encode (load_N_enc(dst, mem));
6125 ins_pipe( ialu_loadI ); // XXX
6126 %}
6128 // Load Constant
6129 instruct loadConI(mRegI dst, immI src) %{
6130 match(Set dst src);
6132 ins_cost(150);
6133 format %{ "mov $dst, $src #@loadConI" %}
6134 ins_encode %{
6135 Register dst = $dst$$Register;
6136 int value = $src$$constant;
6137 __ move(dst, value);
6138 %}
6139 ins_pipe( ialu_regI_regI );
6140 %}
// Materialize an arbitrary 64-bit constant (up to a multi-instruction sequence).
6143 instruct loadConL_set64(mRegL dst, immL src) %{
6144 match(Set dst src);
6145 ins_cost(120);
6146 format %{ "li $dst, $src @ loadConL_set64" %}
6147 ins_encode %{
6148 __ set64($dst$$Register, $src$$constant);
6149 %}
6150 ins_pipe(ialu_regL_regL);
6151 %}
6153 /*
6154 // Load long value from constant table (predicated by immL_expensive).
6155 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6156 match(Set dst src);
6157 ins_cost(150);
6158 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6159 ins_encode %{
6160 int con_offset = $constantoffset($src);
6162 if (Assembler::is_simm16(con_offset)) {
6163 __ ld($dst$$Register, $constanttablebase, con_offset);
6164 } else {
6165 __ set64(AT, con_offset);
6166 if (UseLoongsonISA) {
6167 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6168 } else {
6169 __ daddu(AT, $constanttablebase, AT);
6170 __ ld($dst$$Register, AT, 0);
6171 }
6172 }
6173 %}
6174 ins_pipe(ialu_loadI);
6175 %}
6176 */
// 16-bit signed long constant: a single daddiu from the zero register suffices.
6178 instruct loadConL16(mRegL dst, immL16 src) %{
6179 match(Set dst src);
6180 ins_cost(105);
6181 format %{ "mov $dst, $src #@loadConL16" %}
6182 ins_encode %{
6183 Register dst_reg = as_Register($dst$$reg);
6184 int value = $src$$constant;
6185 __ daddiu(dst_reg, R0, value);
6186 %}
6187 ins_pipe( ialu_regL_regL );
6188 %}
// Long zero constant: cheapest form, copy the hardwired zero register.
6191 instruct loadConL0(mRegL dst, immL0 src) %{
6192 match(Set dst src);
6193 ins_cost(100);
6194 format %{ "mov $dst, zero #@loadConL0" %}
6195 ins_encode %{
6196 Register dst_reg = as_Register($dst$$reg);
6197 __ daddu(dst_reg, R0, R0);
6198 %}
6199 ins_pipe( ialu_regL_regL );
6200 %}
6202 // Load Range
// Loads an array length; LoadRange is the array-length projection of a LoadI.
6203 instruct loadRange(mRegI dst, memory mem) %{
6204 match(Set dst (LoadRange mem));
6206 ins_cost(125);
6207 format %{ "MOV $dst,$mem @ loadRange" %}
6208 ins_encode(load_I_enc(dst, mem));
6209 ins_pipe( ialu_loadI );
6210 %}
6213 instruct storeP(memory mem, mRegP src ) %{
6214 match(Set mem (StoreP mem src));
6216 ins_cost(125);
6217 format %{ "sd $src, $mem #@storeP" %}
6218 ins_encode(store_P_reg_enc(mem, src));
6219 ins_pipe( ialu_storeI );
6220 %}
6222 // Store NULL Pointer, mark word, or other simple pointer constant.
6223 instruct storeImmP0(memory mem, immP0 zero) %{
6224 match(Set mem (StoreP mem zero));
6226 ins_cost(125);
6227 format %{ "mov $mem, $zero #@storeImmP0" %}
6228 ins_encode(store_P_immP0_enc(mem));
6229 ins_pipe( ialu_storeI );
6230 %}
6232 // Store Byte Immediate
6233 instruct storeImmB(memory mem, immI8 src) %{
6234 match(Set mem (StoreB mem src));
6236 ins_cost(150);
6237 format %{ "movb $mem, $src #@storeImmB" %}
6238 ins_encode(store_B_immI_enc(mem, src));
6239 ins_pipe( ialu_storeI );
6240 %}
6242 // Store Compressed Pointer
6243 instruct storeN(memory mem, mRegN src)
6244 %{
6245 match(Set mem (StoreN mem src));
6247 ins_cost(125); // XXX
6248 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6249 ins_encode(store_N_reg_enc(mem, src));
6250 ins_pipe( ialu_storeI );
6251 %}
// Encode + store in one instruction; valid only with zero heap base and shift,
// mirroring the loadN2P restriction above.
6253 instruct storeP2N(memory mem, mRegP src)
6254 %{
6255 match(Set mem (StoreN mem (EncodeP src)));
6256 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6258 ins_cost(125); // XXX
6259 format %{ "sw $mem, $src\t# @ storeP2N" %}
6260 ins_encode(store_N_reg_enc(mem, src));
6261 ins_pipe( ialu_storeI );
6262 %}
6264 instruct storeNKlass(memory mem, mRegN src)
6265 %{
6266 match(Set mem (StoreNKlass mem src));
6268 ins_cost(125); // XXX
6269 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6270 ins_encode(store_N_reg_enc(mem, src));
6271 ins_pipe( ialu_storeI );
6272 %}
6274 instruct storeP2NKlass(memory mem, mRegP src)
6275 %{
6276 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6277 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6279 ins_cost(125); // XXX
6280 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6281 ins_encode(store_N_reg_enc(mem, src));
6282 ins_pipe( ialu_storeI );
6283 %}
6285 instruct storeImmN0(memory mem, immN0 zero)
6286 %{
6287 match(Set mem (StoreN mem zero));
6289 ins_cost(125); // XXX
6290 format %{ "storeN0 zero, $mem\t# compressed ptr" %}
6291 ins_encode(storeImmN0_enc(mem, zero));
6292 ins_pipe( ialu_storeI );
6293 %}
6295 // Store Byte
6296 instruct storeB(memory mem, mRegI src) %{
6297 match(Set mem (StoreB mem src));
6299 ins_cost(125);
6300 format %{ "sb $src, $mem #@storeB" %}
6301 ins_encode(store_B_reg_enc(mem, src));
6302 ins_pipe( ialu_storeI );
6303 %}
// Byte store of a truncated long: sb only writes the low 8 bits,
// so the ConvL2I is free and folded into the store.
6305 instruct storeB_convL2I(memory mem, mRegL src) %{
6306 match(Set mem (StoreB mem (ConvL2I src)));
6308 ins_cost(125);
6309 format %{ "sb $src, $mem #@storeB_convL2I" %}
6310 ins_encode(store_B_reg_enc(mem, src));
6311 ins_pipe( ialu_storeI );
6312 %}
6314 // Load Byte (8bit signed)
6315 instruct loadB(mRegI dst, memory mem) %{
6316 match(Set dst (LoadB mem));
6318 ins_cost(125);
6319 format %{ "lb $dst, $mem #@loadB" %}
6320 ins_encode(load_B_enc(dst, mem));
6321 ins_pipe( ialu_loadI );
6322 %}
// lb sign-extends to 64 bits on MIPS64, so ConvI2L folds into the load.
6324 instruct loadB_convI2L(mRegL dst, memory mem) %{
6325 match(Set dst (ConvI2L (LoadB mem)));
6327 ins_cost(125);
6328 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6329 ins_encode(load_B_enc(dst, mem));
6330 ins_pipe( ialu_loadI );
6331 %}
6333 // Load Byte (8bit UNsigned)
6334 instruct loadUB(mRegI dst, memory mem) %{
6335 match(Set dst (LoadUB mem));
6337 ins_cost(125);
6338 format %{ "lbu $dst, $mem #@loadUB" %}
6339 ins_encode(load_UB_enc(dst, mem));
6340 ins_pipe( ialu_loadI );
6341 %}
6343 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6344 match(Set dst (ConvI2L (LoadUB mem)));
6346 ins_cost(125);
6347 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6348 ins_encode(load_UB_enc(dst, mem));
6349 ins_pipe( ialu_loadI );
6350 %}
6352 // Load Short (16bit signed)
6353 instruct loadS(mRegI dst, memory mem) %{
6354 match(Set dst (LoadS mem));
6356 ins_cost(125);
6357 format %{ "lh $dst, $mem #@loadS" %}
6358 ins_encode(load_S_enc(dst, mem));
6359 ins_pipe( ialu_loadI );
6360 %}
6362 // Load Short (16 bit signed) to Byte (8 bit signed)
// Matches (s << 24) >> 24 of a loaded short: narrowing to a signed byte load.
6363 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6364 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6366 ins_cost(125);
6367 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6368 ins_encode(load_B_enc(dst, mem));
6369 ins_pipe(ialu_loadI);
6370 %}
6372 instruct loadS_convI2L(mRegL dst, memory mem) %{
6373 match(Set dst (ConvI2L (LoadS mem)));
6375 ins_cost(125);
6376 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6377 ins_encode(load_S_enc(dst, mem));
6378 ins_pipe( ialu_loadI );
6379 %}
6381 // Store Integer Immediate
6382 instruct storeImmI(memory mem, immI src) %{
6383 match(Set mem (StoreI mem src));
6385 ins_cost(150);
6386 format %{ "mov $mem, $src #@storeImmI" %}
6387 ins_encode(store_I_immI_enc(mem, src));
6388 ins_pipe( ialu_storeI );
6389 %}
6391 // Store Integer
6392 instruct storeI(memory mem, mRegI src) %{
6393 match(Set mem (StoreI mem src));
6395 ins_cost(125);
6396 format %{ "sw $mem, $src #@storeI" %}
6397 ins_encode(store_I_reg_enc(mem, src));
6398 ins_pipe( ialu_storeI );
6399 %}
// sw only writes the low 32 bits, so the ConvL2I truncation folds into the store.
6401 instruct storeI_convL2I(memory mem, mRegL src) %{
6402 match(Set mem (StoreI mem (ConvL2I src)));
6404 ins_cost(125);
6405 format %{ "sw $mem, $src #@storeI_convL2I" %}
6406 ins_encode(store_I_reg_enc(mem, src));
6407 ins_pipe( ialu_storeI );
6408 %}
6410 // Load Float
6411 instruct loadF(regF dst, memory mem) %{
6412 match(Set dst (LoadF mem));
6414 ins_cost(150);
6415 format %{ "loadF $dst, $mem #@loadF" %}
6416 ins_encode(load_F_enc(dst, mem));
6417 ins_pipe( ialu_loadI );
6418 %}
// Materialize a pointer constant, emitting relocation info when the constant
// is an oop or metadata so the GC / class unloading can patch it.
6420 instruct loadConP_general(mRegP dst, immP src) %{
6421 match(Set dst src);
6423 ins_cost(120);
6424 format %{ "li $dst, $src #@loadConP_general" %}
6426 ins_encode %{
6427 Register dst = $dst$$Register;
6428 long* value = (long*)$src$$constant;
6430 if($src->constant_reloc() == relocInfo::metadata_type){
6431 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6432 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6434 __ relocate(rspec);
6435 __ patchable_set48(dst, (long)value);
6436 }else if($src->constant_reloc() == relocInfo::oop_type){
6437 int oop_index = __ oop_recorder()->find_index((jobject)value);
6438 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6440 __ relocate(rspec);
6441 __ patchable_set48(dst, (long)value);
6442 } else if ($src->constant_reloc() == relocInfo::none) {
6443 __ set64(dst, (long)value);
6444 }
// NOTE(review): any other reloc type emits nothing and leaves dst unset —
// presumably unreachable for immP operands, but worth confirming.
6445 %}
6447 ins_pipe( ialu_regI_regI );
6448 %}
6450 /*
6451 instruct loadConP_load(mRegP dst, immP_load src) %{
6452 match(Set dst src);
6454 ins_cost(100);
6455 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6457 ins_encode %{
6459 int con_offset = $constantoffset($src);
6461 if (Assembler::is_simm16(con_offset)) {
6462 __ ld($dst$$Register, $constanttablebase, con_offset);
6463 } else {
6464 __ set64(AT, con_offset);
6465 if (UseLoongsonISA) {
6466 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6467 } else {
6468 __ daddu(AT, $constanttablebase, AT);
6469 __ ld($dst$$Register, AT, 0);
6470 }
6471 }
6472 %}
6474 ins_pipe(ialu_loadI);
6475 %}
6476 */
// Non-oop pointer constant that needs no relocation: plain set64 is enough.
6478 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6479 match(Set dst src);
6481 ins_cost(80);
6482 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6484 ins_encode %{
6485 __ set64($dst$$Register, $src$$constant);
6486 %}
6488 ins_pipe(ialu_regI_regI);
6489 %}
// Safepoint polling page address.
6492 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6493 match(Set dst src);
6495 ins_cost(50);
6496 format %{ "li $dst, $src #@loadConP_poll" %}
6498 ins_encode %{
6499 Register dst = $dst$$Register;
6500 intptr_t value = (intptr_t)$src$$constant;
6502 __ set64(dst, (jlong)value);
6503 %}
6505 ins_pipe( ialu_regI_regI );
6506 %}
6508 instruct loadConP0(mRegP dst, immP0 src)
6509 %{
6510 match(Set dst src);
6512 ins_cost(50);
6513 format %{ "mov $dst, R0\t# ptr" %}
6514 ins_encode %{
6515 Register dst_reg = $dst$$Register;
6516 __ daddu(dst_reg, R0, R0);
6517 %}
6518 ins_pipe( ialu_regI_regI );
6519 %}
6521 instruct loadConN0(mRegN dst, immN0 src) %{
6522 match(Set dst src);
6523 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6524 ins_encode %{
6525 __ move($dst$$Register, R0);
6526 %}
6527 ins_pipe( ialu_regI_regI );
6528 %}
// Compressed oop constant; set_narrow_oop records the oop relocation.
6530 instruct loadConN(mRegN dst, immN src) %{
6531 match(Set dst src);
6533 ins_cost(125);
6534 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6535 ins_encode %{
6536 Register dst = $dst$$Register;
6537 __ set_narrow_oop(dst, (jobject)$src$$constant);
6538 %}
6539 ins_pipe( ialu_regI_regI ); // XXX
6540 %}
6542 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6543 match(Set dst src);
6545 ins_cost(125);
6546 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6547 ins_encode %{
6548 Register dst = $dst$$Register;
6549 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6550 %}
6551 ins_pipe( ialu_regI_regI ); // XXX
6552 %}
6554 //FIXME
6555 // Tail Call; Jump from runtime stub to Java code.
6556 // Also known as an 'interprocedural jump'.
6557 // Target of jump will eventually return to caller.
6558 // TailJump below removes the return address.
6559 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6560 match(TailCall jump_target method_oop );
6561 ins_cost(300);
6562 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6564 ins_encode %{
6565 Register target = $jump_target$$Register;
6566 Register oop = $method_oop$$Register;
6568 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6569 __ push(RA);
// Method oop is passed to the callee in S3; jr has a delay slot, hence the nop.
6571 __ move(S3, oop);
6572 __ jr(target);
6573 __ nop();
6574 %}
6576 ins_pipe( pipe_jump );
6577 %}
6579 // Create exception oop: created by stack-crawling runtime code.
6580 // Created exception is now available to this handler, and is setup
6581 // just prior to jumping to this handler. No code emitted.
6582 instruct CreateException( a0_RegP ex_oop )
6583 %{
6584 match(Set ex_oop (CreateEx));
6586 // use the following format syntax
6587 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6588 ins_encode %{
6589 /* Jin: X86 leaves this function empty */
6590 __ block_comment("CreateException is empty in X86/MIPS");
6591 %}
6592 ins_pipe( empty );
6593 // ins_pipe( pipe_jump );
6594 %}
6597 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6599 - Common try/catch:
6600 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6601 |- V0, V1 are created
6602 |- T9 <= SharedRuntime::exception_handler_for_return_address
6603 `- jr T9
6604 `- the caller's exception_handler
6605 `- jr OptoRuntime::exception_blob
6606 `- here
6607 - Rethrow(e.g. 'unwind'):
6608 * The callee:
6609 |- an exception is triggered during execution
6610 `- exits the callee method through RethrowException node
6611 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6612 `- The callee jumps to OptoRuntime::rethrow_stub()
6613 * In OptoRuntime::rethrow_stub:
6614 |- The VM calls _rethrow_Java to determine the return address in the caller method
6615 `- exits the stub with tailjmpInd
6616 |- pops exception_oop(V0) and exception_pc(V1)
6617 `- jumps to the return address(usually an exception_handler)
6618 * The caller:
6619 `- continues processing the exception_blob with V0/V1
6620 */
6622 /*
6623 Disassembling OptoRuntime::rethrow_stub()
6625 ; locals
6626 0x2d3bf320: addiu sp, sp, 0xfffffff8
6627 0x2d3bf324: sw ra, 0x4(sp)
6628 0x2d3bf328: sw fp, 0x0(sp)
6629 0x2d3bf32c: addu fp, sp, zero
6630 0x2d3bf330: addiu sp, sp, 0xfffffff0
6631 0x2d3bf334: sw ra, 0x8(sp)
6632 0x2d3bf338: sw t0, 0x4(sp)
6633 0x2d3bf33c: sw sp, 0x0(sp)
6635 ; get_thread(S2)
6636 0x2d3bf340: addu s2, sp, zero
6637 0x2d3bf344: srl s2, s2, 12
6638 0x2d3bf348: sll s2, s2, 2
6639 0x2d3bf34c: lui at, 0x2c85
6640 0x2d3bf350: addu at, at, s2
6641 0x2d3bf354: lw s2, 0xffffcc80(at)
6643 0x2d3bf358: lw s0, 0x0(sp)
6644 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6645 0x2d3bf360: sw s2, 0xc(sp)
6647 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6648 0x2d3bf364: lw a0, 0x4(sp)
6649 0x2d3bf368: lw a1, 0xc(sp)
6650 0x2d3bf36c: lw a2, 0x8(sp)
6651 ;; Java_To_Runtime
6652 0x2d3bf370: lui t9, 0x2c34
6653 0x2d3bf374: addiu t9, t9, 0xffff8a48
6654 0x2d3bf378: jalr t9
6655 0x2d3bf37c: nop
6657 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6659 0x2d3bf384: lw s0, 0xc(sp)
6660 0x2d3bf388: sw zero, 0x118(s0)
6661 0x2d3bf38c: sw zero, 0x11c(s0)
6662 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6663 0x2d3bf394: addu s2, s0, zero
6664 0x2d3bf398: sw zero, 0x144(s2)
6665 0x2d3bf39c: lw s0, 0x4(s2)
6666 0x2d3bf3a0: addiu s4, zero, 0x0
6667 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6668 0x2d3bf3a8: nop
6669 0x2d3bf3ac: addiu sp, sp, 0x10
6670 0x2d3bf3b0: addiu sp, sp, 0x8
6671 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6672 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6673 0x2d3bf3bc: lui at, 0x2b48
6674 0x2d3bf3c0: lw at, 0x100(at)
6676 ; tailjmpInd: Restores exception_oop & exception_pc
6677 0x2d3bf3c4: addu v1, ra, zero
6678 0x2d3bf3c8: addu v0, s1, zero
6679 0x2d3bf3cc: jr s3
6680 0x2d3bf3d0: nop
6681 ; Exception:
6682 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6683 0x2d3bf3d8: addiu s1, s1, 0x40
6684 0x2d3bf3dc: addiu s2, zero, 0x0
6685 0x2d3bf3e0: addiu sp, sp, 0x10
6686 0x2d3bf3e4: addiu sp, sp, 0x8
6687 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6688 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6689 0x2d3bf3f0: lui at, 0x2b48
6690 0x2d3bf3f4: lw at, 0x100(at)
6691 ; TailCalljmpInd
6692 __ push(RA); ; to be used in generate_forward_exception()
6693 0x2d3bf3f8: addu t7, s2, zero
6694 0x2d3bf3fc: jr s1
6695 0x2d3bf400: nop
6696 */
6697 // Rethrow exception:
6698 // The exception oop will come in the first argument position.
6699 // Then JUMP (not call) to the rethrow stub code.
6700 instruct RethrowException()
6701 %{
6702 match(Rethrow);
6704 // use the following format syntax
6705 format %{ "JMP rethrow_stub #@RethrowException" %}
6706 ins_encode %{
6707 __ block_comment("@ RethrowException");
// Mark the instruction and attach a runtime-call relocation so the jump
// target can be patched when code is relocated.
6709 cbuf.set_insts_mark();
6710 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6712 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6713 __ patchable_jump((address)OptoRuntime::rethrow_stub());
6714 %}
6715 ins_pipe( pipe_jump );
6716 %}
// Branch on pointer compare against NULL (eq/ne only for unsigned cmp vs R0).
6718 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6719 match(If cmp (CmpP op1 zero));
6720 effect(USE labl);
6722 ins_cost(180);
6723 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6725 ins_encode %{
6726 Register op1 = $op1$$Register;
6727 Register op2 = R0;
// Keep the label as a pointer: it may be NULL before the target is bound.
// The old code did `Label &L = *(...); if (&L)` — a null-check AFTER the
// dereference, and the address of a reference can never be NULL, so the
// fallback branch was dead code and the dereference undefined behavior.
6728 Label* L = $labl$$label;
6729 int flag = $cmp$$cmpcode;
6731 switch(flag)
6732 {
6733 case 0x01: //equal
6734 if (L != NULL)
6735 __ beq(op1, op2, *L);
6736 else
6737 __ beq(op1, op2, (int)0);
6738 break;
6739 case 0x02: //not_equal
6740 if (L != NULL)
6741 __ bne(op1, op2, *L);
6742 else
6743 __ bne(op1, op2, (int)0);
6744 break;
6745 default:
6746 Unimplemented();
6747 }
6748 __ nop(); // branch delay slot
6749 %}
6751 ins_pc_relative(1);
6752 ins_pipe( pipe_alu_branch );
6753 %}
// Branch on decoded narrow-oop compare against NULL; the DecodeN is free
// because with zero base/shift a zero narrow oop IS a NULL pointer.
6755 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
6756 match(If cmp (CmpP (DecodeN op1) zero));
6757 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6758 effect(USE labl);
6760 ins_cost(180);
6761 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
6763 ins_encode %{
6764 Register op1 = $op1$$Register;
6765 Register op2 = R0;
// Test the label pointer itself; dereferencing first and testing the
// reference address (as the old code did) is UB and always-true.
6766 Label* L = $labl$$label;
6767 int flag = $cmp$$cmpcode;
6769 switch(flag)
6770 {
6771 case 0x01: //equal
6772 if (L != NULL)
6773 __ beq(op1, op2, *L);
6774 else
6775 __ beq(op1, op2, (int)0);
6776 break;
6777 case 0x02: //not_equal
6778 if (L != NULL)
6779 __ bne(op1, op2, *L);
6780 else
6781 __ bne(op1, op2, (int)0);
6782 break;
6783 default:
6784 Unimplemented();
6785 }
6786 __ nop(); // branch delay slot
6787 %}
6789 ins_pc_relative(1);
6790 ins_pipe( pipe_alu_branch );
6791 %}
// Branch on unsigned pointer compare (eq/ne/above/above_eq/below/below_eq).
// Unsigned orderings are synthesized with sltu into AT, then beq/bne vs R0.
6794 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6795 match(If cmp (CmpP op1 op2));
6796 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6797 effect(USE labl);
6799 ins_cost(200);
6800 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6802 ins_encode %{
6803 Register op1 = $op1$$Register;
6804 Register op2 = $op2$$Register;
// May be NULL before the branch target is bound — check the pointer, do not
// dereference first (old `if (&L)` test was always true / UB).
6805 Label* L = $labl$$label;
6806 int flag = $cmp$$cmpcode;
6808 switch(flag)
6809 {
6810 case 0x01: //equal
6811 if (L != NULL)
6812 __ beq(op1, op2, *L);
6813 else
6814 __ beq(op1, op2, (int)0);
6815 break;
6816 case 0x02: //not_equal
6817 if (L != NULL)
6818 __ bne(op1, op2, *L);
6819 else
6820 __ bne(op1, op2, (int)0);
6821 break;
6822 case 0x03: //above
6823 __ sltu(AT, op2, op1);
6824 if (L != NULL)
6825 __ bne(R0, AT, *L);
6826 else
6827 __ bne(R0, AT, (int)0);
6828 break;
6829 case 0x04: //above_equal
6830 __ sltu(AT, op1, op2);
6831 if (L != NULL)
6832 __ beq(AT, R0, *L);
6833 else
6834 __ beq(AT, R0, (int)0);
6835 break;
6836 case 0x05: //below
6837 __ sltu(AT, op1, op2);
6838 if (L != NULL)
6839 __ bne(R0, AT, *L);
6840 else
6841 __ bne(R0, AT, (int)0);
6842 break;
6843 case 0x06: //below_equal
6844 __ sltu(AT, op2, op1);
6845 if (L != NULL)
6846 __ beq(AT, R0, *L);
6847 else
6848 __ beq(AT, R0, (int)0);
6849 break;
6850 default:
6851 Unimplemented();
6852 }
6853 __ nop(); // branch delay slot
6854 %}
6856 ins_pc_relative(1);
6857 ins_pipe( pipe_alu_branch );
6858 %}
// Branch on compressed-oop compare against NULL (eq/ne only).
6860 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6861 match(If cmp (CmpN op1 null));
6862 effect(USE labl);
6864 ins_cost(180);
6865 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6866 "BP$cmp $labl @ cmpN_null_branch" %}
6867 ins_encode %{
6868 Register op1 = $op1$$Register;
6869 Register op2 = R0;
// Check the label pointer before dereferencing; `if (&L)` after `*ptr`
// was an always-true null-check on a reference address (UB).
6870 Label* L = $labl$$label;
6871 int flag = $cmp$$cmpcode;
6873 switch(flag)
6874 {
6875 case 0x01: //equal
6876 if (L != NULL)
6877 __ beq(op1, op2, *L);
6878 else
6879 __ beq(op1, op2, (int)0);
6880 break;
6881 case 0x02: //not_equal
6882 if (L != NULL)
6883 __ bne(op1, op2, *L);
6884 else
6885 __ bne(op1, op2, (int)0);
6886 break;
6887 default:
6888 Unimplemented();
6889 }
6890 __ nop(); // branch delay slot
6891 %}
6892 //TODO: pipe_branchP or create pipe_branchN LEE
6893 ins_pc_relative(1);
6894 ins_pipe( pipe_alu_branch );
6895 %}
// Branch on compressed-oop register-register compare; unsigned orderings
// synthesized with sltu into AT.
6897 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6898 match(If cmp (CmpN op1 op2));
6899 effect(USE labl);
6901 ins_cost(180);
6902 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6903 "BP$cmp $labl" %}
6904 ins_encode %{
6905 Register op1_reg = $op1$$Register;
6906 Register op2_reg = $op2$$Register;
// Label may be unbound (NULL); test the pointer, not a dereferenced
// reference whose address is never NULL (the old `if (&L)` pattern).
6907 Label* L = $labl$$label;
6908 int flag = $cmp$$cmpcode;
6910 switch(flag)
6911 {
6912 case 0x01: //equal
6913 if (L != NULL)
6914 __ beq(op1_reg, op2_reg, *L);
6915 else
6916 __ beq(op1_reg, op2_reg, (int)0);
6917 break;
6918 case 0x02: //not_equal
6919 if (L != NULL)
6920 __ bne(op1_reg, op2_reg, *L);
6921 else
6922 __ bne(op1_reg, op2_reg, (int)0);
6923 break;
6924 case 0x03: //above
6925 __ sltu(AT, op2_reg, op1_reg);
6926 if (L != NULL)
6927 __ bne(R0, AT, *L);
6928 else
6929 __ bne(R0, AT, (int)0);
6930 break;
6931 case 0x04: //above_equal
6932 __ sltu(AT, op1_reg, op2_reg);
6933 if (L != NULL)
6934 __ beq(AT, R0, *L);
6935 else
6936 __ beq(AT, R0, (int)0);
6937 break;
6938 case 0x05: //below
6939 __ sltu(AT, op1_reg, op2_reg);
6940 if (L != NULL)
6941 __ bne(R0, AT, *L);
6942 else
6943 __ bne(R0, AT, (int)0);
6944 break;
6945 case 0x06: //below_equal
6946 __ sltu(AT, op2_reg, op1_reg);
6947 if (L != NULL)
6948 __ beq(AT, R0, *L);
6949 else
6950 __ beq(AT, R0, (int)0);
6951 break;
6952 default:
6953 Unimplemented();
6954 }
6955 __ nop(); // branch delay slot
6956 %}
6957 ins_pc_relative(1);
6958 ins_pipe( pipe_alu_branch );
6959 %}
// Branch on UNSIGNED int register-register compare (sltu for the orderings).
6961 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6962 match( If cmp (CmpU src1 src2) );
6963 effect(USE labl);
6964 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6966 ins_encode %{
6967 Register op1 = $src1$$Register;
6968 Register op2 = $src2$$Register;
// NULL until the target label is bound; check the pointer rather than
// dereferencing first and testing `&L` (always true, UB on NULL).
6969 Label* L = $labl$$label;
6970 int flag = $cmp$$cmpcode;
6972 switch(flag)
6973 {
6974 case 0x01: //equal
6975 if (L != NULL)
6976 __ beq(op1, op2, *L);
6977 else
6978 __ beq(op1, op2, (int)0);
6979 break;
6980 case 0x02: //not_equal
6981 if (L != NULL)
6982 __ bne(op1, op2, *L);
6983 else
6984 __ bne(op1, op2, (int)0);
6985 break;
6986 case 0x03: //above
6987 __ sltu(AT, op2, op1);
6988 if (L != NULL)
6989 __ bne(AT, R0, *L);
6990 else
6991 __ bne(AT, R0, (int)0);
6992 break;
6993 case 0x04: //above_equal
6994 __ sltu(AT, op1, op2);
6995 if (L != NULL)
6996 __ beq(AT, R0, *L);
6997 else
6998 __ beq(AT, R0, (int)0);
6999 break;
7000 case 0x05: //below
7001 __ sltu(AT, op1, op2);
7002 if (L != NULL)
7003 __ bne(AT, R0, *L);
7004 else
7005 __ bne(AT, R0, (int)0);
7006 break;
7007 case 0x06: //below_equal
7008 __ sltu(AT, op2, op1);
7009 if (L != NULL)
7010 __ beq(AT, R0, *L);
7011 else
7012 __ beq(AT, R0, (int)0);
7013 break;
7014 default:
7015 Unimplemented();
7016 }
7017 __ nop(); // branch delay slot
7018 %}
7020 ins_pc_relative(1);
7021 ins_pipe( pipe_alu_branch );
7022 %}
// Branch on UNSIGNED int compare vs immediate; the immediate is first
// materialized into AT, which the sltu cases then reuse as both source
// and destination (safe: AT's old value is consumed before it is written).
7025 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7026 match( If cmp (CmpU src1 src2) );
7027 effect(USE labl);
7028 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7030 ins_encode %{
7031 Register op1 = $src1$$Register;
7032 int val = $src2$$constant;
// Keep the label as a (possibly NULL) pointer; the previous
// dereference-then-`if (&L)` idiom was UB with a dead fallback.
7033 Label* L = $labl$$label;
7034 int flag = $cmp$$cmpcode;
7036 __ move(AT, val);
7037 switch(flag)
7038 {
7039 case 0x01: //equal
7040 if (L != NULL)
7041 __ beq(op1, AT, *L);
7042 else
7043 __ beq(op1, AT, (int)0);
7044 break;
7045 case 0x02: //not_equal
7046 if (L != NULL)
7047 __ bne(op1, AT, *L);
7048 else
7049 __ bne(op1, AT, (int)0);
7050 break;
7051 case 0x03: //above
7052 __ sltu(AT, AT, op1);
7053 if (L != NULL)
7054 __ bne(R0, AT, *L);
7055 else
7056 __ bne(R0, AT, (int)0);
7057 break;
7058 case 0x04: //above_equal
7059 __ sltu(AT, op1, AT);
7060 if (L != NULL)
7061 __ beq(AT, R0, *L);
7062 else
7063 __ beq(AT, R0, (int)0);
7064 break;
7065 case 0x05: //below
7066 __ sltu(AT, op1, AT);
7067 if (L != NULL)
7068 __ bne(R0, AT, *L);
7069 else
7070 __ bne(R0, AT, (int)0);
7071 break;
7072 case 0x06: //below_equal
7073 __ sltu(AT, AT, op1);
7074 if (L != NULL)
7075 __ beq(AT, R0, *L);
7076 else
7077 __ beq(AT, R0, (int)0);
7078 break;
7079 default:
7080 Unimplemented();
7081 }
7082 __ nop(); // branch delay slot
7083 %}
7085 ins_pc_relative(1);
7086 ins_pipe( pipe_alu_branch );
7087 %}
// Branch on SIGNED int register-register compare (slt for the orderings).
7089 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7090 match( If cmp (CmpI src1 src2) );
7091 effect(USE labl);
7092 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7094 ins_encode %{
7095 Register op1 = $src1$$Register;
7096 Register op2 = $src2$$Register;
// NULL until bound; test the pointer before dereferencing (the old
// `Label &L = *(...); if (&L)` check could never be false).
7097 Label* L = $labl$$label;
7098 int flag = $cmp$$cmpcode;
7100 switch(flag)
7101 {
7102 case 0x01: //equal
7103 if (L != NULL)
7104 __ beq(op1, op2, *L);
7105 else
7106 __ beq(op1, op2, (int)0);
7107 break;
7108 case 0x02: //not_equal
7109 if (L != NULL)
7110 __ bne(op1, op2, *L);
7111 else
7112 __ bne(op1, op2, (int)0);
7113 break;
7114 case 0x03: //above
7115 __ slt(AT, op2, op1);
7116 if (L != NULL)
7117 __ bne(R0, AT, *L);
7118 else
7119 __ bne(R0, AT, (int)0);
7120 break;
7121 case 0x04: //above_equal
7122 __ slt(AT, op1, op2);
7123 if (L != NULL)
7124 __ beq(AT, R0, *L);
7125 else
7126 __ beq(AT, R0, (int)0);
7127 break;
7128 case 0x05: //below
7129 __ slt(AT, op1, op2);
7130 if (L != NULL)
7131 __ bne(R0, AT, *L);
7132 else
7133 __ bne(R0, AT, (int)0);
7134 break;
7135 case 0x06: //below_equal
7136 __ slt(AT, op2, op1);
7137 if (L != NULL)
7138 __ beq(AT, R0, *L);
7139 else
7140 __ beq(AT, R0, (int)0);
7141 break;
7142 default:
7143 Unimplemented();
7144 }
7145 __ nop(); // branch delay slot
7146 %}
7148 ins_pc_relative(1);
7149 ins_pipe( pipe_alu_branch );
7150 %}
// Signed 32-bit compare-and-branch against the constant zero.
// Cheaper than branchConI_reg_imm (cost 170 vs 200) because MIPS has dedicated
// compare-with-zero branches (bgtz/bgez/bltz/blez) — no scratch compare needed.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // int val = $src2$$constant;
    Label   &L = *($labl$$label);
    int      flag = $cmp$$cmpcode;

    //__ move(AT, val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 32-bit compare-and-branch: register vs. general immediate.
// The immediate is materialized into AT, then slt computes a 0/1 flag that is
// branched on for the relational cases. cmpcode: 0x01 eq, 0x02 ne, 0x03 gt,
// 0x04 ge, 0x05 lt, 0x06 le. The zero-immediate case is handled by the
// cheaper branchConI_reg_imm0 above.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int     val = $src2$$constant;
    Label  &L = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    __ move(AT, val);  // materialize the immediate
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);   // AT = (imm < op1)
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);   // AT = (op1 < imm); branch when NOT set
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);   // AT = (op1 < imm)
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);   // AT = (imm < op1); branch when NOT set
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned 32-bit compare-and-branch against zero. The unsigned relations
// against 0 degenerate: "above" is just != 0, "above_equal" is always true
// (emitted as the unconditional beq R0,R0), "below" is never true (nothing is
// emitted — note the bare `return`, which also skips the trailing delay-slot
// nop; the following `break` is unreachable), and "below_equal" is == 0.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label   &L = *($labl$$label);
    int      flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        // u> 0  <=>  != 0
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // u>= 0 is always true: unconditional branch
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // u< 0 is never true: emit nothing (early return, no delay-slot nop)
        return;
        break;
      case 0x06: //below_equal
        // u<= 0  <=>  == 0
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned 32-bit compare-and-branch: register vs. 16-bit immediate.
// Where the relation allows (above_equal/below), sltiu encodes the immediate
// directly, saving the `move AT, val`; the other cases (eq/ne/above/
// below_equal) still materialize the immediate into AT first.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int     val = $src2$$constant;
    Label  &L = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);     // AT = (imm u< op1)
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);   // immediate fits: AT = (op1 u< imm); branch when NOT set
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);   // immediate fits: AT = (op1 u< imm)
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);     // AT = (imm u< op1); branch when NOT set
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare-and-branch: long register vs. long register.
// Unlike the 32-bit branch instructs above (which emit one trailing nop after
// the switch), each case here fills its own branch delay slot with
// `delayed()->nop()`.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int    flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);   // AT = (op2 < op1)
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);   // AT = (op1 < op2); branch when NOT set
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);   // AT = (op1 < op2)
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);   // AT = (op2 < op1); branch when NOT set
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare-and-branch against the constant zero. Uses the
// dedicated compare-with-zero branches (bgtz/bgez/blez) where available; only
// the "less" case goes through slt (NOTE(review): bltz would serve here too —
// presumably equivalent; verify before changing emitted code). A single
// `delayed()->nop()` after the switch fills the delay slot of whichever
// branch was emitted.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label   &target = *($labl$$label);
    int      flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);   // AT = (op1 < 0)
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare-and-branch: long register vs. arbitrary 64-bit
// immediate. set64 materializes the full constant into AT, after which the
// shape matches branchConL_regL_regL (slt + bne/beq per relation). The
// trailing nop fills the branch delay slot.
instruct branchConL_regL_immL(cmpOp cmp, mRegL src1, immL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL" %}
  ins_cost(180);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = AT;  // the immediate lives in the scratch register

    Label &target = *($labl$$label);
    int    flag = $cmp$$cmpcode;

    __ set64(opr2_reg, $src2$$constant);  // load full 64-bit constant into AT

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        break;

      case 0x03: //greater
        // Note: AT doubles as compare operand and flag destination; safe
        // because slt reads both sources before writing the destination.
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        break;

      default:
        Unimplemented();
    }
    __ nop();  // fill the MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Single-float compare-and-branch via the FPU condition flag (c.cond.s sets
// the flag, bc1t/bc1f branches on it). NaN handling (Java semantics: any
// comparison with NaN is false, except != which is true):
//  - eq: c_eq_s is false on unordered, bc1t  -> NaN does not branch.
//  - ne: c_eq_s is false on unordered, bc1f  -> NaN branches (correct for !=).
//  - gt: c_ule_s is TRUE on unordered, bc1f  -> NaN does not branch.
//  - ge: c_ult_s is TRUE on unordered, bc1f  -> NaN does not branch.
//  - lt: c_ult_s true on unordered, bc1t     -> NOTE(review): NaN would
//        branch here; presumably the matcher never selects the raw "less"
//        code for float (unordered variants lt/le are used instead) — verify.
//  - le: c_ule_s true on unordered, bc1t     -> same note as lt.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label        &L = *($labl$$label);
    int           flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Double-float compare-and-branch via the FPU condition flag. Mirrors
// branchConF_reg_reg but with the double-precision compares (c.cond.d).
// See that instruct for the NaN/unordered analysis of each case.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label        &L = *($labl$$label);
    int           flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime; the actual call sequence is produced by the
// shared Java_To_Runtime encoding. ins_alignment(16) keeps the call site on a
// 16-byte boundary (NOTE(review): presumably for patching/icache reasons on
// this target — verify against the encoding class).
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors

// Acquire barrier: emits no code (size 0). On this port a plain acquire is
// satisfied without an explicit sync instruction; contrast with load_fence
// below, which does emit sync.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: full hardware barrier via the MIPS `sync` instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with a lock: empty, because the CAS inside the
// preceding FastLock already provides the acquire semantics.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: emits a real `sync`. Required for correctness of
// volatile/ordered stores on this target — do not remove.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(400);

  format %{ "MEMBAR-release @ membar_release" %}

  ins_encode %{
    // Attention: DO NOT DELETE THIS GUY!
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// StoreFence: full hardware barrier via the MIPS `sync` instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with an unlock: empty, because the release is
// performed inside the following FastUnlock sequence.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Full volatile barrier: `sync`, skipped entirely on single-processor
// systems where no inter-CPU ordering is needed.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier elided when the matcher proves a store-load barrier
// already follows (post_store_load_barrier) — zero-size, zero-cost.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// StoreStore barrier: empty on this port (NOTE(review): assumes the target's
// store ordering makes an explicit sync unnecessary here — verify for the
// specific MIPS implementation).
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------
// Reinterpret a long (machine word) as a pointer. Pure register move,
// elided entirely when source and destination allocate to the same register.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)         // same register: no code needed
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// Reinterpret a pointer as a long (machine word). Pure register move,
// elided when source and destination coincide.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)         // same register: no code needed
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// Bit-move a float's raw 32 bits from an FPU register into a GPR (mfc1).
// No conversion is performed — this implements Float.floatToRawIntBits-style
// reinterpretation at the ideal-graph level.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register      dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-move a GPR's 32 bits into an FPU register (mtc1) — raw
// reinterpretation, no numeric conversion.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register      src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-move a double's raw 64 bits from an FPU register into a GPR (dmfc1) —
// raw reinterpretation, no numeric conversion.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register      dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-move a GPR's 64 bits into a double FPU register (dmtc1) — raw
// reinterpretation, no numeric conversion.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register      src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// CMoveI on a signed int compare. A 0/1 (or zero/non-zero) flag is computed
// into AT, then movz (move-if-zero) / movn (move-if-nonzero) copies src into
// dst only when the condition holds; dst otherwise keeps its prior value
// (which is why dst appears on both sides of the Binary in match()).
// subu32 is the 32-bit subtract used for eq/ne on int-width values.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpI_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);      // AT = (op2 < op1)
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);      // AT = (op1 < op2); move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);      // AT = (op2 < op1); move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI on an unsigned pointer compare. Full-width subu for eq/ne (pointers
// are 64-bit) and sltu for the unsigned relations; movz/movn perform the
// conditional move as in cmovI_cmpI_reg_reg.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovI_cmpP_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);    // 64-bit subtract: AT == 0 iff pointers equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);    // AT = (op2 u< op1)
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);    // AT = (op1 u< op2); move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);    // AT = (op2 u< op1); move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI on an unsigned compressed-oop (narrow pointer) compare. Narrow oops
// are 32-bit, hence subu32 for eq/ne; unsigned relations via sltu.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovI_cmpN_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // 32-bit subtract: AT == 0 iff equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP on an unsigned 32-bit int compare: subu32 for eq/ne, sltu for the
// unsigned relations, movz/movn for the conditional move.
// NOTE(review): the relational cases use the 64-bit sltu on int operands —
// presumably the int values are kept properly sign/zero-extended in 64-bit
// registers so this is equivalent; also cmovI_cmpU_reg_reg below uses the
// 64-bit subu for the same CmpU eq/ne where this one uses subu32 — verify
// the intended convention.
instruct cmovP_cmpU_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovP_cmpU_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovP_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP on a single-float compare: the FPU flag is set by a c.cond.s
// compare, then movt (move-if-flag-true) / movf (move-if-flag-false) performs
// the conditional GPR move.
// NOTE(review): greater/greater_equal use ORDERED compares (c_ole/c_olt) with
// movf, so an unordered (NaN) operand makes the compare false and the move IS
// taken — the branch variants (branchConF_reg_reg) use the unordered compares
// and treat NaN the opposite way; verify which NaN behavior is intended here.
instruct cmovP_cmpF_reg_reg(mRegP dst, mRegP src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpF_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register      dst = $dst$$Register;
    Register      src = $src$$Register;
    int           flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);      // NaN -> c_eq false -> move (NaN != x is true)
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// CMoveP on an unsigned compressed-oop compare: subu32 (narrow oops are
// 32-bit) for eq/ne, sltu for the unsigned relations, movz/movn for the move.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovP_cmpN_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN on an unsigned full-width pointer compare: 64-bit subu for eq/ne,
// sltu for the unsigned relations, movz/movn for the conditional move.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovN_cmpP_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);    // 64-bit subtract: AT == 0 iff pointers equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);    // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);    // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP on a double compare: c.cond.d sets the FPU flag, movt/movf performs
// the conditional GPR move. Mirrors cmovP_cmpF_reg_reg — see that instruct
// for the NOTE(review) about ordered compares (c_ole/c_olt) + movf taking the
// move on NaN, which differs from the branch variants' unordered compares.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpD_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register      dst = as_Register($dst$$reg);
    Register      src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);      // NaN -> c_eq false -> move (NaN != x is true)
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN on an unsigned compressed-oop compare: subu32 for eq/ne (narrow
// oops are 32-bit), sltu for the unsigned relations, movz/movn for the move.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovN_cmpN_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI on an unsigned 32-bit int compare: subu for eq/ne, sltu for the
// unsigned relations, movz/movn for the conditional move.
// NOTE(review): eq/ne here use the 64-bit subu while cmovP_cmpU_reg_reg uses
// subu32 for the same CmpU — presumably equivalent if int values stay
// canonically extended in 64-bit registers; verify the intended convention.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop  $tmp1,$tmp2\t  @cmovI_cmpU_reg_reg\n\t"
    "CMOV  $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);     // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI on a signed 64-bit long compare: 64-bit subu for eq/ne, signed slt
// for the relations, movz/movn for the conditional move.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpL_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);    // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);    // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP on a signed 64-bit long compare: identical encoding scheme to
// cmovI_cmpL_reg_reg (subu for eq/ne, slt for relations, movz/movn move),
// but the destination/source are pointer registers.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpL_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);    // move when NOT set
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);    // move when NOT set
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register keyed off a double compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Uses the FPU condition flag (c.cond.d) plus movt/movf (move on FP flag
// true/false); no GPR scratch is needed.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Implemented as !(a == b) so that unordered (NaN) operands count as
        // not-equal; mirrors the 2016/4/19 (aoqi) fix in branchConD_reg_reg.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);  // move when !(a <= b, ordered)
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);  // move when !(a < b, ordered)
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);  // unordered-or-less-than
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);  // unordered-or-less-or-equal
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed off an unsigned pointer compare:
//   if (tmp1 <cop>u tmp2) dst = src;
// Pointers compare unsigned, hence sltu for the ordered cases. Test is
// materialized in AT and consumed by movz/movn. Clobbers AT.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);       // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);       // unsigned compare
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed off a SIGNED 32-bit int compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Note: the original case labels said above/below, but cmpOp/CmpI is a signed
// compare and the code correctly uses slt — labels fixed to greater/less.
// Clobbers AT.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);     // 32-bit subtract; AT == 0 iff equal
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);        // signed
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow (compressed) oop keyed off a signed 64-bit
// compare: if (tmp1 <cop> tmp2) dst = src;
// Test materialized in AT, consumed by movz/movn. Clobbers AT.
instruct cmovN_cmpL_reg_reg(mRegN dst, mRegN src, mRegL tmp1, mRegL tmp2, cmpOp cop) %{
  match(Set dst (CMoveN (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovN_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovN_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);     // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);      // signed
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop keyed off a SIGNED 32-bit int compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Note: the original case labels said above/below, but cmpOp/CmpI is a signed
// compare and the code correctly uses slt — labels fixed to greater/less.
// Clobbers AT.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);     // 32-bit subtract; AT == 0 iff equal
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);        // signed
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed off an UNSIGNED 32-bit int compare:
//   if (tmp1 <cop>u tmp2) dst = src;
// Unsigned ordered cases use sltu. Test materialized in AT, consumed by
// movz/movn. Clobbers AT.
instruct cmovL_cmpU_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpU_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovL_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);       // unsigned
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed off a float compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Uses the FPU condition flag (c.cond.s) with movt/movf. The unordered
// (NaN) handling follows the same scheme as cmovI_cmpD_reg_reg.
instruct cmovL_cmpF_reg_reg(mRegL dst, mRegL src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpF_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // !(a == b): NaN operands count as not-equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);   // move when !(a <= b, ordered)
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);   // move when !(a < b, ordered)
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);   // unordered-or-less-than
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);   // unordered-or-less-or-equal
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed off a signed 32-bit int compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Test materialized in AT, consumed by movz/movn. Clobbers AT.
// (Original "great"/"great_equal" labels normalized to greater/greater_equal.)
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);        // signed
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed off a signed 64-bit compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Test materialized in AT, consumed by movz/movn. Clobbers AT.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);     // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);      // signed
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed off an unsigned narrow-oop compare:
//   if (tmp1 <cop>u tmp2) dst = src;
// Narrow oops compare unsigned, hence sltu. Test materialized in AT,
// consumed by movz/movn. Clobbers AT.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);       // unsigned
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed off a double compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Uses the FPU condition flag (c.cond.d) with movt/movf; NaN handling as in
// cmovI_cmpD_reg_reg.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // !(a == b): NaN operands count as not-equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);   // move when !(a <= b, ordered)
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);   // move when !(a < b, ordered)
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);   // unordered-or-less-than
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);   // unordered-or-less-or-equal
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed off a double compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Fully FPU-resident: c.cond.d sets the FP flag, movt_d/movf_d move the
// double register on flag true/false.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        // !(a == b): NaN operands count as not-equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);   // move when !(a <= b, ordered)
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);   // move when !(a < b, ordered)
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);   // unordered-or-less-than
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);   // unordered-or-less-or-equal
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed off a signed 32-bit int compare:
//   if (tmp1 <cop> tmp2) dst = src;
// There is no GPR-conditioned FP move on this target, so this is done with a
// branch around mov_s: branch on the NEGATED condition to skip the move.
// Each branch explicitly fills its delay slot with nop — do not reorder.
// Clobbers AT in the ordered cases.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
            "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);       // skip the move when op1 != op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);      // AT = (op2 < op1), signed
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed off a signed 32-bit int compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Implemented as a branch (on the negated condition) around mov_d, since
// there is no GPR-conditioned FP move. Delay slots are explicit nops —
// do not reorder. Clobbers AT in the ordered cases.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
            "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);       // skip the move when op1 != op2
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);      // signed
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed off a pointer compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Branch (on the negated condition) around mov_d; explicit nop delay slots.
// NOTE(review): the ordered cases use SIGNED slt, while cmovP_cmpP_reg_reg
// compares pointers with unsigned sltu. Presumably only eq/ne are ever
// generated for pointer CMoves — verify before relying on the ordered cases.
// Clobbers AT in the ordered cases.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
            "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);       // skip the move when op1 != op2
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9439 //FIXME
// Conditional move of an int keyed off a float compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Uses the FPU condition flag (c.cond.s) with movt/movf; NaN handling as in
// cmovI_cmpD_reg_reg.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // !(a == b): NaN operands count as not-equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);   // move when !(a <= b, ordered)
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);   // move when !(a < b, ordered)
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);   // unordered-or-less-than
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);   // unordered-or-less-or-equal
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed off a float compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Fully FPU-resident: c.cond.s sets the FP flag, movt_s/movf_s move the
// single-precision register on flag true/false.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        // !(a == b): NaN operands count as not-equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);   // move when !(a <= b, ordered)
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);   // move when !(a < b, ordered)
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);   // unordered-or-less-than
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);   // unordered-or-less-or-equal
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9536 // Manifest a CmpL result in an integer register. Very painful.
9537 // This is the test to avoid.
// Manifest the 3-way result of a signed 64-bit compare in an int register:
//   dst = (src1 < src2) ? -1 : (src1 > src2) ? 1 : 0
//
// BUG FIX: the previous encoding computed src1 - src2 and tested the SIGN of
// the difference (bltz). That gives the wrong answer whenever the subtraction
// overflows — e.g. src1 = min_jlong, src2 = 1 wraps to a positive difference
// and reported "greater". Two slt instructions are overflow-free, branchless
// (no delay slot to fill), and shorter. Clobbers AT.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    __ slt(AT, opr1, opr2);     // AT  = (src1 < src2) ? 1 : 0
    __ slt(dst, opr2, opr1);    // dst = (src1 > src2) ? 1 : 0
    __ subu(dst, dst, AT);      // dst = -1 / 0 / 1
  %}
  ins_pipe( pipe_slow );
%}
9561 //
// less_result = -1
9563 // greater_result = 1
9564 // equal_result = 0
9565 // nan_result = -1
9566 //
// Manifest the 3-way result of a float compare in an int register:
//   dst = -1 if src1 < src2 or unordered (NaN), 0 if equal, 1 if greater.
// c_ult_s is unordered-or-less-than, so NaN takes the -1 path via the
// bc1t branch; the delay slot preloads dst with -1. Otherwise dst starts
// at 1 and is zeroed by movt when c_eq_s set the FP flag.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    __ c_ult_s(src1, src2);                 // less-than or unordered?
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);      // delay slot: dst = -1

    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);                       // dst = 0 when equal

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest the 3-way result of a double compare in an int register:
//   dst = -1 if src1 < src2 or unordered (NaN), 0 if equal, 1 if greater.
// Same scheme as cmpF3_reg_reg, using the double-precision FP compares.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    __ c_ult_d(src1, src2);                 // less-than or unordered?
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);      // delay slot: dst = -1

    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);                       // dst = 0 when equal

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero a region of memory: stores $cnt doublewords of zero starting at $base.
// (The old comment claimed cnt was a byte count; the loop stores one 8-byte
// doubleword per count decrement, so cnt is a count of doublewords, matching
// the "Clear doublewords" format string.)
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // cnt  = number of doublewords to clear (loop exits immediately if 0)
    // base = starting address of the region
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    __ beq(num, R0, done);
    __ delayed()->daddu(AT, base, R0);     // delay slot: AT = running address

    __ move(T9, num);                      // T9 = remaining doubleword count

    __ bind(Loop);
    __ sd(R0, AT, 0);                      // store 8 bytes of zero
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize); // delay slot: advance address

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two UTF-16 char sequences (StrComp intrinsic).
// Walks both strings a char at a time over the shorter length; on the first
// mismatch the result is the difference of the two chars, otherwise it is
// the difference of the lengths. Kills all four inputs and clobbers AT.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // result = cnt1 - cnt2: the answer when one string is a prefix of the other
    __ subu(result, cnt1, cnt2);

    // cnt1 = min(cnt1, cnt2): loop over the shorter length
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // From here cnt2 is free for reuse as a temp (current char of str2).
    __ bind(Loop);                         // Loop begin
    __ beq(cnt1, R0, done);                // ran off the shorter string: lengths decide
    __ delayed()->lhu(AT, str1, 0);;       // delay slot: load current char of str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);     // delay slot: advance str1 (2-byte chars)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);    // delay slot: decrement remaining count

    __ bind(haveResult);
    __ subu(result, AT, cnt2);             // difference of the mismatching chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9686 // intrinsic optimization
// Char-by-char equality of two UTF-16 sequences of length $cnt (StrEquals
// intrinsic). result = 1 on equality (including the same-array fast path and
// cnt == 0), 0 on the first mismatch. Kills str1/str2/cnt, clobbers temp, AT.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt    = $cnt$$Register;
    Register tmp    = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // Fast path: identical array references are equal.
    // NOTE(review): these first two branches are emitted without delayed();
    // presumably the daddiu after each is intended as (or placed into) the
    // delay slot by the assembler — verify against this port's Assembler.
    __ beq(str1, str2, done);              // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);                         // Loop begin
    __ beq(cnt, R0, done);                 // count exhausted: strings equal
    __ daddiu(result, R0, 1);              // count == 0

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);    // delay slot: presume mismatch (result = 0)
    __ addi(str1, str1, 2);                // 2-byte chars
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);      // delay slot: decrement count

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9727 //----------Arithmetic Instructions-------------------------------------------
9728 //----------Addition Instructions---------------------------------------------
// 32-bit integer addition: dst = src1 + src2 (addu32 keeps the canonical
// sign-extended-int representation in the 64-bit register).
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    Register rt = $src2$$Register;
    __ addu32(rd, rs, rt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 32-bit integer addition with an immediate: dst = src1 + imm.
// Small constants fit addiu32 directly; wide ones are staged through AT.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    int imm = $src2$$constant;

    if (!Assembler::is_simm16(imm)) {
      __ move(AT, imm);          // constant too wide for a 16-bit immediate
      __ addu32(rd, rs, AT);
    } else {
      __ addiu32(rd, rs, imm);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
// Pointer addition: dst = src1 + src2 (full 64-bit daddu).
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    Register rt = $src2$$Register;
    __ daddu(rd, rs, rt);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Pointer plus sign-extended int offset: dst = src1 + (long)src2.
// No explicit extension is emitted — ints are kept sign-extended in their
// 64-bit registers on this port, so a plain daddu suffices.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    Register rt = $src2$$Register;
    __ daddu(rd, rs, rt);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Pointer plus 64-bit immediate: dst = src1 + imm.
// A 16-bit immediate uses daddiu directly; anything wider is materialized
// in AT with set64 first.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    long imm = $src2$$constant;

    if (!Assembler::is_simm16(imm)) {
      __ set64(AT, imm);         // wide constant: build it in AT
      __ daddu(rd, rs, AT);
    } else {
      __ daddiu(rd, rs, imm);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
9810 // Add Long Register with Register
9811 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9812 match(Set dst (AddL src1 src2));
9813 ins_cost(200);
9814 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9816 ins_encode %{
9817 Register dst_reg = as_Register($dst$$reg);
9818 Register src1_reg = as_Register($src1$$reg);
9819 Register src2_reg = as_Register($src2$$reg);
9821 __ daddu(dst_reg, src1_reg, src2_reg);
9822 %}
9824 ins_pipe( ialu_regL_regL );
9825 %}
// 64-bit long addition with a 16-bit immediate: dst = src1 + imm.
// The immL16 operand guarantees the constant fits daddiu's immediate field.
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    Register rd = as_Register($dst$$reg);
    Register rs = as_Register($src1$$reg);
    int imm = $src2$$constant;
    __ daddiu(rd, rs, imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Sign-extended int plus 16-bit immediate as a long: dst = (long)src1 + imm.
// Relies on ints being kept sign-extended in 64-bit registers, so no
// explicit ConvI2L code is needed before the daddiu.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register rd = as_Register($dst$$reg);
    Register rs = as_Register($src1$$reg);
    int imm = $src2$$constant;
    __ daddiu(rd, rs, imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Sign-extended int plus long: dst = (long)src1 + src2.
// The int operand is already sign-extended in its register, so a plain
// 64-bit daddu is correct.
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    Register rd = as_Register($dst$$reg);
    Register rs = as_Register($src1$$reg);
    Register rt = as_Register($src2$$reg);
    __ daddu(rd, rs, rt);
  %}

  ins_pipe( ialu_regL_regL );
%}
9875 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9876 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9877 ins_cost(200);
9878 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9880 ins_encode %{
9881 Register dst_reg = as_Register($dst$$reg);
9882 Register src1_reg = as_Register($src1$$reg);
9883 Register src2_reg = as_Register($src2$$reg);
9885 __ daddu(dst_reg, src1_reg, src2_reg);
9886 %}
9888 ins_pipe( ialu_regL_regL );
9889 %}
9891 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9892 match(Set dst (AddL src1 (ConvI2L src2)));
9893 ins_cost(200);
9894 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9896 ins_encode %{
9897 Register dst_reg = as_Register($dst$$reg);
9898 Register src1_reg = as_Register($src1$$reg);
9899 Register src2_reg = as_Register($src2$$reg);
9901 __ daddu(dst_reg, src1_reg, src2_reg);
9902 %}
9904 ins_pipe( ialu_regL_regL );
9905 %}
//----------Subtraction Instructions-------------------------------------------
// Integer Subtraction Instructions
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    __ subu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Subtract a small immediate: emitted as an add of the negated constant.
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    // immI16_sub guarantees the negated value still fits 16 bits.
    __ addiu32($dst$$Register, $src1$$Register, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Integer negation: 0 - src.
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    __ subu32($dst$$Register, R0, $src$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long negation: 0 - src.
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    __ subu($dst$$Register, R0, $src$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long subtract of a small immediate, as an add of the negated constant.
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    __ daddiu($dst$$Register, $src1$$Register, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Subtract Long Register with Register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    // ConvI2L needs no code — assumes ints are kept sign-extended
    // in 64-bit registers; TODO confirm against the port's convention.
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Integer MOD with Register
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // 2016.08.10
    // Experiments showed that the Loongson gsmod instruction is slower
    // than div+mfhi, so gsmod is deliberately NOT used here even when
    // UseLoongsonISA is set (the old `if (0)` dead branch is removed).
    // The remainder is read from HI after a classic 32-bit divide.
    __ div(src1, src2);
    __ mfhi(dst);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long MOD: Loongson has a fused gsdmod; otherwise ddiv leaves the
// 64-bit remainder in HI.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;

    if (UseLoongsonISA) {
      __ gsdmod(res, lhs, rhs);
    } else {
      __ ddiv(lhs, rhs);
      __ mfhi(res);
    }
  %}
  ins_pipe( pipe_slow );
%}
// 32-bit integer multiply.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    __ mul($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_mult );
%}

// Fused multiply-add: dst = src1 * src2 + src3.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    // Seed LO with the addend, let madd accumulate the product into
    // HI:LO, then read the low word as the 32-bit result.
    __ mtlo($src3$$Register);
    __ madd($src1$$Register, $src2$$Register);
    __ mflo($dst$$Register);
  %}
  ins_pipe( ialu_mult );
%}
// 32-bit integer divide.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register num = $src1$$Register;
    Register den = $src2$$Register;
    Register res = $dst$$Register;

    // 2012/4/21 Jin: MIPS div does not raise an exception on divide-by
    // zero, so trap manually: teq traps when den == R0 (i.e. den == 0).
    __ teq(R0, den, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(res, num, den);
    } else {
      __ div(num, den);

      // NOTE(review): the two nops look like hazard padding between
      // div and mflo — confirm against the target pipeline manual.
      __ nop();
      __ nop();
      __ mflo(res);
    }
  %}
  ins_pipe( ialu_mod );
%}
// Single-precision FP divide.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    // Open question from the original author: does this need a manual
    // trap (like integer divide does)?
    __ div_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Double-precision FP divide.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    // Open question from the original author: does this need a manual
    // trap (like integer divide does)?
    __ div_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply: fused gsdmult on Loongson, otherwise dmult + mflo.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;

    if (UseLoongsonISA) {
      __ gsdmult(res, lhs, rhs);
    } else {
      __ dmult(lhs, rhs);
      __ mflo(res);
    }
  %}
  ins_pipe( pipe_slow );
%}

instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;

    if (UseLoongsonISA) {
      __ gsdmult(res, lhs, rhs);
    } else {
      __ dmult(lhs, rhs);
      __ mflo(res);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long divide: fused gsddiv on Loongson, otherwise ddiv + mflo.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;

    if (UseLoongsonISA) {
      __ gsddiv(res, lhs, rhs);
    } else {
      __ ddiv(lhs, rhs);
      __ mflo(res);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision FP add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    __ add_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    __ sub_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    __ add_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    __ sub_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP negate.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    __ neg_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    __ neg_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    __ mul_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    __ madd_s($dst$$FloatRegister, $src1$$FloatRegister,
              $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating piont number
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    __ mul_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    __ madd_d($dst$$FloatRegister, $src1$$FloatRegister,
              $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    __ abs_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    __ abs_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    __ sqrt_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Matches the float-sqrt idiom (f2d, sqrt, d2f) and collapses it to a
// single single-precision sqrt.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    __ sqrt_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions-------------

//And Instuctions
// And Register with Immediate
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    // General 32-bit constant: materialize in AT, then register AND.
    int imm = $src2$$constant;
    __ move(AT, imm);
    __ andr($dst$$Register, $src1$$Register, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    // Constant fits andi's zero-extended 16-bit immediate.
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    // Mask is a contiguous low-bit run: use the extract instruction.
    int width = Assembler::is_int_mask($mask$$constant);
    __ ext($dst$$Register, $src1$$Register, 0, width);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    // 64-bit contiguous low-bit mask: use doubleword extract.
    int width = Assembler::is_jlong_mask($mask$$constant);
    __ dext($dst$$Register, $src1$$Register, 0, width);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}

// xor with -1 == bitwise NOT; emitted as orn(dst, R0, src) on Loongson.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    __ gsorn($dst$$Register, R0, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same NOT idiom when the operand comes through a ConvL2I.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    __ gsorn($dst$$Register, R0, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    int imm = $src2$$constant;
    __ xori($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe( ialu_regI_regI );
%}
10539 /*
10540 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10541 match(Set dst (XorL src1 M1));
10542 predicate(UseLoongsonISA);
10543 ins_cost(60);
10545 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10546 ins_encode %{
10547 Register dst = $dst$$Register;
10548 Register src = $src1$$Register;
10550 __ gsorn(dst, R0, src);
10551 %}
10552 ins_pipe( ialu_regI_regI );
10553 %}
10554 */
// LoadB followed by AND 0xFF == an unsigned byte load (mask on the left).
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  // Fixed: format previously said "lhu" (halfword) but the encoding
  // emits an unsigned BYTE load (load_UB_enc).
  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// LoadB followed by AND 0xFF == an unsigned byte load (mask on the right).
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  // Fixed: format previously said "lhu" (halfword) but the encoding
  // emits an unsigned BYTE load (load_UB_enc).
  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 & ~src2: matches (AndI src1 (XorI src2 -1)) and uses gsandn.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    __ gsandn($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | ~src2: matches (OrI src1 (XorI src2 -1)) and uses gsorn.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    __ gsorn($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 & src2: operands swapped relative to andnI_Reg_nReg.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    __ gsandn($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 | src2: operands swapped relative to ornI_Reg_nReg.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    __ gsorn($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    // Constant is 0..65535, so it fits andi's zero-extended immediate.
    int imm = $src2$$constant;
    __ andi($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (long & small-mask) narrowed to int: andi already zeroes the high bits.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    int imm = $src2$$constant;
    __ andi($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe( ialu_regI_regI );
%}
10704 /*
10705 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10706 match(Set dst (AndL src1 (XorL src2 M1)));
10707 predicate(UseLoongsonISA);
10709 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10710 ins_encode %{
10711 Register dst = $dst$$Register;
10712 Register src1 = $src1$$Register;
10713 Register src2 = $src2$$Register;
10715 __ gsandn(dst, src1, src2);
10716 %}
10717 ins_pipe( ialu_regI_regI );
10718 %}
10719 */
10721 /*
10722 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10723 match(Set dst (OrL src1 (XorL src2 M1)));
10724 predicate(UseLoongsonISA);
10726 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10727 ins_encode %{
10728 Register dst = $dst$$Register;
10729 Register src1 = $src1$$Register;
10730 Register src2 = $src2$$Register;
10732 __ gsorn(dst, src1, src2);
10733 %}
10734 ins_pipe( ialu_regI_regI );
10735 %}
10736 */
10738 /*
10739 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10740 match(Set dst (AndL (XorL src1 M1) src2));
10741 predicate(UseLoongsonISA);
10743 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10744 ins_encode %{
10745 Register dst = $dst$$Register;
10746 Register src1 = $src1$$Register;
10747 Register src2 = $src2$$Register;
10749 __ gsandn(dst, src2, src1);
10750 %}
10751 ins_pipe( ialu_regI_regI );
10752 %}
10753 */
10755 /*
10756 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10757 match(Set dst (OrL (XorL src1 M1) src2));
10758 predicate(UseLoongsonISA);
10760 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10761 ins_encode %{
10762 Register dst = $dst$$Register;
10763 Register src1 = $src1$$Register;
10764 Register src2 = $src2$$Register;
10766 __ gsorn(dst, src2, src1);
10767 %}
10768 ins_pipe( ialu_regI_regI );
10769 %}
10770 */
// AND with -8 == clear bits [2:0]: insert zeros with dins.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with -5 (…11111011) == clear bit [2].
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with -7 (…11111001) == clear bits [2:1].
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with -4 == clear bits [1:0].
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with -121 (…10000111) == clear bits [6:3].
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// OR where one operand is a pointer reinterpreted as a long (CastP2X
// is a no-op at the machine level).
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
10878 // Shift Left by 8-bit immediate
10879 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10880 match(Set dst (LShiftI src shift));
10882 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10883 ins_encode %{
10884 Register src = $src$$Register;
10885 Register dst = $dst$$Register;
10886 int shamt = $shift$$constant;
10888 __ sll(dst, src, shamt);
10889 %}
10890 ins_pipe( ialu_regI_regI );
10891 %}
10893 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10894 match(Set dst (LShiftI (ConvL2I src) shift));
10896 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10897 ins_encode %{
10898 Register src = $src$$Register;
10899 Register dst = $dst$$Register;
10900 int shamt = $shift$$constant;
10902 __ sll(dst, src, shamt);
10903 %}
10904 ins_pipe( ialu_regI_regI );
10905 %}
10907 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10908 match(Set dst (AndI (LShiftI src shift) mask));
10910 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10911 ins_encode %{
10912 Register src = $src$$Register;
10913 Register dst = $dst$$Register;
10915 __ sll(dst, src, 16);
10916 %}
10917 ins_pipe( ialu_regI_regI );
10918 %}
10920 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10921 %{
10922 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10924 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10925 ins_encode %{
10926 Register src = $src$$Register;
10927 Register dst = $dst$$Register;
10929 __ andi(dst, src, 7);
10930 %}
10931 ins_pipe(ialu_regI_regI);
10932 %}
10934 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10935 %{
10936 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10938 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10939 ins_encode %{
10940 Register src = $src1$$Register;
10941 int val = $src2$$constant;
10942 Register dst = $dst$$Register;
10944 __ ori(dst, src, val);
10945 %}
10946 ins_pipe(ialu_regI_regI);
10947 %}
// Shift Left by 16 followed by Arithmetic Shift Right by 16, i.e.
// sign-extend the low 16 bits. This idiom is emitted by the compiler
// for the i2s bytecode; collapsed to the MIPS32R2 seh instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);  // sign-extend halfword
  %}
  ins_pipe(ialu_regI_regI);
%}

// Shift Left by 24 followed by Arithmetic Shift Right by 24, i.e.
// sign-extend the low 8 bits. This idiom is emitted by the compiler
// for the i2b bytecode; collapsed to the MIPS32R2 seb instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);  // sign-extend byte
  %}
  ins_pipe(ialu_regI_regI);
%}
// Int shift-left of the low word of a long by an immediate.
// NOTE(review): identical match rule to salL2I_Reg_imm above — confirm
// whether this duplicate is intentional.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Shift Left by a variable amount held in a register.
// sllv uses only the low 5 bits of the shift register, matching Java's
// (shift & 0x1f) semantics for int shifts.
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long by immediate.
// Fast path: is_simm(shamt, 5) accepts 0..15 (signed 5-bit range), which
// dsll encodes directly. Otherwise the amount is reduced mod 64
// (Assembler::low(shamt, 6), matching Java's shift & 0x3f) and split
// between dsll (sa < 32) and dsll32 (sa >= 32, which shifts by sa itself).
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Left Long of a widened int by immediate. The ConvI2L is free:
// an mRegI value is already sign-extended to 64 bits, and dsll shifts
// the full register. Same shift-amount splitting as salL_Reg_imm.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long by a variable amount in a register; dsllv uses the
// low 6 bits of the shift register (Java's shift & 0x3f semantics).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// NOTE(review): identical match rule to salL_RegI2L_imm above (same cost,
// same encoding shape) — confirm whether this duplicate is intentional.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long (arithmetic) by immediate. The amount is masked to
// 6 bits up front; dsra handles 0..15 directly (is_simm(...,5) range)
// and the else branch splits 16..63 between dsra and dsra32.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >> shift) for shift in [32, 63]: dsra32 leaves the 32-bit
// result sign-extended, so the ConvL2I needs no extra instruction.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long arithmetically by a variable amount (low 6 bits used).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically by a variable amount (low 6 bits used).
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by immediate 0..31: single dsrl.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// ((int)(src >>> shift)) & 0x7fffffff collapses to one MIPS64R2 dext:
// extract 31 bits starting at bit 'shift'. The result is non-negative,
// so zero-extension equals the required int sign-extension.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift of a pointer reinterpreted as a long (CastP2X),
// immediate 0..31.
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by immediate 32..63: dsrl32 shifts by (32 + sa).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shift) for shift strictly greater than 32: the result
// then fits in 31 bits, so dsrl32's zero-extension is already a valid
// sign-extended int. shift == 32 is excluded by the predicate because
// bit 31 of the result could be set, which would need sign-extension.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// CastP2X variant of the 32..63 logical right shift.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register. The trailing sll-by-0 re-canonicalizes the
// result as a sign-extended 32-bit int (MIPS64 int register convention).
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}

// Or Instructions
// Or Register with Register. Both inputs are canonical sign-extended
// ints, so the or result is too — no re-extension needed.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32
// (enforced by the predicate): rotate right by 1, then shift the rotated
// value right by rshift-1. The srl is skipped when rshift == 1.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}

// Or of a long with a pointer reinterpreted as an integer (CastP2X).
// NOTE(review): the ideal op is OrI but the operands are 64-bit
// (mRegL/mRegP) — confirm this typing is what the matcher expects here.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate.
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src >>> shift) & mask where mask is a contiguous low-bit mask
// (immI_nonneg_mask): a single MIPS32R2 ext (bit-field extract) with
// size = number of one-bits in the mask (Assembler::is_int_mask).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left idiom: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 (mod 32), implemented as rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long rotate-left, rshift in [0, 31]: drotr rotates right by rshift.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long rotate-left, rshift in [32, 63]: drotr32 rotates by (32 + sa).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-right idiom (operands of the Or swapped relative to rolI).
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long rotate-right, rshift in [0, 31].
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long rotate-right, rshift in [32, 63].
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by a variable amount (srlv uses low 5 bits).
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic Shift Right by immediate.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic Shift Right by a variable amount (srav uses low 5 bits).
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// dst = (src != 0) ? 1 : 0. Load 1, then movz replaces it with 0 when the
// source is zero. The dst == src case stages the source in AT first so the
// daddiu does not clobber it before the movz test.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}

// Int to long widening. sll-by-0 sign-extends the low 32 bits; skipped
// entirely when dst == src since mRegI values are already canonical
// (sign-extended) on MIPS64.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long to int narrowing: sll-by-0 truncates to 32 bits and sign-extends,
// producing the canonical MIPS64 int register form.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (long)(int)src — narrow then re-widen, i.e. sign-extend the low 32 bits.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// Long to double: move the 64-bit integer into the FPU and convert.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Double to long, fast path. trunc.l.d saturates out-of-range/NaN inputs
// to max_long (0x7fffffffffffffff, built here as (-1 >>> 1) to avoid a
// set64); if the raw result is not max_long it is taken as-is. Otherwise
// the fixup code distinguishes positive overflow (keep max_long),
// negative overflow (produce min_long via T9 - AT = -1 - max_long), and
// NaN (movt forces 0 when c.un.d set the condition flag).
// NOTE(review): clobbers AT, T9 and F30 — assumed scratch here; confirm.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}

// Double to long, slow path: returns 0 for NaN; otherwise truncates and
// checks the FCSR invalid-operation flag (bit 16 of control register 31).
// If the conversion was invalid, falls back to SharedRuntime::d2l for
// the exact Java-mandated saturation semantics.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Float to int, fast path. trunc.w.s saturates overflow/NaN to max_int;
// NaN is detected first (c.un.s) and forced to 0 via movt. If the raw
// result is max_int the fixup inspects the float's sign bit (mask
// 0x80000000 built by lui in the delay slot) and substitutes min_int for
// negative overflow via movn.
// NOTE(review): clobbers AT, T9 and F30 — assumed scratch here; confirm.
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_w_s(F30, fval);
    __ move(AT, 0x7fffffff);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    __ movn(dreg, T9, AT);

    __ bind(L);

  %}

  ins_pipe( pipe_slow );
%}

// Float to int, slow path: 0 for NaN, otherwise truncate and check the
// FCSR invalid-operation flag; on invalid conversion call
// SharedRuntime::f2i for exact semantics. V0 is saved/restored around
// the leaf call when it is not the destination (see Fu's note below).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     *    J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    __ push(fval);
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ pop(fval);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Float to long, fast path. Mirrors convD2L_reg_fast/convF2I_reg_fast:
// trunc.l.s saturates to max_long ((-1 >>> 1)); NaN is forced to 0 via
// movt; if the raw result equals max_long, the float's sign bit selects
// min_long (T9 = 0x8000... after dsll32) via movn for negative overflow.
// NOTE(review): clobbers AT, T9 and F30 — assumed scratch here; confirm.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_l_s(F30, fval);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    __ dsll32(T9, T9, 0);
    __ movn(dreg, T9, AT);

    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}

// Float to long, slow path: 0 for NaN, otherwise truncate and check the
// FCSR invalid-operation flag; on invalid conversion call
// SharedRuntime::f2l for exact semantics.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
11781 instruct convL2F_reg( regF dst, mRegL src ) %{
11782 match(Set dst (ConvL2F src));
11783 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11784 ins_encode %{
11785 FloatRegister dst = $dst$$FloatRegister;
11786 Register src = as_Register($src$$reg);
11787 Label L;
11789 __ dmtc1(src, dst);
11790 __ cvt_s_l(dst, dst);
11791 %}
11793 ins_pipe( pipe_slow );
11794 %}
// Int to float: move to the FPU and convert with cvt.s.w.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}

// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, i.e. replicate the
// sign bit with an arithmetic shift by 31.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}

// CmpLTMask general form: dst = (p < q) ? -1 : 0. slt yields 0/1,
// negated to 0/-1 by subtracting from R0.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Pointer to boolean: dst = (src != NULL) ? 1 : 0. Same 1-then-movz
// pattern as convI2B, with AT staging the value when dst aliases src.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}

// Int to double: move to the FPU and convert with cvt.d.w.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Float to double widening (always exact).
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double to float narrowing (rounds per current FPU rounding mode).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.

// Fast path: trunc.w.d saturates overflow/NaN to max_int (0x7fffffff);
// if the raw result is not max_int it is used directly. Otherwise the
// fixup distinguishes positive overflow (keep max_int), negative
// overflow (min_int via T9 - AT = -1 - max_int), and NaN (movt -> 0).
// NOTE(review): clobbers AT, T9 and F30 — assumed scratch here; confirm.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}

// Slow path: truncate and check the FCSR invalid-operation flag (bit 16
// of control register 31); on invalid conversion fall back to
// SharedRuntime::d2i for exact Java semantics.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form.
// Maybe-null variant: the predicate excludes proven-not-null pointers,
// which take the cheaper rule below.
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ encode_heap_oop(dst, src);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Not-null variant: skips the null check inside the encode.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow oop that may be null (not proven NotNull/Constant).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;

    __ decode_heap_oop(d, s);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow oop known non-null; the in-place overload is used
// when source and destination alias.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
12019 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
12020 match(Set dst (EncodePKlass src));
12021 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
12022 ins_encode %{
12023 __ encode_klass_not_null($dst$$Register, $src$$Register);
12024 %}
12025 ins_pipe( ialu_regL_regL );
12026 %}
// Decompress a narrow klass pointer (always non-null); the in-place
// overload is used when source and destination alias.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Load the current JavaThread. With OPT_THREAD the thread lives in a
// dedicated register (TREG) and is just copied; otherwise it is fetched
// via MacroAssembler::get_thread. Zero cost: the matcher may duplicate
// this freely.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
12062 instruct checkCastPP( mRegP dst ) %{
12063 match(Set dst (CheckCastPP dst));
12065 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
12066 ins_encode( /*empty encoding*/ );
12067 ins_pipe( empty );
12068 %}
// CastPP is a type-system-only node: no code is emitted (size 0).
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII is a type-system-only node: no code is emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}

// Return Instruction
// Remove the return address & jump to it.
// jr RA with a nop in the branch delay slot.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
12102 /*
12103 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12104 instruct jumpXtnd(mRegL switch_val) %{
12105 match(Jump switch_val);
12107 ins_cost(350);
12109 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12110 "jr T9\n\t"
12111 "nop" %}
12112 ins_encode %{
12113 Register table_base = $constanttablebase;
12114 int con_offset = $constantoffset;
12115 Register switch_reg = $switch_val$$Register;
12117 if (UseLoongsonISA) {
12118 if (Assembler::is_simm(con_offset, 8)) {
12119 __ gsldx(T9, table_base, switch_reg, con_offset);
12120 } else if (Assembler::is_simm16(con_offset)) {
12121 __ daddu(T9, table_base, switch_reg);
12122 __ ld(T9, T9, con_offset);
12123 } else {
12124 __ move(T9, con_offset);
12125 __ daddu(AT, table_base, switch_reg);
12126 __ gsldx(T9, AT, T9, 0);
12127 }
12128 } else {
12129 if (Assembler::is_simm16(con_offset)) {
12130 __ daddu(T9, table_base, switch_reg);
12131 __ ld(T9, T9, con_offset);
12132 } else {
12133 __ move(T9, con_offset);
12134 __ daddu(AT, table_base, switch_reg);
12135 __ daddu(AT, T9, AT);
12136 __ ld(T9, AT, 0);
12137 }
12138 }
12140 __ jr(T9);
12141 __ nop();
12143 %}
12144 ins_pipe(pipe_jump);
12145 %}
12146 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label pointer means the target is not yet bound; emit a
    // branch with a zero offset to be patched later.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();   // branch-delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *  [stubGenerator_mips.cpp] generate_forward_exception()
     *  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    // Calling convention for the rethrow path: exception oop in V0,
    // exception pc (the current return address) in V1.
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();   // branch-delay slot
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Call runtime leaf routine that uses no FP registers (no safepoint).
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Prefetch instructions.

// Prefetch for read: computes the effective address base + index<<scale + disp
// into AT and issues `pref 0` (hint 0 = load).
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale), or just base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement onto the address computed above.
    // (Previously the simm16 branch restarted from `base` and added `disp`
    //  twice — daddiu(AT, base, disp); daddiu(AT, AT, disp) — discarding the
    //  index contribution and prefetching base + 2*disp instead.)
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: computes the effective address base + index<<scale + disp
// into AT and issues `pref 1` (hint 1 = store).
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale), or just base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement onto the address computed above.
    // (Previously the simm16 branch restarted from `base` and added `disp`
    //  twice, discarding the index contribution — same copy-paste defect as
    //  the old prefetchrNTA.)
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// Prefetch for allocation, implemented as a load-byte into R0: R0 is the
// architectural zero register, so the lb/gslbx pulls the line into the cache
// without any visible register side effect.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // Loongson extension: indexed load folds base+index+disp.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Char (16bit unsigned) with implicit widening to long.
// Zero-extension makes the explicit ConvI2L a no-op, so the same
// encoding is reused.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}

// Store Char zero: dedicated pattern so R0 can be stored directly.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float 0.0 by moving the zero register into the FPU register;
// avoids a constant-table access.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Load double 0.0 via a 64-bit move of the zero register.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a double constant from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}

// Store float 0.0: the bit pattern of +0.0f is all zeros, so an integer
// sw of R0 writes it without touching the FPU.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx folds base + index + small disp into one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large displacement: build index+disp in AT, then indexed store.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}

// Store double 0.0: materialize 0.0 in scratch FPU register F30
// (mtc1 of R0 then int->double convert), then store it.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 folds base + index + small disp into one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large displacement: build index+disp in AT, then indexed store.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Stack-slot spill/fill instructions. All address SP-relative slots whose
// displacement the register allocator keeps within simm16 range; the
// guarantee()s assert that assumption at code-emission time.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path object monitor lock. The result is reported through the flags
// register (AT on MIPS); the heavy lifting lives in MacroAssembler::fast_lock.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path object monitor unlock; mirror of cmpFastLock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store with a sync for card-table marking (see store_B_immI_enc_sync).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
// Matched for the ideal Halt node: emits a fatal stop so that control
// falling into an impossible path aborts the VM with a diagnostic.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // (Fixed typo in the diagnostic string: was "ShoudNotReachHere".)
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe( pipe_jump );
%}
// Load-effective-address for a narrow-oop base with an 8-bit offset.
// Only valid when the narrow-oop shift is zero (predicate), so the
// decoded address is simply base + disp.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// Load-effective-address: dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
 %}

  ins_pipe( ialu_regI_imm16 );
%}

// Load-effective-address with no displacement: dst = base + (index << scale).
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
 %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Conditional branch closing a counted loop; compares two registers.
// cmpcode values: 0x01 eq, 0x02 ne, 0x03 gt, 0x04 ge, 0x05 lt, 0x06 le
// (signed, synthesized with slt + branch-on-AT). A null label means the
// target is unbound and a zero-offset branch is emitted for later patching.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch-delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop-end branch against an immediate: the constant is first
// materialized into AT and then the same compare/branch scheme as
// jmpLoopEnd is used. Note AT is both op2 and the slt scratch; slt reads
// its sources before writing AT, so the reuse is safe.
instruct jmpLoopEnd_reg_immI(cmpOp cop, mRegI src1, immI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_immI" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = AT;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    __ move(op2, $src2$$constant);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch-delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// This match pattern is created for StoreIConditional since I can not match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the software flags register: AT != 0 means "condition true".
// Only eq/ne are supported here; the branch sense is inverted relative to
// the cmpcode because AT holds the already-evaluated condition.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl  #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label    &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch-delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS this is an LL/SC retry loop: AT ends up non-zero on success,
// zero on failure (the delay-slot instructions preload the result value).
// Only base+disp addressing is supported; an index register is a bug here.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

//    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Older chips need an explicit sync before ll (Use3A2000 exempts them).
      if(!Use3A2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // failure result: AT = 0

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);            // sc failed -> retry
      __ delayed()->addiu(AT, R0, 0xFF); // success result: AT != 0
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Only base+disp addressing is supported; an index register is a bug here.
// oldval is KILLed because cmpxchg clobbers it.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed copy-pasted diagnostic: it previously named storeIConditional.
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 32-bit int: cmpxchg32 leaves the success flag in AT, which is
// copied into $res (1 on success, 0 on failure per cmpxchg32's contract —
// the multi-line format text describing an explicit branch is stale).
// oldval is KILLed because cmpxchg32 clobbers it.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// CAS of a 64-bit pointer; success flag from cmpxchg is moved from AT
// into $res. oldval is KILLed because cmpxchg clobbers it.
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a narrow (compressed) oop. The narrow value is sign-extended
// first so it compares equal against the sign-extending ll used inside
// cmpxchg32. oldval is KILLed (both by the sll and by cmpxchg32).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13268 //----------Max and Min--------------------------------------------------------
13269 // Min Instructions
13270 ////
13271 // *** Min and Max using the conditional move are slower than the
13272 // *** branch version on a Pentium III.
13273 // // Conditional move for min
13274 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13275 // effect( USE_DEF op2, USE op1, USE cr );
13276 // format %{ "CMOVlt $op2,$op1\t! min" %}
13277 // opcode(0x4C,0x0F);
13278 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13279 // ins_pipe( pipe_cmov_reg );
13280 //%}
13281 //
13282 //// Min Register with Register (P6 version)
13283 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13284 // predicate(VM_Version::supports_cmov() );
13285 // match(Set op2 (MinI op1 op2));
13286 // ins_cost(200);
13287 // expand %{
13288 // eFlagsReg cr;
13289 // compI_eReg(cr,op1,op2);
13290 // cmovI_reg_lt(op2,op1,cr);
13291 // %}
13292 //%}
// Min Register with Register (generic version)
// dst = min(dst, src) using slt + conditional move (no branch).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (src < dst); if so, dst = src — i.e. keep the smaller value.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13313 // Max Register with Register
13314 // *** Min and Max using the conditional move are slower than the
13315 // *** branch version on a Pentium III.
13316 // // Conditional move for max
13317 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13318 // effect( USE_DEF op2, USE op1, USE cr );
13319 // format %{ "CMOVgt $op2,$op1\t! max" %}
13320 // opcode(0x4F,0x0F);
13321 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13322 // ins_pipe( pipe_cmov_reg );
13323 //%}
13324 //
13325 // // Max Register with Register (P6 version)
13326 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13327 // predicate(VM_Version::supports_cmov() );
13328 // match(Set op2 (MaxI op1 op2));
13329 // ins_cost(200);
13330 // expand %{
13331 // eFlagsReg cr;
13332 // compI_eReg(cr,op1,op2);
13333 // cmovI_reg_gt(op2,op1,cr);
13334 // %}
13335 //%}
// Max Register with Register (generic version)
// dst = max(dst, src) using slt + conditional move (no branch).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src); if so, dst = src — i.e. keep the larger value.
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero, branch-free.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0); if so, dst = 0.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: dst = src & 0xFFFFFFFF.
// Implemented with a single dext (extract bits [0,32) into dst, upper bits cleared).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zext(src1).
// src1 supplies the low 32 bits, src2 the high 32 bits.
// The three cases avoid clobbering a source that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into bits [32,64).
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move src2 (== dst) to the high half, then insert src1 low.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // dst distinct from both sources: build low half, then high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// dst = zext32(src): matches (ConvI2L src) masked with 0xFFFFFFFF.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // dext clears bits [32,64), giving the unsigned widening.
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Collapse L2I followed by I2L-with-mask into a single zero-extend:
// dst = src & 0xFFFFFFFF.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff (mask on the right-hand side).
// Folds the whole pattern into one unsigned 32-bit load (load_N_enc — lwu).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left operand of AndL;
// the matcher does not canonicalize operand order, so both forms are needed.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Poll the safepoint page whose address is already in a register:
// a read from the polling page faults when a safepoint is pending.
// Currently disabled via predicate(false) — safePoint_poll below is used instead.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Relocation marks this load as a safepoint poll for the signal handler.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address and read from it;
// the read faults when the VM arms the page for a safepoint.
// NOTE(review): this clobbers T9 without declaring a KILL effect —
// presumably T9 is treated as a scratch register by this port; verify.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    // Relocation marks this load as a safepoint poll for the signal handler.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op on this target (floats are already kept in
// IEEE single precision), so emit nothing.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op on this target; emit nothing.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// Count leading zeros of a 32-bit int with the native clz instruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a 64-bit long with the native dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a 32-bit int.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Godson (gs) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13551 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13552 predicate(UseCountTrailingZerosInstruction);
13553 match(Set dst (CountTrailingZerosL src));
13555 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13556 ins_encode %{
13557 __ dctz($dst$$Register, $src$$Register);
13558 %}
13559 ins_pipe( ialu_regL_regL );
13560 %}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FP register (vecD); reuse the
// double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Mirror of loadV8: store the vecD register with the double-store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD, using the Loongson
// DSP replv_ob instruction (only on 3A2000+, see Use3A2000 predicate).
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    // Broadcast the low byte of src across AT, then move to the FP register.
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte scalar into all 8 lanes of a vecD, generic fallback:
// successive doubling with dins/dinsu (1 -> 2 -> 4 -> 8 byte copies).
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);     // byte 0 -> byte 1
    __ dins(AT, AT, 16, 16);   // bytes 0-1 -> bytes 2-3
    __ dinsu(AT, AT, 32, 32);  // bytes 0-3 -> bytes 4-7
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes, DSP version (3A2000+).
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes, generic fallback
// (same doubling sequence as Repl8B, starting from the constant).
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);     // byte 0 -> byte 1
    __ dins(AT, AT, 16, 16);   // bytes 0-1 -> bytes 2-3
    __ dinsu(AT, AT, 32, 32);  // bytes 0-3 -> bytes 4-7
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: all lanes zero == whole register zero, so a single
// move of R0 into the FP register suffices.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: all lanes 0xFF == all bits set; nor(AT, R0, R0)
// produces ~0 in one instruction.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes, using the Loongson DSP
// replv_qh instruction (3A2000+ only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes, generic fallback:
// doubling with dins/dinsu (1 -> 2 -> 4 half-word copies).
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);   // half 0 -> half 1
    __ dinsu(AT, AT, 32, 32);  // halves 0-1 -> halves 2-3
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes, DSP version (3A2000+).
// Uses the immediate form when the constant fits, otherwise materializes
// it and broadcasts from the register.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes, generic fallback
// (same doubling sequence as Repl4S, starting from the constant).
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);   // half 0 -> half 1
    __ dinsu(AT, AT, 32, 32);  // halves 0-1 -> halves 2-3
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero: whole register zero, single move of R0.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1: all bits set via nor(AT, R0, R0).
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// Copies src into both 32-bit halves of AT, then moves to the FP register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);    // low lane
    __ dinsu(AT, $src$$Register, 32, 32);  // high lane
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13775 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
13776 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13777 predicate(n->as_Vector()->length() == 2);
13778 match(Set dst (ReplicateI con));
13779 effect(KILL tmp);
13780 format %{ "li32 AT, [$con], 32\n\t"
13781 "dinsu AT, AT\n\t"
13782 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13783 ins_encode %{
13784 int val = $con$$constant;
13785 __ li32(AT, val);
13786 __ dinsu(AT, AT, 32, 32);
13787 __ dmtc1(AT, $dst$$FloatRegister);
13788 %}
13789 ins_pipe( pipe_mtc1 );
13790 %}
// Replicate integer (4 byte) scalar zero to be vector
// Whole register zero, single move of R0.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
// All bits set via nor(AT, R0, R0).
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs two singles into one paired-single register; using the
// same source twice duplicates it into both lanes.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
// +0.0f in both lanes is an all-zero bit pattern, so move R0.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13838 // ====================VECTOR ARITHMETIC=======================================
13840 // --------------------------------- ADD --------------------------------------
// Floats vector add
// kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Two-operand form: dst += src using paired-single add.
// NOTE(review): uses pipe_slow while vadd2F3 uses fpu_regF_regF — confirm
// whether the difference is intentional.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Three-operand form of packed-single float add: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// --------------------------------- SUB --------------------------------------

// Floats vector sub
// dst -= src using paired-single subtract.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// --------------------------------- MUL --------------------------------------

// Floats vector mul
// Two-operand form: dst *= src using paired-single multiply.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Three-operand form of packed-single float multiply: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13900 // --------------------------------- DIV --------------------------------------
13901 // MIPS do not have div.ps
13903 // --------------------------------- MADD --------------------------------------
13904 // Floats vector madd
13905 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
13906 // predicate(n->as_Vector()->length() == 2);
13907 // match(Set dst (AddVF (MulVF src1 src2) src3));
13908 // ins_cost(50);
13909 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
13910 // ins_encode %{
13911 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13912 // %}
13913 // ins_pipe( fpu_regF_regF );
13914 //%}
13917 //----------PEEPHOLE RULES-----------------------------------------------------
13918 // These must follow all instruction definitions as they use the names
13919 // defined in the instructions definitions.
13920 //
13921 // peepmatch ( root_instr_name [preceeding_instruction]* );
13922 //
13923 // peepconstraint %{
13924 // (instruction_number.operand_name relational_op instruction_number.operand_name
13925 // [, ...] );
13926 // // instruction numbers are zero-based using left to right order in peepmatch
13927 //
13928 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13929 // // provide an instruction_number.operand_name for each operand that appears
13930 // // in the replacement instruction's match rule
13931 //
13932 // ---------VM FLAGS---------------------------------------------------------
13933 //
13934 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13935 //
13936 // Each peephole rule is given an identifying number starting with zero and
13937 // increasing by one in the order seen by the parser. An individual peephole
13938 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13939 // on the command-line.
13940 //
13941 // ---------CURRENT LIMITATIONS----------------------------------------------
13942 //
13943 // Only match adjacent instructions in same basic block
13944 // Only equality constraints
13945 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13946 // Only one replacement instruction
13947 //
13948 // ---------EXAMPLE----------------------------------------------------------
13949 //
13950 // // pertinent parts of existing instructions in architecture description
13951 // instruct movI(eRegI dst, eRegI src) %{
13952 // match(Set dst (CopyI src));
13953 // %}
13954 //
13955 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13956 // match(Set dst (AddI dst src));
13957 // effect(KILL cr);
13958 // %}
13959 //
13960 // // Change (inc mov) to lea
13961 // peephole %{
// // increment preceded by register-register move
13963 // peepmatch ( incI_eReg movI );
13964 // // require that the destination register of the increment
13965 // // match the destination register of the move
13966 // peepconstraint ( 0.dst == 1.dst );
13967 // // construct a replacement instruction that sets
13968 // // the destination to ( move's source register + one )
13969 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13970 // %}
13971 //
13972 // Implementation no longer uses movX instructions since
13973 // machine-independent system no longer uses CopyX nodes.
13974 //
13975 // peephole %{
13976 // peepmatch ( incI_eReg movI );
13977 // peepconstraint ( 0.dst == 1.dst );
13978 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13979 // %}
13980 //
13981 // peephole %{
13982 // peepmatch ( decI_eReg movI );
13983 // peepconstraint ( 0.dst == 1.dst );
13984 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13985 // %}
13986 //
13987 // peephole %{
13988 // peepmatch ( addI_eReg_imm movI );
13989 // peepconstraint ( 0.dst == 1.dst );
13990 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13991 // %}
13992 //
13993 // peephole %{
13994 // peepmatch ( addP_eReg_imm movP );
13995 // peepconstraint ( 0.dst == 1.dst );
13996 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13997 // %}
13999 // // Change load of spilled value to only a spill
14000 // instruct storeI(memory mem, eRegI src) %{
14001 // match(Set mem (StoreI mem src));
14002 // %}
14003 //
14004 // instruct loadI(eRegI dst, memory mem) %{
14005 // match(Set dst (LoadI mem));
14006 // %}
14007 //
14008 //peephole %{
14009 // peepmatch ( loadI storeI );
14010 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14011 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14012 //%}
14014 //----------SMARTSPILL RULES---------------------------------------------------
14015 // These must follow all instruction definitions as they use the names
14016 // defined in the instructions definitions.