Sat, 18 Mar 2017 07:08:36 +0800
[C2] Use general_jal for java runtime calls.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for heapbase of compressed oop
// Allocation order for the integer register chunk.  SOE registers first,
// then caller-saved temporaries; SP/FP last so they are allocated only
// when explicitly required.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H, // FIX: separator comma was missing after GP_H
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
);
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Emitters and size queries for the exception and deoptimization handler
// stubs placed at the end of each compiled method.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // 16-byte aligned, matching __ align(16) in the emitter
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
// Emit exception handler code: materialize the entry point of
// OptoRuntime::exception_blob() into T9 and jump to it (jr, not jalr --
// see the FIXME below).  Returns the stub's offset in the code buffer,
// or 0 if the stub section could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
   * However, this will trigger an assert after the 40th method:
   *
   *   39    b  java.lang.Throwable::<init> (25 bytes)
   *  --- ns   java.lang.Throwable::fillInStackTrace
   *   40   !b  java.net.URLClassLoader::findClass (29 bytes)
   * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
   *   40   made not entrant  (2)  java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert will disappear, but WebClient will
   * fail after the 403th method with unknown reason.
   */
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // Patchable 48-bit load of the target address, then jump through T9.
  __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();
  __ align(16);   // keep the stub within the 16-byte-rounded size_exception_handler()
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code: materialize SharedRuntime::deopt_blob()->unpack()
// into T9 and call it with jalr so the return address (RA) identifies the
// deoptimization point.  Returns the stub's offset, or 0 if the stub
// section could not be expanded.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // Patchable 48-bit load of the unpack entry, then a linking jump.
  __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();
  __ align(16);   // keep the stub within the 16-byte-rounded size_deopt_handler()
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
//FIXME
// Emit the call stub used when a compiled-Java call site must fall back to
// the interpreter.  The stub loads a (initially NULL) methodOop into S3 and
// jumps to a placeholder address via AT; both are patched later when the
// call site is resolved.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), 0);

  // 2012/10/29 Jin: Rmethod contains the methodOop, it should be relocated for GC
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */
  // static stub relocation also tags the methodOop in the code-stream.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;   // placeholder target, patched at resolution time
  __ patchable_set48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
677 // size of call stub, compiled java to interpretor
678 uint size_java_to_interp() {
679 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
680 return round_to(size, 16);
681 }
683 // relocation entries for call stub, compiled java to interpreter
684 uint reloc_java_to_interp() {
685 return 16; // in emit_java_to_interp + in Java_Static_Call
686 }
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else {
691 assert(false, "Not implemented yet !" );
692 Unimplemented();
693 }
694 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Compressed oops never use a complex address form on this platform.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Compressed class pointers never use a complex address form either.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Max vector size in bytes. 0 if not supported.
// Only an 8-byte vector width is supported (asserted below).
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: only 8-byte vectors (Op_VecD) exist on this platform.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are unsupported here; this is a hard failure if reached.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}

// Register for DIVI projection of divmodI
// divmodI/divmodL are not matched on this platform, so these masks are unreachable.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer Java argument registers: T0 and A0..A7 (both register halves).
  if ( reg == T0_num || reg == T0_H_num
    || reg == A0_num || reg == A0_H_num
    || reg == A1_num || reg == A1_H_num
    || reg == A2_num || reg == A2_H_num
    || reg == A3_num || reg == A3_H_num
    || reg == A4_num || reg == A4_H_num
    || reg == A5_num || reg == A5_H_num
    || reg == A6_num || reg == A6_H_num
    || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point Java argument registers: F12..F19 (both register halves).
  if ( reg == F12_num || reg == F12_H_num
    || reg == F13_num || reg == F13_H_num
    || reg == F14_num || reg == F14_H_num
    || reg == F15_num || reg == F15_H_num
    || reg == F16_num || reg == F16_H_num
    || reg == F17_num || reg == F17_H_num
    || reg == F18_num || reg == F18_H_num
    || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}

// Any register that can carry a Java argument may also be used to spill one.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never strength-reduce long division by a constant in the assembler.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL
// divmodL is not matched on this platform, so this mask is unreachable.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
// SP is saved in FP around method-handle invokes on this platform.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}

// Padding needed so this direct call site meets alignment_required().
// The emitted sequence is: lui/ori/dsll/ori (target address), jalr, nop.
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for direct leaf calls.
int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for direct runtime calls.
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// The ecx parameter to rep stos for the ClearArray node is in dwords.
// (Comment is an x86 leftover; on MIPS this only selects dword counting.)
const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the original comment claimed MIPS needs it because it lacks
// absolute addressing, yet the code returns false -- confirm which is intended.
bool SafePointNode::needs_polling_address_input() {
  return false;
}

// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  // Six 4-byte instructions precede the return point:
  //lui
  //ori
  //nop
  //nop
  //jalr
  //nop
  return 24;
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  // Four instructions materialize the inline-cache klass, then six more
  // form the call sequence; all 4 bytes each.
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass

  //lui T9
  //ori T9
  //nop
  //nop
  //jalr T9
  //nop
  return 4 * 4 + 4 * 6;
}
//=============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Classify an allocator register name: invalid -> rc_bad, stack slot ->
// rc_stack, integer register -> rc_int; anything else must be a float
// register (asserted) -> rc_float.
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg) ) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}
959 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
960 // Get registers to move
961 OptoReg::Name src_second = ra_->get_reg_second(in(1));
962 OptoReg::Name src_first = ra_->get_reg_first(in(1));
963 OptoReg::Name dst_second = ra_->get_reg_second(this );
964 OptoReg::Name dst_first = ra_->get_reg_first(this );
966 enum RC src_second_rc = rc_class(src_second);
967 enum RC src_first_rc = rc_class(src_first);
968 enum RC dst_second_rc = rc_class(dst_second);
969 enum RC dst_first_rc = rc_class(dst_first);
971 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
973 // Generate spill code!
974 int size = 0;
976 if( src_first == dst_first && src_second == dst_second )
977 return 0; // Self copy, no move
979 if (src_first_rc == rc_stack) {
980 // mem ->
981 if (dst_first_rc == rc_stack) {
982 // mem -> mem
983 assert(src_second != dst_first, "overlap");
984 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
985 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
986 // 64-bit
987 int src_offset = ra_->reg2offset(src_first);
988 int dst_offset = ra_->reg2offset(dst_first);
989 if (cbuf) {
990 MacroAssembler _masm(cbuf);
991 __ ld(AT, Address(SP, src_offset));
992 __ sd(AT, Address(SP, dst_offset));
993 #ifndef PRODUCT
994 } else {
995 if(!do_size){
996 if (size != 0) st->print("\n\t");
997 st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
998 "sd AT, [SP + #%d]",
999 src_offset, dst_offset);
1000 }
1001 #endif
1002 }
1003 size += 8;
1004 } else {
1005 // 32-bit
1006 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1007 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1008 // No pushl/popl, so:
1009 int src_offset = ra_->reg2offset(src_first);
1010 int dst_offset = ra_->reg2offset(dst_first);
1011 if (cbuf) {
1012 MacroAssembler _masm(cbuf);
1013 __ lw(AT, Address(SP, src_offset));
1014 __ sw(AT, Address(SP, dst_offset));
1015 #ifndef PRODUCT
1016 } else {
1017 if(!do_size){
1018 if (size != 0) st->print("\n\t");
1019 st->print("lw AT, [SP + #%d] spill 2\n\t"
1020 "sw AT, [SP + #%d]\n\t",
1021 src_offset, dst_offset);
1022 }
1023 #endif
1024 }
1025 size += 8;
1026 }
1027 return size;
1028 } else if (dst_first_rc == rc_int) {
1029 // mem -> gpr
1030 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1031 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1032 // 64-bit
1033 int offset = ra_->reg2offset(src_first);
1034 if (cbuf) {
1035 MacroAssembler _masm(cbuf);
1036 __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1037 #ifndef PRODUCT
1038 } else {
1039 if(!do_size){
1040 if (size != 0) st->print("\n\t");
1041 st->print("ld %s, [SP + #%d]\t# spill 3",
1042 Matcher::regName[dst_first],
1043 offset);
1044 }
1045 #endif
1046 }
1047 size += 4;
1048 } else {
1049 // 32-bit
1050 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1051 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1052 int offset = ra_->reg2offset(src_first);
1053 if (cbuf) {
1054 MacroAssembler _masm(cbuf);
1055 if (this->ideal_reg() == Op_RegI)
1056 __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1057 else
1058 __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1059 #ifndef PRODUCT
1060 } else {
1061 if(!do_size){
1062 if (size != 0) st->print("\n\t");
1063 if (this->ideal_reg() == Op_RegI)
1064 st->print("lw %s, [SP + #%d]\t# spill 4",
1065 Matcher::regName[dst_first],
1066 offset);
1067 else
1068 st->print("lwu %s, [SP + #%d]\t# spill 5",
1069 Matcher::regName[dst_first],
1070 offset);
1071 }
1072 #endif
1073 }
1074 size += 4;
1075 }
1076 return size;
1077 } else if (dst_first_rc == rc_float) {
1078 // mem-> xmm
1079 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1080 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1081 // 64-bit
1082 int offset = ra_->reg2offset(src_first);
1083 if (cbuf) {
1084 MacroAssembler _masm(cbuf);
1085 __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1086 #ifndef PRODUCT
1087 } else {
1088 if(!do_size){
1089 if (size != 0) st->print("\n\t");
1090 st->print("ldc1 %s, [SP + #%d]\t# spill 6",
1091 Matcher::regName[dst_first],
1092 offset);
1093 }
1094 #endif
1095 }
1096 size += 4;
1097 } else {
1098 // 32-bit
1099 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1100 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1101 int offset = ra_->reg2offset(src_first);
1102 if (cbuf) {
1103 MacroAssembler _masm(cbuf);
1104 __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1105 #ifndef PRODUCT
1106 } else {
1107 if(!do_size){
1108 if (size != 0) st->print("\n\t");
1109 st->print("lwc1 %s, [SP + #%d]\t# spill 7",
1110 Matcher::regName[dst_first],
1111 offset);
1112 }
1113 #endif
1114 }
1115 size += 4;
1116 }
1117 return size;
1118 }
1119 } else if (src_first_rc == rc_int) {
1120 // gpr ->
1121 if (dst_first_rc == rc_stack) {
1122 // gpr -> mem
1123 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1124 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1125 // 64-bit
1126 int offset = ra_->reg2offset(dst_first);
1127 if (cbuf) {
1128 MacroAssembler _masm(cbuf);
1129 __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1130 #ifndef PRODUCT
1131 } else {
1132 if(!do_size){
1133 if (size != 0) st->print("\n\t");
1134 st->print("sd %s, [SP + #%d] # spill 8",
1135 Matcher::regName[src_first],
1136 offset);
1137 }
1138 #endif
1139 }
1140 size += 4;
1141 } else {
1142 // 32-bit
1143 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1144 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1145 int offset = ra_->reg2offset(dst_first);
1146 if (cbuf) {
1147 MacroAssembler _masm(cbuf);
1148 __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1149 #ifndef PRODUCT
1150 } else {
1151 if(!do_size){
1152 if (size != 0) st->print("\n\t");
1153 st->print("sw %s, [SP + #%d]\t# spill 9",
1154 Matcher::regName[src_first], offset);
1155 }
1156 #endif
1157 }
1158 size += 4;
1159 }
1160 return size;
1161 } else if (dst_first_rc == rc_int) {
1162 // gpr -> gpr
1163 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1164 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1165 // 64-bit
1166 if (cbuf) {
1167 MacroAssembler _masm(cbuf);
1168 __ move(as_Register(Matcher::_regEncode[dst_first]),
1169 as_Register(Matcher::_regEncode[src_first]));
1170 #ifndef PRODUCT
1171 } else {
1172 if(!do_size){
1173 if (size != 0) st->print("\n\t");
1174 st->print("move(64bit) %s <-- %s\t# spill 10",
1175 Matcher::regName[dst_first],
1176 Matcher::regName[src_first]);
1177 }
1178 #endif
1179 }
1180 size += 4;
1181 return size;
1182 } else {
1183 // 32-bit
1184 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1185 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1186 if (cbuf) {
1187 MacroAssembler _masm(cbuf);
1188 if (this->ideal_reg() == Op_RegI)
1189 __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
1190 else
1191 __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
1193 #ifndef PRODUCT
1194 } else {
1195 if(!do_size){
1196 if (size != 0) st->print("\n\t");
1197 st->print("move(32-bit) %s <-- %s\t# spill 11",
1198 Matcher::regName[dst_first],
1199 Matcher::regName[src_first]);
1200 }
1201 #endif
1202 }
1203 size += 4;
1204 return size;
1205 }
1206 } else if (dst_first_rc == rc_float) {
1207 // gpr -> xmm
1208 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1209 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1210 // 64-bit
1211 if (cbuf) {
1212 MacroAssembler _masm(cbuf);
1213 __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
1214 #ifndef PRODUCT
1215 } else {
1216 if(!do_size){
1217 if (size != 0) st->print("\n\t");
1218 st->print("dmtc1 %s, %s\t# spill 12",
1219 Matcher::regName[dst_first],
1220 Matcher::regName[src_first]);
1221 }
1222 #endif
1223 }
1224 size += 4;
1225 } else {
1226 // 32-bit
1227 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1228 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1229 if (cbuf) {
1230 MacroAssembler _masm(cbuf);
1231 __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
1232 #ifndef PRODUCT
1233 } else {
1234 if(!do_size){
1235 if (size != 0) st->print("\n\t");
1236 st->print("mtc1 %s, %s\t# spill 13",
1237 Matcher::regName[dst_first],
1238 Matcher::regName[src_first]);
1239 }
1240 #endif
1241 }
1242 size += 4;
1243 }
1244 return size;
1245 }
1246 } else if (src_first_rc == rc_float) {
1247 // xmm ->
1248 if (dst_first_rc == rc_stack) {
1249 // xmm -> mem
1250 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1251 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1252 // 64-bit
1253 int offset = ra_->reg2offset(dst_first);
1254 if (cbuf) {
1255 MacroAssembler _masm(cbuf);
1256 __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
1257 #ifndef PRODUCT
1258 } else {
1259 if(!do_size){
1260 if (size != 0) st->print("\n\t");
1261 st->print("sdc1 %s, [SP + #%d]\t# spill 14",
1262 Matcher::regName[src_first],
1263 offset);
1264 }
1265 #endif
1266 }
1267 size += 4;
1268 } else {
1269 // 32-bit
1270 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1271 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1272 int offset = ra_->reg2offset(dst_first);
1273 if (cbuf) {
1274 MacroAssembler _masm(cbuf);
1275 __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
1276 #ifndef PRODUCT
1277 } else {
1278 if(!do_size){
1279 if (size != 0) st->print("\n\t");
1280 st->print("swc1 %s, [SP + #%d]\t# spill 15",
1281 Matcher::regName[src_first],
1282 offset);
1283 }
1284 #endif
1285 }
1286 size += 4;
1287 }
1288 return size;
1289 } else if (dst_first_rc == rc_int) {
1290 // xmm -> gpr
1291 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1292 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1293 // 64-bit
1294 if (cbuf) {
1295 MacroAssembler _masm(cbuf);
1296 __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1297 #ifndef PRODUCT
1298 } else {
1299 if(!do_size){
1300 if (size != 0) st->print("\n\t");
1301 st->print("dmfc1 %s, %s\t# spill 16",
1302 Matcher::regName[dst_first],
1303 Matcher::regName[src_first]);
1304 }
1305 #endif
1306 }
1307 size += 4;
1308 } else {
1309 // 32-bit
1310 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1311 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1312 if (cbuf) {
1313 MacroAssembler _masm(cbuf);
1314 __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1315 #ifndef PRODUCT
1316 } else {
1317 if(!do_size){
1318 if (size != 0) st->print("\n\t");
1319 st->print("mfc1 %s, %s\t# spill 17",
1320 Matcher::regName[dst_first],
1321 Matcher::regName[src_first]);
1322 }
1323 #endif
1324 }
1325 size += 4;
1326 }
1327 return size;
1328 } else if (dst_first_rc == rc_float) {
1329 // xmm -> xmm
1330 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1331 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1332 // 64-bit
1333 if (cbuf) {
1334 MacroAssembler _masm(cbuf);
1335 __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1336 #ifndef PRODUCT
1337 } else {
1338 if(!do_size){
1339 if (size != 0) st->print("\n\t");
1340 st->print("mov_d %s <-- %s\t# spill 18",
1341 Matcher::regName[dst_first],
1342 Matcher::regName[src_first]);
1343 }
1344 #endif
1345 }
1346 size += 4;
1347 } else {
1348 // 32-bit
1349 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1350 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1351 if (cbuf) {
1352 MacroAssembler _masm(cbuf);
1353 __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1354 #ifndef PRODUCT
1355 } else {
1356 if(!do_size){
1357 if (size != 0) st->print("\n\t");
1358 st->print("mov_s %s <-- %s\t# spill 19",
1359 Matcher::regName[dst_first],
1360 Matcher::regName[src_first]);
1361 }
1362 #endif
1363 }
1364 size += 4;
1365 }
1366 return size;
1367 }
1368 }
1370 assert(0," foo ");
1371 Unimplemented();
1372 return size;
1374 }
#ifndef PRODUCT
// Debug-only listing of a spill copy: run the worker in print mode
// (no code buffer, no sizing).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy instructions: run the worker in emit mode.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy: run the worker in sizing mode
// (do_size == true; nothing is emitted or printed).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1390 //=============================================================================
1391 #
#ifndef PRODUCT
// Debug-only listing of a compiler breakpoint node.
// NOTE(review): "INT3" is x86 terminology inherited from the template;
// emit() below uses the macro assembler's int3() breakpoint helper.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint via the macro assembler's int3() helper.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Let the generic machinery measure the emitted breakpoint code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1409 //=============================================================================
1410 #ifndef PRODUCT
1411 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1412 Compile *C = ra_->C;
1413 int framesize = C->frame_size_in_bytes();
1415 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1417 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1418 st->cr(); st->print("\t");
1419 if (UseLoongsonISA) {
1420 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1421 } else {
1422 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1423 st->cr(); st->print("\t");
1424 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1425 }
1427 if( do_polling() && C->is_method_compilation() ) {
1428 st->print("Poll Safepoint # MachEpilogNode");
1429 }
1430 }
1431 #endif
// Emit the method epilogue: pop the frame, restore RA/FP from just below the
// caller's SP (mirroring MachPrologNode::emit), and for normal method
// compilations touch the polling page so a return safepoint can be taken.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson load-quad: restore RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Return-polling safepoint: load from the polling page; the load traps
    // when the VM has armed the page.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson vs plain MIPS, polling or not);
// let the generic machinery measure the emitted code.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries the epilogue may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1468 int MachEpilogNode::safepoint_offset() const { return 0; }
1470 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of a BoxLock: materializing the address of an
// on-stack lock slot into a register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// A BoxLock is emitted as a single 4-byte addi (see emit() below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Compute the address of the on-stack lock slot (SP + offset) into the
// node's result register.
// NOTE(review): addi takes a 16-bit signed immediate; this assumes the
// lock-slot offset always fits -- confirm against maximum frame sizes.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  __ addi(as_Register(reg), SP, offset);
  /* Leftover x86 LEA encoding from the template this port was derived from:
  if( offset >= 128 ) {
  emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
  emit_rm(cbuf, 0x2, reg, 0x04);
  emit_rm(cbuf, 0x0, 0x04, SP_enc);
  emit_d32(cbuf, offset);
  }
  else {
  emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
  emit_rm(cbuf, 0x1, reg, 0x04);
  emit_rm(cbuf, 0x0, 0x04, SP_enc);
  emit_d8(cbuf, offset);
  }
  */
}
1508 //static int sizeof_FFree_Float_Stack_All = -1;
// Byte offset from the start of a runtime call to the return address.
// The runtime call site is a fixed 6-instruction sequence:
//   lui, ori, dsll, ori, jalr, nop
// which is exactly NativeCall::instruction_size (24) bytes.
int MachCallRuntimeNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1526 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of a padding-nop sequence (_count 4-byte nops).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1533 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1534 MacroAssembler _masm(&cbuf);
1535 int i = 0;
1536 for(i = 0; i < _count; i++)
1537 __ nop();
1538 }
// Each nop is one 4-byte MIPS instruction.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for padding nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1547 //=============================================================================
1549 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the unverified entry point; mirrors emit() below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
1560 #endif
// Unverified entry point: verify that the receiver's klass matches the
// inline-cache klass; on mismatch, jump to the shared IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  // Compare the receiver's klass against the expected inline-cache klass.
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop(); // branch delay slot

  // Mismatch: tail-jump to the IC-miss stub.
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop(); // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size varies with alignment padding; measure the emitted code.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1593 //=============================================================================
1595 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is materialized as an absolute address on this port
// (see MachConstantBaseNode::emit), so no bias from the table start is used.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1601 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called, since requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the absolute address of the constants section into the
// node's output register, tagged with an internal-pc relocation so the
// address is updated if the code blob moves. Emits nothing when the
// constants section is empty.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    // table_base_offset() is 0 on this port (absolute addressing).
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// patchable_set48 expands to a fixed 4-instruction (16-byte) sequence.
// NOTE(review): emit() produces no code when the constants section is
// empty, so this is an over-estimate in that case -- confirm C2 tolerates it.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug-only listing of the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1638 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the method prologue; mirrors emit() below:
// optional stack bang, save RA/FP below caller's SP, set FP, allocate frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson store-quad saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1667 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1668 Compile* C = ra_->C;
1669 MacroAssembler _masm(&cbuf);
1671 int framesize = C->frame_size_in_bytes();
1672 int bangsize = C->bang_size_in_bytes();
1674 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1676 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1678 if (C->need_stack_bang(framesize)) {
1679 __ generate_stack_overflow_check(framesize);
1680 }
1682 if (UseLoongsonISA) {
1683 __ gssq(RA, FP, SP, -wordSize*2);
1684 } else {
1685 __ sd(RA, SP, -wordSize);
1686 __ sd(FP, SP, -wordSize*2);
1687 }
1688 __ daddiu(FP, SP, -wordSize*2);
1689 __ daddiu(SP, SP, -framesize);
1690 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1691 __ nop();
1693 C->set_frame_complete(cbuf.insts_size());
1694 if (C->has_mach_constant_base_node()) {
1695 // NOTE: We set the table base offset here because users might be
1696 // emitted before MachConstantBaseNode.
1697 Compile::ConstantTable& constant_table = C->constant_table();
1698 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1699 }
1701 }
// Prologue size depends on too many variables; measure the emitted code.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries the prologue may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1713 %}
1715 //----------ENCODING BLOCK-----------------------------------------------------
1716 // This block specifies the encoding classes used by the compiler to output
1717 // byte streams. Encoding classes generate functions which are called by
1718 // Machine Instruction Nodes in order to generate the bit encoding of the
1719 // instruction. Operands specify their base encoding interface with the
1720 // interface keyword. There are currently supported four interfaces,
1721 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1722 // operand to generate a function which returns its register number when
1723 // queried. CONST_INTER causes an operand to generate a function which
1724 // returns the value of the constant when queried. MEMORY_INTER causes an
1725 // operand to generate four functions which return the Base Register, the
1726 // Index Register, the Scale Value, and the Offset Value of the operand when
1727 // queried. COND_INTER causes an operand to generate six functions which
1728 // return the encoding code (ie - encoding bits for the instruction)
1729 // associated with each basic boolean condition for a conditional instruction.
1730 // Instructions specify two basic values for encoding. They use the
1731 // ins_encode keyword to specify their encoding class (which must be one of
1732 // the class names specified in the encoding block), and they use the
1733 // opcode keyword to specify, in order, their primary, secondary, and
1734 // tertiary opcode. Only the opcode sections which a particular instruction
1735 // needs for encoding need to be specified.
1736 encode %{
1737 /*
1738 Alias:
1739 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1740 118 B14: # B19 B15 <- B13 Freq: 0.899955
1741 118 add S1, S2, V0 #@addP_reg_reg
1742 11c lb S0, [S1 + #-8257524] #@loadB
1743 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1744 */
1745 //Load byte signed
  // Load byte signed: dst = sign_extend(*(int8_t*)(base + (index << scale) + disp)).
  // AT and T9 are scratch; on Loongson the indexed load gslbx folds the
  // final base+index add.
  // NOTE(review): this path passes any simm16 disp to gslbx, while the
  // store encodings (store_B_reg_enc etc.) restrict gs*x immediates to
  // is_simm(disp, 8) -- verify the gslbx immediate field width.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          // Plain MIPS: compute base + (index << scale) into AT, then lb.
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Large displacement: materialize disp in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
1802 //Load byte unsigned
  // Load byte unsigned: dst = zero_extend(*(uint8_t*)(base + (index << scale) + disp)).
  // AT and T9 are scratch; no Loongson indexed form is used here.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // Compute base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Large displacement: fold it into the address via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register: *(int8_t*)(base + (index << scale) + disp) = src.
  // AT and T9 are scratch; on Loongson the indexed store gssbx (8-bit
  // immediate, hence the is_simm(disp, 8) checks) folds the final add.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src = $src$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Large displacement: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store immediate byte: *(int8_t*)(base + (index << scale) + disp) = value.
  // A zero value is stored directly from R0; otherwise it is materialized
  // in a scratch register (T9 or AT). On Loongson the indexed store gssbx
  // (8-bit immediate) is used where the displacement fits.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: compute the full address into AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: use gssbx where the displacement fits in 8 bits.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp, then index is the gssbx offset register.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }
        } else {
          // Scaled index.
          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp, used as the gssbx offset register.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store Byte (immediate) followed by a full memory barrier (sync) —
  // used for volatile byte stores.  Same addressing cases as
  // store_B_immI_enc: address = base + (index << scale) + disp, with AT/T9
  // as scratch and R0 stored directly when the immediate is zero.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp,8) ) {
          // disp fits gssbx's 8-bit field: indexed store, no address adds.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // 16-bit disp: compute base+index into AT, use plain sb.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large disp: AT = (index << scale) + disp; gssbx adds base.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Generic MIPS: AT = base + (index << scale), then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp only.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Full barrier after the store (volatile semantics).
    __ sync();
  %}
  // Load Short (16bit signed)
  // Sign-extending halfword load:
  //   dst = *(int16_t*)(base + (index << scale) + disp).
  // On Loongson, gslhx (load-halfword-indexed) folds base+index into the
  // load when disp fits in 8 signed bits; otherwise lh with AT/T9 scratch.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large disp: AT = (index << scale) + disp; gslhx adds base.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load Char (16bit unsigned)
  // Zero-extending halfword load:
  //   dst = *(uint16_t*)(base + (index << scale) + disp).
  // No Loongson-specific form here — the address is always materialized
  // with AT/T9 scratch arithmetic.
  enc_class load_C_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lhu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        // NOTE(review): 32-bit addu on a 64-bit address here, while the
        // sibling load_S_enc uses daddu for the same case — confirm intended.
        __ addu(AT, AT, T9);
        __ lhu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lhu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lhu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store Char (16bit unsigned)
  // Halfword store of a register: *(base + (index << scale) + disp) = src.
  // Loongson gsshx (store-halfword-indexed) is used when disp fits in
  // 8 signed bits, or with offset 0 once the address has been formed.
  // NOTE(review): this encoding uses 32-bit addu for address arithmetic
  // where the load_S/load_P encodings use daddu — confirm intended.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // Large disp: AT = base + (index << scale), disp supplied via T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store Char zero: halfword store of constant 0, taken directly from R0.
  // Address cases mirror store_C_reg_enc (same addu-based arithmetic).
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gsshx's 8-bit field: indexed store, no address adds.
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        // Large disp: AT = base + (index << scale), disp supplied via T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Load Int (32-bit, sign-extending lw):
  //   dst = *(int32_t*)(base + (index << scale) + disp).
  // Loongson gslwx folds base+index into the load when disp fits in
  // 8 signed bits.
  // NOTE(review): address arithmetic uses 32-bit addu here (cf. daddu in
  // load_S_enc / load_P_enc) — confirm intended.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        // Large disp: AT = base + (index << scale), disp supplied via T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store Int (32-bit) from a register:
  //   *(int32_t*)(base + (index << scale) + disp) = src.
  // Loongson gsswx folds base+index into the store when disp fits in
  // 8 signed bits.
  // NOTE(review): address arithmetic uses 32-bit addu here, like
  // load_I_enc — confirm intended.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        // Large disp: AT = base + (index << scale), disp supplied via T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store Int (32-bit) of an immediate value:
  //   *(int32_t*)(base + (index << scale) + disp) = value.
  // A zero immediate is stored directly from R0; non-zero values are
  // materialized into T9 (or AT).  Loongson gsswx handles the indexed
  // forms when disp fits in 8 signed bits.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit field: indexed store, no address adds.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // 16-bit disp: form base+index in AT, use plain sw.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large disp: AT = (index << scale) + disp; gsswx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Generic MIPS: AT = base + (index << scale), then sw.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp only.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load compressed oop (narrow, 32-bit zero-extended via lwu):
  //   dst = *(uint32_t*)(base + (index << scale) + disp).
  // A relocated displacement is not supported (asserted below); set64 is
  // used for the large-disp case so the full constant is materialized.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load Pointer (64-bit ld):
  //   dst = *(intptr_t*)(base + (index << scale) + disp).
  // Loongson gsldx folds base+index into the load when disp fits in
  // 8 signed bits; a relocated displacement is not supported (asserted).
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large disp: AT = (index << scale) + disp; gsldx adds base.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit sd) from a register:
  //   *(intptr_t*)(base + (index << scale) + disp) = src.
  // Loongson gssdx folds base+index into the store when disp fits in
  // 8 signed bits.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large disp: AT = (index << scale) + disp; gssdx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store compressed oop (narrow, 32-bit sw) from a register:
  //   *(uint32_t*)(base + (index << scale) + disp) = src.
  // Structure parallels store_P_reg_enc with sw/gsswx instead of sd/gssdx.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large disp: AT = (index << scale) + disp; gsswx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store null Pointer: 64-bit store of constant 0 taken directly from R0.
  //   *(intptr_t*)(base + (index << scale) + disp) = 0.
  // Branches first on scale (unlike the other pointer encodings which
  // branch on ISA first), then on whether disp fits the sd/gssdx fields.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          // Large disp: AT = base + index, disp supplied via T9.
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store narrow-oop null: 32-bit store of constant 0 from R0.
  //   *(uint32_t*)(base + (index << scale) + disp) = 0.
  // Plain MIPS encoding only — no Loongson-specific variant here.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit ld):
  //   dst = *(int64_t*)(base + (index << scale) + disp).
  // Starts with an explicit lw from [base+0] that acts as an implicit
  // null check (see the 2013/03/27 note below).
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    // Dummy load from [base+0]: faults here (at the instruction the JIT
    // records as the exception offset) if base is null.
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     * 337 b java.util.Arrays::sort1 (401 bytes)
     * B73:
     * d34 lw T4.lo, [T4 + #16] #@loadL-lo
     * lw T4.hi, [T4 + #16]+4 #@loadL-hi
     *
     * The original instructions generated here are :
     * __ lw(dst_lo, as_Register(base), disp);
     * __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move of base into AT looks redundant — the ld
        // could use base directly; kept as-is (see 2012/10/04 note above).
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit sd) from a register:
  //   *(int64_t*)(base + (index << scale) + disp) = src.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move of base into AT looks redundant — the sd
        // could use base directly; kept as-is (mirrors load_L_enc).
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
  // Store Long zero: 64-bit store of constant 0 taken directly from R0.
  //   *(int64_t*)(base + (index << scale) + disp) = 0.
  // NOTE(review): the large-disp paths use 32-bit addu for the address
  // add, while the sibling store_L_reg_enc uses daddu — confirm intended.
  enc_class store_L_immL0_enc (memory mem, immL0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sd(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): base is copied into AT before the sd even though
        // sd could use base directly; kept as-is (mirrors store_L_reg_enc).
        __ move(AT, as_Register(base));
        __ sd(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  %}
  // Load Float (32-bit lwc1 into an FPU register):
  //   dst = *(float*)(base + (index << scale) + disp).
  // Loongson gslwxc1 folds base+index into the load when disp fits in
  // 8 signed bits; otherwise the address is formed in AT/T9.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Large disp: AT = base + (index << scale), disp supplied via T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3172 enc_class store_F_reg_enc (memory mem, regF src) %{
3173 MacroAssembler _masm(&cbuf);
3174 int base = $mem$$base;
3175 int index = $mem$$index;
3176 int scale = $mem$$scale;
3177 int disp = $mem$$disp;
3178 FloatRegister src = $src$$FloatRegister;
3180 if( index != 0 ) {
3181 if( Assembler::is_simm16(disp) ) {
3182 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3183 if (scale == 0) {
3184 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3185 } else {
3186 __ dsll(AT, as_Register(index), scale);
3187 __ gsswxc1(src, as_Register(base), AT, disp);
3188 }
3189 } else {
3190 if (scale == 0) {
3191 __ daddu(AT, as_Register(base), as_Register(index));
3192 } else {
3193 __ dsll(AT, as_Register(index), scale);
3194 __ daddu(AT, as_Register(base), AT);
3195 }
3196 __ swc1(src, AT, disp);
3197 }
3198 } else {
3199 if (scale == 0) {
3200 __ daddu(AT, as_Register(base), as_Register(index));
3201 } else {
3202 __ dsll(AT, as_Register(index), scale);
3203 __ daddu(AT, as_Register(base), AT);
3204 }
3205 __ move(T9, disp);
3206 if( UseLoongsonISA ) {
3207 __ gsswxc1(src, AT, T9, 0);
3208 } else {
3209 __ daddu(AT, AT, T9);
3210 __ swc1(src, AT, 0);
3211 }
3212 }
3213 } else {
3214 if( Assembler::is_simm16(disp) ) {
3215 __ swc1(src, as_Register(base), disp);
3216 } else {
3217 __ move(T9, disp);
3218 if( UseLoongsonISA ) {
3219 __ gslwxc1(src, as_Register(base), T9, 0);
3220 } else {
3221 __ daddu(AT, as_Register(base), T9);
3222 __ swc1(src, AT, 0);
3223 }
3224 }
3225 }
3226 %}
3228 enc_class load_D_enc (regD dst, memory mem) %{
3229 MacroAssembler _masm(&cbuf);
3230 int base = $mem$$base;
3231 int index = $mem$$index;
3232 int scale = $mem$$scale;
3233 int disp = $mem$$disp;
3234 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3236 if( index != 0 ) {
3237 if( Assembler::is_simm16(disp) ) {
3238 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3239 if (scale == 0) {
3240 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3241 } else {
3242 __ dsll(AT, as_Register(index), scale);
3243 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3244 }
3245 } else {
3246 if (scale == 0) {
3247 __ daddu(AT, as_Register(base), as_Register(index));
3248 } else {
3249 __ dsll(AT, as_Register(index), scale);
3250 __ daddu(AT, as_Register(base), AT);
3251 }
3252 __ ldc1(dst_reg, AT, disp);
3253 }
3254 } else {
3255 if (scale == 0) {
3256 __ daddu(AT, as_Register(base), as_Register(index));
3257 } else {
3258 __ dsll(AT, as_Register(index), scale);
3259 __ daddu(AT, as_Register(base), AT);
3260 }
3261 __ move(T9, disp);
3262 if( UseLoongsonISA ) {
3263 __ gsldxc1(dst_reg, AT, T9, 0);
3264 } else {
3265 __ addu(AT, AT, T9);
3266 __ ldc1(dst_reg, AT, 0);
3267 }
3268 }
3269 } else {
3270 if( Assembler::is_simm16(disp) ) {
3271 __ ldc1(dst_reg, as_Register(base), disp);
3272 } else {
3273 __ move(T9, disp);
3274 if( UseLoongsonISA ) {
3275 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3276 } else {
3277 __ addu(AT, as_Register(base), T9);
3278 __ ldc1(dst_reg, AT, 0);
3279 }
3280 }
3281 }
3282 %}
3284 enc_class store_D_reg_enc (memory mem, regD src) %{
3285 MacroAssembler _masm(&cbuf);
3286 int base = $mem$$base;
3287 int index = $mem$$index;
3288 int scale = $mem$$scale;
3289 int disp = $mem$$disp;
3290 FloatRegister src_reg = as_FloatRegister($src$$reg);
3292 if( index != 0 ) {
3293 if( Assembler::is_simm16(disp) ) {
3294 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3295 if (scale == 0) {
3296 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3297 } else {
3298 __ dsll(AT, as_Register(index), scale);
3299 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3300 }
3301 } else {
3302 if (scale == 0) {
3303 __ daddu(AT, as_Register(base), as_Register(index));
3304 } else {
3305 __ dsll(AT, as_Register(index), scale);
3306 __ daddu(AT, as_Register(base), AT);
3307 }
3308 __ sdc1(src_reg, AT, disp);
3309 }
3310 } else {
3311 if (scale == 0) {
3312 __ daddu(AT, as_Register(base), as_Register(index));
3313 } else {
3314 __ dsll(AT, as_Register(index), scale);
3315 __ daddu(AT, as_Register(base), AT);
3316 }
3317 __ move(T9, disp);
3318 if( UseLoongsonISA ) {
3319 __ gssdxc1(src_reg, AT, T9, 0);
3320 } else {
3321 __ addu(AT, AT, T9);
3322 __ sdc1(src_reg, AT, 0);
3323 }
3324 }
3325 } else {
3326 if( Assembler::is_simm16(disp) ) {
3327 __ sdc1(src_reg, as_Register(base), disp);
3328 } else {
3329 __ move(T9, disp);
3330 if( UseLoongsonISA ) {
3331 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3332 } else {
3333 __ addu(AT, as_Register(base), T9);
3334 __ sdc1(src_reg, AT, 0);
3335 }
3336 }
3337 }
3338 %}
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  // Emit a call from compiled Java code into the VM runtime.
  MacroAssembler _masm(&cbuf);
  // This is the instruction starting address for relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // general_jal presumably chooses an appropriate jal/long-call sequence for
  // the (possibly far) runtime target — TODO confirm in MacroAssembler.
  __ general_jal((address)$meth$$method);
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
  // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  // Pick relocation by call kind so the runtime can later patch the target:
  // no _method -> call into runtime stub; _optimized_virtual -> statically
  // bound virtual call; otherwise a true static call.
  if ( !_method ) {
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  __ general_jal((address)($meth$$method));
  if( _method ) {  // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
/*
 * Inline-cache dispatched call.
 * [Ref: LIR_Assembler::ic_call() ]
 */
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  // ic_call is expected to set up the inline-cache relocation and emit the
  // call itself (unlike the static-call encoding above, which does both
  // steps explicitly here).
  __ ic_call((address)$meth$$method);
%}
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  // Materialize a boolean "flags" value from AT:
  // flags = (AT == 0) ? 0 : 0xFFFFFFFF.
  // NOTE(review): AT is presumably set by the preceding fast_lock/fast_unlock
  // emission — confirm against the instructions using this enc_class.
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);       // flags = 0
  __ beq(AT, R0, L);            // AT == 0: leave flags at 0
  __ delayed()->nop();          // branch delay slot
  __ move(flags, 0xFFFFFFFF);   // AT != 0: flags = all ones
  __ bind(L);
%}
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  // Slow-path subtype check: result = 0 when sub is a subtype of super,
  // 1 otherwise. T9 is used as an extra scratch register.
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   *   47c   B40: #  B21 B41 <- B20  Freq: 0.155379
   *   47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   *   4bc     mov   S2, NULL #@loadConP
   *   4c0     beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                  NULL, &miss,
                  /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI */
  __ move(result, 0);   // hit: subtype confirmed
  __ b(done);
  __ nop();             // branch delay slot

  __ bind(miss);
  __ move(result, 1);   // miss: not a subtype
  __ bind(done);
%}
3423 %}
3426 //---------MIPS FRAME--------------------------------------------------------------
3427 // Definition of frame structure and management information.
3428 //
3429 // S T A C K L A Y O U T Allocators stack-slot number
3430 // | (to get allocators register number
3431 // G Owned by | | v add SharedInfo::stack0)
3432 // r CALLER | |
3433 // o | +--------+ pad to even-align allocators stack-slot
3434 // w V | pad0 | numbers; owned by CALLER
3435 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3436 // h ^ | in | 5
3437 // | | args | 4 Holes in incoming args owned by SELF
3438 // | | old | | 3
3439 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3440 // v | | ret | 3 return address
3441 // Owned by +--------+
3442 // Self | pad2 | 2 pad to align old SP
3443 // | +--------+ 1
3444 // | | locks | 0
3445 // | +--------+----> SharedInfo::stack0, even aligned
3446 // | | pad1 | 11 pad to align new SP
3447 // | +--------+
3448 // | | | 10
3449 // | | spills | 9 spills
3450 // V | | 8 (pad0 slot for callee)
3451 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3452 // ^ | out | 7
3453 // | | args | 6 Holes in outgoing args owned by CALLEE
3454 // Owned by new | |
3455 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3456 // | |
3457 //
3458 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3459 // known from SELF's arguments and the Java calling convention.
3460 // Region 6-7 is determined per call site.
3461 // Note 2: If the calling convention leaves holes in the incoming argument
3462 // area, those holes are owned by SELF. Holes in the outgoing area
3463 // are owned by the CALLEE. Holes should not be nessecary in the
3464 // incoming area, as the Java calling convention is completely under
3465 // the control of the AD file. Doubles can be sorted and packed to
3466 // avoid holes. Holes in the outgoing arguments may be nessecary for
3467 // varargs C calling conventions.
3468 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3469 // even aligned with pad0 as needed.
3470 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3471 // region 6-11 is even aligned; it may be padded out more so that
3472 // the region from SP to FP meets the minimum stack alignment.
3473 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3474 // alignment. Region 11, pad1, may be dynamically extended so that
3475 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention
  // & StartOSRNode::calling_convention for more information. by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes);  // wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4);  //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Same as above but for calls into native (C) code.
  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values.
  // Ints/pointers in V0 (+V0_H for longs), floats/doubles in F0 (+F0_H).
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled Java return values (same registers as C above).
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);      // Required cost attribute
ins_attrib ins_size(32);       // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0);// Required flag: is this instruction a
                               // non-matching short branch variant of some
                               // long branch?
ins_attrib ins_alignment(4);   // Required alignment attribute (must be a power of 2)
                               // specifies the alignment that some part of the instruction (not
                               // necessarily the start) requires.  If > 1, a compute_padding()
                               // function must be provided for the instruction
3609 //----------OPERANDS-----------------------------------------------------------
3610 // Operand definitions must precede instruction definitions for correct parsing
3611 // in the ADLC because operands constitute user defined types which are used in
3612 // instruction definitions.
3614 // Vectors
// 64-bit vector operand, allocated in the double-FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
3632 //----------Simple Operands----------------------------------------------------
3633 //TODO: Should we need to define some more special immediate number ?
3634 // Immediate Operands
3635 // Integer Immediate
// Integer Immediate (any 32-bit constant)
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest signed 32-bit integer.
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 8-bit integer immediate.
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit integer immediate (fits MIPS I-type instructions).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift amounts.
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 64-bit shift amounts >= 32 (upper-word shifts).
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate whose negation fits 16 bits (for subtract-via-add).
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword masking.
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
3888 // Pointer Immediate
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit (oop, or expensive to materialize inline)
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit (non-oop, cheap to materialize inline)
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-oop Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-klass Pointer Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
3976 // Long Immediate
// Long Immediate (any 64-bit constant)
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit long immediate.
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate whose negation fits 16 bits (for subtract-via-add).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
4153 //single-precision floating-point zero
//single-precision floating-point zero (compared by bit pattern)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point zero (compared by bit pattern)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4191 // Register Operands
4192 // Integer Register
// Integer Register (any allocatable int register)
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register, excluding the Ax argument registers.
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register operands: each pins allocation to one specific register,
// for instructions whose encoding requires a particular register.
operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// --- Narrow-oop (compressed pointer, RegN) register operands ----------------
// mRegN is the general class for compressed oops (allocated out of the
// integer register file); the named t*/a*/s*/v* variants below pin a
// narrow oop into one specific register.  All the pinned variants also
// match mRegN so they can substitute for the general class.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register
// --- Pointer (RegP) register operands ---------------------------------------
// mRegP is the general pointer class.  no_T8_mRegP excludes register T8,
// for rules that need T8 free as a scratch register.  The named variants
// pin a pointer into one specific register; each also matches mRegP and
// (except t8_RegP) no_T8_mRegP so the matcher can widen them.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 is the register excluded from no_T8_p_reg, so unlike its siblings
// this operand deliberately does NOT match no_T8_mRegP.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4933 /*
4934 operand mSPRegP(mRegP reg) %{
4935 constraint(ALLOC_IN_RC(sp_reg));
4936 match(reg);
4938 format %{ "SP" %}
4939 interface(REG_INTER);
4940 %}
4942 operand mFPRegP(mRegP reg) %{
4943 constraint(ALLOC_IN_RC(fp_reg));
4944 match(reg);
4946 format %{ "FP" %}
4947 interface(REG_INTER);
4948 %}
4949 */
// --- Long (64-bit, RegL) register operands ----------------------------------
// mRegL is the general 64-bit integer class; the named variants pin a long
// value into one specific register and also match mRegL.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  // NOTE(review): the non-empty format here is inconsistent with the other
  // *RegL operands, which use an empty format — presumably cosmetic only;
  // confirm before normalizing.
  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands
// Single-precision float values, allocated from the FP register file.
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

//Double Precision Floating register operands
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Each memory operand describes one addressing-mode shape the matcher can
// fold into a load/store: a base register, optional index (possibly scaled
// by a shift), and optional displacement.  The *Narrow / *NarrowKlass
// variants apply only when the compressed oop/klass shift is zero, so the
// narrow value can be used directly as a base address.
// Indirect Memory Operand
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  op_cost(10);
  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset], where the index is an int widened to long
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Same as above, with the index produced by an int-to-long conversion
operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// (base is a compressed oop; valid only when the narrow-oop shift is 0)
operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset] for compressed Oops
operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  predicate(Universe::narrow_oop_shift() == 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

//FIXME: I think it's better to limit the immI to be 16-bit at most!
// Indirect Memory Plus Long Offset Operand
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect through a compressed klass pointer (shift must be 0)
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Operand (compressed oop base; shift must be 0)
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8Narrow(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

//----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use its address expression again after loading
// the first word of the long. If the load-long destination overlaps with
// registers used in the addressing expression, the 2nd half will be loaded
// from a clobbered address. Fix this by requiring that load-long use
// address registers that do not overlap with the load-long target.

// load-long support
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}


// Comparison Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// NOTE: currently uses the same encodings as cmpOp above; signedness is
// carried by the instruction that consumes the flags operand.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}

/*
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4);
    not_equal(0x5);
    less(0x2);
    greater_equal(0x3);
    less_equal(0x6);
    greater(0x7);
  %}
%}
*/
/*
// Comparison Code for FP conditional move
operand cmpOp_fcmov() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal        (0x01);
    not_equal    (0x02);
    greater      (0x03);
    greater_equal(0x04);
    less         (0x05);
    less_equal   (0x06);
  %}
%}

// Comparison Code used in long compares
operand cmpOp_commute() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x4);
    not_equal(0x5);
    less(0xF);
    greater_equal(0xE);
    less_equal(0xD);
    greater(0xC);
  %}
%}
*/
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All stack-slot operands address [SP + slot offset]; 0x1d is the encoding
// of the stack pointer register.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
//------------------------OPERAND CLASSES--------------------------------------
// 'memory' groups every addressing-mode operand above so instruction rules
// can accept any of them via a single operand class.
//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5712 //----------PIPELINE-----------------------------------------------------------
5713 // Rules which define the behavior of the target architectures pipeline.
5715 pipeline %{
5717 //----------ATTRIBUTES---------------------------------------------------------
// Global properties of the target pipeline used by the instruction scheduler.
attributes %{
  fixed_size_instructions;             // Fixed size instructions
  branch_has_delay_slot;               // branches have a delay slot on gs2
  max_instructions_per_bundle = 1;     // 1 instruction per bundle
  max_bundles_per_cycle = 4;           // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;           // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;    // The processor fetches one line
  instruction_fetch_units = 1;         // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine:
// four decoders, two integer ALUs, two FPUs, one memory unit, one branch unit.
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);
5750 //----------PIPELINE CLASSES---------------------------------------------------
5751 // Pipeline Classes describe the stages in which input and output are
5752 // referenced by the hardware pipeline.
5754 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5755 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5756 single_instruction;
5757 src1 : RD(read);
5758 src2 : RD(read);
5759 dst : WB(write)+1;
5760 DECODE : ID;
5761 ALU : CA;
5762 %}
5764 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5765 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5766 src1 : RD(read);
5767 src2 : RD(read);
5768 dst : WB(write)+5;
5769 DECODE : ID;
5770 ALU2 : CA;
5771 %}
5773 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5774 src1 : RD(read);
5775 src2 : RD(read);
5776 dst : WB(write)+10;
5777 DECODE : ID;
5778 ALU2 : CA;
5779 %}
5781 //No.19 Integer div operation : dst <-- reg1 div reg2
5782 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5783 src1 : RD(read);
5784 src2 : RD(read);
5785 dst : WB(write)+10;
5786 DECODE : ID;
5787 ALU2 : CA;
5788 %}
5790 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5791 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5792 instruction_count(2);
5793 src1 : RD(read);
5794 src2 : RD(read);
5795 dst : WB(write)+10;
5796 DECODE : ID;
5797 ALU2 : CA;
5798 %}
5800 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5801 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5802 instruction_count(2);
5803 src1 : RD(read);
5804 src2 : RD(read);
5805 dst : WB(write);
5806 DECODE : ID;
5807 ALU : CA;
5808 %}
5810 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5811 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5812 instruction_count(2);
5813 src : RD(read);
5814 dst : WB(write);
5815 DECODE : ID;
5816 ALU : CA;
5817 %}
5819 //no.16 load Long from memory :
5820 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5821 instruction_count(2);
5822 mem : RD(read);
5823 dst : WB(write)+5;
5824 DECODE : ID;
5825 MEM : RD;
5826 %}
5828 //No.17 Store Long to Memory :
5829 pipe_class ialu_storeL(mRegL src, memory mem) %{
5830 instruction_count(2);
5831 mem : RD(read);
5832 src : RD(read);
5833 DECODE : ID;
5834 MEM : RD;
5835 %}
5837 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
// Single instruction; integer ALU op with a 16-bit immediate.
5838 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5839 single_instruction;
5840 src : RD(read);
5841 dst : WB(write);
5842 DECODE : ID;
5843 ALU : CA;
5844 %}
5846 //No.3 Integer move operation : dst <-- reg
// Register-to-register move through the ALU; standard single-cycle latency.
5847 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5848 src : RD(read);
5849 dst : WB(write);
5850 DECODE : ID;
5851 ALU : CA;
5852 %}
5854 //No.4 No instructions : do nothing
// Used by instructs that emit no code (e.g. CreateException below).
5855 pipe_class empty( ) %{
5856 instruction_count(0);
5857 %}
5859 //No.5 UnConditional branch :
// Unconditional jump; may span bundles, occupies the branch unit in RD.
5860 pipe_class pipe_jump( label labl ) %{
5861 multiple_bundles;
5862 DECODE : ID;
5863 BR : RD;
5864 %}
5866 //No.6 ALU Conditional branch :
// Conditional branch comparing two integer registers; uses the branch unit.
5867 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5868 multiple_bundles;
5869 src1 : RD(read);
5870 src2 : RD(read);
5871 DECODE : ID;
5872 BR : RD;
5873 %}
5875 //no.7 load integer from memory :
// Integer load; loaded value ready 3 cycles after WB, uses the MEM unit.
5876 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5877 mem : RD(read);
5878 dst : WB(write)+3;
5879 DECODE : ID;
5880 MEM : RD;
5881 %}
5883 //No.8 Store Integer to Memory :
// Integer store; reads address and data in RD, writes no register.
5884 pipe_class ialu_storeI(mRegI src, memory mem) %{
5885 mem : RD(read);
5886 src : RD(read);
5887 DECODE : ID;
5888 MEM : RD;
5889 %}
5892 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
// Generic two-operand FP arithmetic on the primary FPU unit.
5893 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5894 src1 : RD(read);
5895 src2 : RD(read);
5896 dst : WB(write);
5897 DECODE : ID;
5898 FPU : CA;
5899 %}
5901 //No.22 Floating div operation : dst <-- reg1 div reg2
// FP divide; issues on the secondary FPU unit (FPU2).
5902 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5903 src1 : RD(read);
5904 src2 : RD(read);
5905 dst : WB(write);
5906 DECODE : ID;
5907 FPU2 : CA;
5908 %}
// Int-to-double conversion; executes on FPU1.
5910 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5911 src : RD(read);
5912 dst : WB(write);
5913 DECODE : ID;
5914 FPU1 : CA;
5915 %}
// Double-to-int conversion; executes on FPU1.
5917 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5918 src : RD(read);
5919 dst : WB(write);
5920 DECODE : ID;
5921 FPU1 : CA;
5922 %}
// Move from FP register to integer register (mfc1-style); routed through the MEM unit.
5924 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5925 src : RD(read);
5926 dst : WB(write);
5927 DECODE : ID;
5928 MEM : RD;
5929 %}
// Move from integer register to FP register (mtc1-style);
// occupies the MEM unit for 5 cycles starting at RD.
5931 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5932 src : RD(read);
5933 dst : WB(write);
5934 DECODE : ID;
5935 MEM : RD(5);
5936 %}
5938 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
// FP square root on FPU2; may span bundles.
// NOTE(review): sqrt takes a single operand, yet src2 is declared here — confirm intent.
5939 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5940 multiple_bundles;
5941 src1 : RD(read);
5942 src2 : RD(read);
5943 dst : WB(write);
5944 DECODE : ID;
5945 FPU2 : CA;
5946 %}
5948 //No.11 Load Floating from Memory :
// Single-instruction FP load; value ready 3 cycles after WB.
5949 pipe_class fpu_loadF(regF dst, memory mem) %{
5950 instruction_count(1);
5951 mem : RD(read);
5952 dst : WB(write)+3;
5953 DECODE : ID;
5954 MEM : RD;
5955 %}
5957 //No.12 Store Floating to Memory :
// Single-instruction FP store; writes no register.
5958 pipe_class fpu_storeF(regF src, memory mem) %{
5959 instruction_count(1);
5960 mem : RD(read);
5961 src : RD(read);
5962 DECODE : ID;
5963 MEM : RD;
5964 %}
5966 //No.13 FPU Conditional branch :
// Conditional branch on an FP comparison; uses the branch unit.
5967 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5968 multiple_bundles;
5969 src1 : RD(read);
5970 src2 : RD(read);
5971 DECODE : ID;
5972 BR : RD;
5973 %}
5975 //No.14 Floating FPU reg operation : dst <-- op reg
// Unary FP operation (e.g. neg/abs-style) on the primary FPU unit.
5976 pipe_class fpu1_regF(regF dst, regF src) %{
5977 src : RD(read);
5978 dst : WB(write);
5979 DECODE : ID;
5980 FPU : CA;
5981 %}
// Catch-all for long/serializing memory sequences: ~10 instructions,
// forces serialization, fixed 30-cycle latency estimate.
5983 pipe_class long_memory_op() %{
5984 instruction_count(10); multiple_bundles; force_serialization;
5985 fixed_latency(30);
5986 %}
// Direct call: serializing, fixed 200-cycle latency estimate, occupies the branch unit.
5988 pipe_class simple_call() %{
5989 instruction_count(10); multiple_bundles; force_serialization;
5990 fixed_latency(200);
5991 BR : RD;
5992 %}
// Generic call: like simple_call but without reserving the branch unit.
5994 pipe_class call() %{
5995 instruction_count(10); multiple_bundles; force_serialization;
5996 fixed_latency(200);
5997 %}
5999 //FIXME:
6000 //No.9 Piple slow : for multi-instructions
// Conservative catch-all for complex multi-instruction expansions:
// serializing, ~20 instructions, fixed 50-cycle latency estimate.
6001 pipe_class pipe_slow( ) %{
6002 instruction_count(20);
6003 force_serialization;
6004 multiple_bundles;
6005 fixed_latency(50);
6006 %}
6008 %}
6012 //----------INSTRUCTIONS-------------------------------------------------------
6013 //
6014 // match -- States which machine-independent subtree may be replaced
6015 // by this instruction.
6016 // ins_cost -- The estimated cost of this instruction is used by instruction
6017 // selection to identify a minimum cost tree of machine
6018 // instructions that matches a tree of machine-independent
6019 // instructions.
6020 // format -- A string providing the disassembly for this instruction.
6021 // The value of an instruction's operand may be inserted
6022 // by referring to it with a '$' prefix.
6023 // opcode -- Three instruction opcodes may be provided. These are referred
6024 // to within an encode class as $primary, $secondary, and $tertiary
6025 // respectively. The primary opcode is commonly used to
6026 // indicate the type of machine instruction, while secondary
6027 // and tertiary are often used for prefix options or addressing
6028 // modes.
6029 // ins_encode -- A list of encode classes with parameters. The encode class
6030 // name must have been defined in an 'enc_class' specification
6031 // in the encode section of the architecture description.
6034 // Load Integer
// Matches a 32-bit LoadI from memory; emitted as "lw" via load_I_enc.
6035 instruct loadI(mRegI dst, memory mem) %{
6036 match(Set dst (LoadI mem));
6038 ins_cost(125);
6039 format %{ "lw $dst, $mem #@loadI" %}
6040 ins_encode (load_I_enc(dst, mem));
6041 ins_pipe( ialu_loadI );
6042 %}
// Fused load+widen: LoadI followed by ConvI2L collapses to a single "lw"
// (the load already produces a sign-extended value in a 64-bit register).
6044 instruct loadI_convI2L(mRegL dst, memory mem) %{
6045 match(Set dst (ConvI2L (LoadI mem)));
6047 ins_cost(125);
6048 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6049 ins_encode (load_I_enc(dst, mem));
6050 ins_pipe( ialu_loadI );
6051 %}
6053 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches the (x << 24) >> 24 narrowing idiom on a loaded int; emits "lb" directly.
6054 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6055 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6057 ins_cost(125);
6058 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6059 ins_encode(load_B_enc(dst, mem));
6060 ins_pipe(ialu_loadI);
6061 %}
6063 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Matches (LoadI & 0xFF); emits a zero-extending "lbu" directly.
6064 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6065 match(Set dst (AndI (LoadI mem) mask));
6067 ins_cost(125);
6068 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6069 ins_encode(load_UB_enc(dst, mem));
6070 ins_pipe(ialu_loadI);
6071 %}
6073 // Load Integer (32 bit signed) to Short (16 bit signed)
// Matches the (x << 16) >> 16 narrowing idiom on a loaded int; emits "lh" directly.
6074 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6075 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6077 ins_cost(125);
6078 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6079 ins_encode(load_S_enc(dst, mem));
6080 ins_pipe(ialu_loadI);
6081 %}
6083 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Matches (LoadI & 0xFFFF); emits a zero-extending "lhu" directly.
6084 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6085 match(Set dst (AndI (LoadI mem) mask));
6087 ins_cost(125);
6088 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6089 ins_encode(load_C_enc(dst, mem));
6090 ins_pipe(ialu_loadI);
6091 %}
6093 // Load Long.
// 64-bit load ("ld"); the atomic-access predicate is left disabled below.
6094 instruct loadL(mRegL dst, memory mem) %{
6095 // predicate(!((LoadLNode*)n)->require_atomic_access());
6096 match(Set dst (LoadL mem));
6098 ins_cost(250);
6099 format %{ "ld $dst, $mem #@loadL" %}
6100 ins_encode(load_L_enc(dst, mem));
6101 ins_pipe( ialu_loadL );
6102 %}
6104 // Load Long - UNaligned
// Currently encoded as a plain "ld" with a higher cost; see FIXME about ldl/ldr.
6105 instruct loadL_unaligned(mRegL dst, memory mem) %{
6106 match(Set dst (LoadL_unaligned mem));
6108 // FIXME: Jin: Need more effective ldl/ldr
6109 ins_cost(450);
6110 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6111 ins_encode(load_L_enc(dst, mem));
6112 ins_pipe( ialu_loadL );
6113 %}
6115 // Store Long
6116 instruct storeL_reg(memory mem, mRegL src) %{
6117 match(Set mem (StoreL mem src));
6119 ins_cost(200);
6120 format %{ "sd $mem, $src #@storeL_reg\n" %}
6121 ins_encode(store_L_reg_enc(mem, src));
6122 ins_pipe( ialu_storeL );
6123 %}
// Store long-zero: stores the zero register directly, avoiding a constant load.
6126 instruct storeL_immL0(memory mem, immL0 zero) %{
6127 match(Set mem (StoreL mem zero));
6129 ins_cost(180);
6130 format %{ "sd $mem, zero #@storeL_immL0" %}
6131 ins_encode(store_L_immL0_enc(mem, zero));
6132 ins_pipe( ialu_storeL );
6133 %}
6135 // Load Compressed Pointer
// Loads a 32-bit narrow oop with zero extension ("lwu").
6136 instruct loadN(mRegN dst, memory mem)
6137 %{
6138 match(Set dst (LoadN mem));
6140 ins_cost(125); // XXX
6141 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6142 ins_encode (load_N_enc(dst, mem));
6143 ins_pipe( ialu_loadI ); // XXX
6144 %}
// Fused load+decode: when compressed oops use no base and no shift, DecodeN
// of a LoadN is just the zero-extending load itself.
6146 instruct loadN2P(mRegP dst, memory mem)
6147 %{
6148 match(Set dst (DecodeN (LoadN mem)));
6149 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6151 ins_cost(125); // XXX
6152 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6153 ins_encode (load_N_enc(dst, mem));
6154 ins_pipe( ialu_loadI ); // XXX
6155 %}
6157 // Load Pointer
// Full-width (64-bit) pointer load via "ld".
6158 instruct loadP(mRegP dst, memory mem) %{
6159 match(Set dst (LoadP mem));
6161 ins_cost(125);
6162 format %{ "ld $dst, $mem #@loadP" %}
6163 ins_encode (load_P_enc(dst, mem));
6164 ins_pipe( ialu_loadI );
6165 %}
6167 // Load Klass Pointer
6168 instruct loadKlass(mRegP dst, memory mem) %{
6169 match(Set dst (LoadKlass mem));
6171 ins_cost(125);
6172 format %{ "MOV $dst,$mem @ loadKlass" %}
6173 ins_encode (load_P_enc(dst, mem));
6174 ins_pipe( ialu_loadI );
6175 %}
6177 // Load narrow Klass Pointer
6178 instruct loadNKlass(mRegN dst, memory mem)
6179 %{
6180 match(Set dst (LoadNKlass mem));
6182 ins_cost(125); // XXX
6183 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6184 ins_encode (load_N_enc(dst, mem));
6185 ins_pipe( ialu_loadI ); // XXX
6186 %}
// Fused load+decode for klass pointers: with no base and no shift,
// DecodeNKlass of a LoadNKlass is just the zero-extending load.
6188 instruct loadN2PKlass(mRegP dst, memory mem)
6189 %{
6190 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6191 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6193 ins_cost(125); // XXX
6194 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6195 ins_encode (load_N_enc(dst, mem));
6196 ins_pipe( ialu_loadI ); // XXX
6197 %}
6199 // Load Constant
// Materializes a 32-bit integer constant into a register via MacroAssembler::move.
6200 instruct loadConI(mRegI dst, immI src) %{
6201 match(Set dst src);
6203 ins_cost(150);
6204 format %{ "mov $dst, $src #@loadConI" %}
6205 ins_encode %{
6206 Register dst = $dst$$Register;
6207 int value = $src$$constant;
6208 __ move(dst, value);
6209 %}
6210 ins_pipe( ialu_regI_regI );
6211 %}
// Materializes an arbitrary 64-bit constant with the set64 macro
// (instruction count depends on the constant's bit pattern).
6214 instruct loadConL_set64(mRegL dst, immL src) %{
6215 match(Set dst src);
6216 ins_cost(120);
6217 format %{ "li $dst, $src @ loadConL_set64" %}
6218 ins_encode %{
6219 __ set64($dst$$Register, $src$$constant);
6220 %}
6221 ins_pipe(ialu_regL_regL);
6222 %}
6224 /*
6225 // Load long value from constant table (predicated by immL_expensive).
6226 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6227 match(Set dst src);
6228 ins_cost(150);
6229 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6230 ins_encode %{
6231 int con_offset = $constantoffset($src);
6233 if (Assembler::is_simm16(con_offset)) {
6234 __ ld($dst$$Register, $constanttablebase, con_offset);
6235 } else {
6236 __ set64(AT, con_offset);
6237 if (UseLoongsonISA) {
6238 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6239 } else {
6240 __ daddu(AT, $constanttablebase, AT);
6241 __ ld($dst$$Register, AT, 0);
6242 }
6243 }
6244 %}
6245 ins_pipe(ialu_loadI);
6246 %}
6247 */
// Cheap materialization of a long constant that fits in a signed 16-bit
// immediate: a single "daddiu dst, zero, imm".
6249 instruct loadConL16(mRegL dst, immL16 src) %{
6250 match(Set dst src);
6251 ins_cost(105);
6252 format %{ "mov $dst, $src #@loadConL16" %}
6253 ins_encode %{
6254 Register dst_reg = as_Register($dst$$reg);
6255 int value = $src$$constant;
6256 __ daddiu(dst_reg, R0, value);
6257 %}
6258 ins_pipe( ialu_regL_regL );
6259 %}
// Cheapest long-constant case: zero, produced by adding the zero register to itself.
6262 instruct loadConL0(mRegL dst, immL0 src) %{
6263 match(Set dst src);
6264 ins_cost(100);
6265 format %{ "mov $dst, zero #@loadConL0" %}
6266 ins_encode %{
6267 Register dst_reg = as_Register($dst$$reg);
6268 __ daddu(dst_reg, R0, R0);
6269 %}
6270 ins_pipe( ialu_regL_regL );
6271 %}
6273 // Load Range
6274 instruct loadRange(mRegI dst, memory mem) %{
6275 match(Set dst (LoadRange mem));
6277 ins_cost(125);
6278 format %{ "MOV $dst,$mem @ loadRange" %}
6279 ins_encode(load_I_enc(dst, mem));
6280 ins_pipe( ialu_loadI );
6281 %}
// Store an uncompressed (64-bit) pointer to memory via "sd".
6284 instruct storeP(memory mem, mRegP src ) %{
6285 match(Set mem (StoreP mem src));
6287 ins_cost(125);
6288 format %{ "sd $src, $mem #@storeP" %}
6289 ins_encode(store_P_reg_enc(mem, src));
6290 ins_pipe( ialu_storeI );
6291 %}
6293 // Store NULL Pointer, mark word, or other simple pointer constant.
// Stores the zero register directly; no constant materialization needed.
6294 instruct storeImmP0(memory mem, immP0 zero) %{
6295 match(Set mem (StoreP mem zero));
6297 ins_cost(125);
6298 format %{ "mov $mem, $zero #@storeImmP0" %}
6299 ins_encode(store_P_immP0_enc(mem));
6300 ins_pipe( ialu_storeI );
6301 %}
6303 // Store Byte Immediate
6304 instruct storeImmB(memory mem, immI8 src) %{
6305 match(Set mem (StoreB mem src));
6307 ins_cost(150);
6308 format %{ "movb $mem, $src #@storeImmB" %}
6309 ins_encode(store_B_immI_enc(mem, src));
6310 ins_pipe( ialu_storeI );
6311 %}
6313 // Store Compressed Pointer
6314 instruct storeN(memory mem, mRegN src)
6315 %{
6316 match(Set mem (StoreN mem src));
6318 ins_cost(125); // XXX
6319 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6320 ins_encode(store_N_reg_enc(mem, src));
6321 ins_pipe( ialu_storeI );
6322 %}
// Fused encode+store: with no oop base and no shift, EncodeP followed by
// StoreN is just a 32-bit store of the (low half of the) pointer.
6324 instruct storeP2N(memory mem, mRegP src)
6325 %{
6326 match(Set mem (StoreN mem (EncodeP src)));
6327 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6329 ins_cost(125); // XXX
6330 format %{ "sw $mem, $src\t# @ storeP2N" %}
6331 ins_encode(store_N_reg_enc(mem, src));
6332 ins_pipe( ialu_storeI );
6333 %}
// 32-bit store ("sw") of a compressed klass pointer register.
6335 instruct storeNKlass(memory mem, mRegN src)
6336 %{
6337 match(Set mem (StoreNKlass mem src));
6339 ins_cost(125); // XXX
6340 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6341 ins_encode(store_N_reg_enc(mem, src));
6342 ins_pipe( ialu_storeI );
6343 %}
// Fused encode+store for klass pointers: with no klass base and no shift,
// EncodePKlass followed by StoreNKlass is just a 32-bit store.
6345 instruct storeP2NKlass(memory mem, mRegP src)
6346 %{
6347 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6348 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6350 ins_cost(125); // XXX
6351 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6352 ins_encode(store_N_reg_enc(mem, src));
6353 ins_pipe( ialu_storeI );
6354 %}
// Store a compressed NULL pointer (narrow-oop zero) to memory.
6356 instruct storeImmN0(memory mem, immN0 zero)
6357 %{
6358 match(Set mem (StoreN mem zero));
6360 ins_cost(125); // XXX
6361 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6362 ins_encode(storeImmN0_enc(mem, zero));
6363 ins_pipe( ialu_storeI );
6364 %}
6366 // Store Byte
6367 instruct storeB(memory mem, mRegI src) %{
6368 match(Set mem (StoreB mem src));
6370 ins_cost(125);
6371 format %{ "sb $src, $mem #@storeB" %}
6372 ins_encode(store_B_reg_enc(mem, src));
6373 ins_pipe( ialu_storeI );
6374 %}
// Fused narrow+store: StoreB of a ConvL2I needs no explicit truncation,
// since "sb" only stores the low byte anyway.
6376 instruct storeB_convL2I(memory mem, mRegL src) %{
6377 match(Set mem (StoreB mem (ConvL2I src)));
6379 ins_cost(125);
6380 format %{ "sb $src, $mem #@storeB_convL2I" %}
6381 ins_encode(store_B_reg_enc(mem, src));
6382 ins_pipe( ialu_storeI );
6383 %}
6385 // Load Byte (8bit signed)
6386 instruct loadB(mRegI dst, memory mem) %{
6387 match(Set dst (LoadB mem));
6389 ins_cost(125);
6390 format %{ "lb $dst, $mem #@loadB" %}
6391 ins_encode(load_B_enc(dst, mem));
6392 ins_pipe( ialu_loadI );
6393 %}
// Fused load+widen: ConvI2L of a LoadB collapses to the sign-extending "lb".
6395 instruct loadB_convI2L(mRegL dst, memory mem) %{
6396 match(Set dst (ConvI2L (LoadB mem)));
6398 ins_cost(125);
6399 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6400 ins_encode(load_B_enc(dst, mem));
6401 ins_pipe( ialu_loadI );
6402 %}
6404 // Load Byte (8bit UNsigned)
6405 instruct loadUB(mRegI dst, memory mem) %{
6406 match(Set dst (LoadUB mem));
6408 ins_cost(125);
6409 format %{ "lbu $dst, $mem #@loadUB" %}
6410 ins_encode(load_UB_enc(dst, mem));
6411 ins_pipe( ialu_loadI );
6412 %}
// Fused load+widen: ConvI2L of a LoadUB collapses to the zero-extending "lbu".
6414 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6415 match(Set dst (ConvI2L (LoadUB mem)));
6417 ins_cost(125);
6418 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6419 ins_encode(load_UB_enc(dst, mem));
6420 ins_pipe( ialu_loadI );
6421 %}
6423 // Load Short (16bit signed)
6424 instruct loadS(mRegI dst, memory mem) %{
6425 match(Set dst (LoadS mem));
6427 ins_cost(125);
6428 format %{ "lh $dst, $mem #@loadS" %}
6429 ins_encode(load_S_enc(dst, mem));
6430 ins_pipe( ialu_loadI );
6431 %}
6433 // Load Short (16 bit signed) to Byte (8 bit signed)
6434 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6435 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6437 ins_cost(125);
6438 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6439 ins_encode(load_B_enc(dst, mem));
6440 ins_pipe(ialu_loadI);
6441 %}
// Fused load+widen: ConvI2L of a LoadS collapses to the sign-extending "lh".
6443 instruct loadS_convI2L(mRegL dst, memory mem) %{
6444 match(Set dst (ConvI2L (LoadS mem)));
6446 ins_cost(125);
6447 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6448 ins_encode(load_S_enc(dst, mem));
6449 ins_pipe( ialu_loadI );
6450 %}
6452 // Store Integer Immediate
6453 instruct storeImmI(memory mem, immI src) %{
6454 match(Set mem (StoreI mem src));
6456 ins_cost(150);
6457 format %{ "mov $mem, $src #@storeImmI" %}
6458 ins_encode(store_I_immI_enc(mem, src));
6459 ins_pipe( ialu_storeI );
6460 %}
6462 // Store Integer
6463 instruct storeI(memory mem, mRegI src) %{
6464 match(Set mem (StoreI mem src));
6466 ins_cost(125);
6467 format %{ "sw $mem, $src #@storeI" %}
6468 ins_encode(store_I_reg_enc(mem, src));
6469 ins_pipe( ialu_storeI );
6470 %}
// Fused narrow+store: StoreI of a ConvL2I needs no explicit truncation,
// since "sw" only stores the low 32 bits anyway.
6472 instruct storeI_convL2I(memory mem, mRegL src) %{
6473 match(Set mem (StoreI mem (ConvL2I src)));
6475 ins_cost(125);
6476 format %{ "sw $mem, $src #@storeI_convL2I" %}
6477 ins_encode(store_I_reg_enc(mem, src));
6478 ins_pipe( ialu_storeI );
6479 %}
6481 // Load Float
6482 instruct loadF(regF dst, memory mem) %{
6483 match(Set dst (LoadF mem));
6485 ins_cost(150);
6486 format %{ "loadF $dst, $mem #@loadF" %}
6487 ins_encode(load_F_enc(dst, mem));
6488 ins_pipe( ialu_loadI );
6489 %}
// Materializes a general pointer constant. Metadata and oop constants are
// emitted with a relocation record plus a patchable 48-bit sequence so the
// GC / class redefinition machinery can later rewrite the embedded value;
// non-relocatable constants use the cheaper, non-patchable set64.
6491 instruct loadConP_general(mRegP dst, immP src) %{
6492 match(Set dst src);
6494 ins_cost(120);
6495 format %{ "li $dst, $src #@loadConP_general" %}
6497 ins_encode %{
6498 Register dst = $dst$$Register;
6499 long* value = (long*)$src$$constant;
6501 if($src->constant_reloc() == relocInfo::metadata_type){
6502 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6503 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6505 __ relocate(rspec);
6506 __ patchable_set48(dst, (long)value);
6507 }else if($src->constant_reloc() == relocInfo::oop_type){
6508 int oop_index = __ oop_recorder()->find_index((jobject)value);
6509 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6511 __ relocate(rspec);
6512 __ patchable_set48(dst, (long)value);
6513 } else if ($src->constant_reloc() == relocInfo::none) {
6514 __ set64(dst, (long)value);
6515 }
6516 %}
6518 ins_pipe( ialu_regI_regI );
6519 %}
6521 /*
6522 instruct loadConP_load(mRegP dst, immP_load src) %{
6523 match(Set dst src);
6525 ins_cost(100);
6526 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6528 ins_encode %{
6530 int con_offset = $constantoffset($src);
6532 if (Assembler::is_simm16(con_offset)) {
6533 __ ld($dst$$Register, $constanttablebase, con_offset);
6534 } else {
6535 __ set64(AT, con_offset);
6536 if (UseLoongsonISA) {
6537 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6538 } else {
6539 __ daddu(AT, $constanttablebase, AT);
6540 __ ld($dst$$Register, AT, 0);
6541 }
6542 }
6543 %}
6545 ins_pipe(ialu_loadI);
6546 %}
6547 */
// Non-oop pointer constant that is cheap to synthesize inline: no relocation
// needed, so a plain (non-patchable) set64 suffices.
6549 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6550 match(Set dst src);
6552 ins_cost(80);
6553 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6555 ins_encode %{
6556 __ set64($dst$$Register, $src$$constant);
6557 %}
6559 ins_pipe(ialu_regI_regI);
6560 %}
// Materializes the safepoint polling-page address; no relocation required.
6563 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6564 match(Set dst src);
6566 ins_cost(50);
6567 format %{ "li $dst, $src #@loadConP_poll" %}
6569 ins_encode %{
6570 Register dst = $dst$$Register;
6571 intptr_t value = (intptr_t)$src$$constant;
6573 __ set64(dst, (jlong)value);
6574 %}
6576 ins_pipe( ialu_regI_regI );
6577 %}
// NULL pointer constant: produced by adding the zero register to itself.
6579 instruct loadConP0(mRegP dst, immP0 src)
6580 %{
6581 match(Set dst src);
6583 ins_cost(50);
6584 format %{ "mov $dst, R0\t# ptr" %}
6585 ins_encode %{
6586 Register dst_reg = $dst$$Register;
6587 __ daddu(dst_reg, R0, R0);
6588 %}
6589 ins_pipe( ialu_regI_regI );
6590 %}
// Compressed NULL pointer constant: a plain move from the zero register.
6592 instruct loadConN0(mRegN dst, immN0 src) %{
6593 match(Set dst src);
6594 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6595 ins_encode %{
6596 __ move($dst$$Register, R0);
6597 %}
6598 ins_pipe( ialu_regI_regI );
6599 %}
// Materializes a compressed-oop constant; set_narrow_oop emits the
// relocation record needed so GC can patch the embedded narrow oop.
6601 instruct loadConN(mRegN dst, immN src) %{
6602 match(Set dst src);
6604 ins_cost(125);
6605 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6606 ins_encode %{
6607 Register dst = $dst$$Register;
6608 __ set_narrow_oop(dst, (jobject)$src$$constant);
6609 %}
6610 ins_pipe( ialu_regI_regI ); // XXX
6611 %}
// Materializes a compressed-klass constant via set_narrow_klass.
6613 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6614 match(Set dst src);
6616 ins_cost(125);
6617 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6618 ins_encode %{
6619 Register dst = $dst$$Register;
6620 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6621 %}
6622 ins_pipe( ialu_regI_regI ); // XXX
6623 %}
6625 //FIXME
6626 // Tail Call; Jump from runtime stub to Java code.
6627 // Also known as an 'interprocedural jump'.
6628 // Target of jump will eventually return to caller.
6629 // TailJump below removes the return address.
// Pushes RA (consumed later by generate_forward_exception), passes the
// method oop in S3, then jumps through the target register. The trailing
// nop fills the MIPS branch-delay slot.
6630 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6631 match(TailCall jump_target method_oop );
6632 ins_cost(300);
6633 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6635 ins_encode %{
6636 Register target = $jump_target$$Register;
6637 Register oop = $method_oop$$Register;
6639 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6640 __ push(RA);
6642 __ move(S3, oop);
6643 __ jr(target);
6644 __ nop();
6645 %}
6647 ins_pipe( pipe_jump );
6648 %}
6650 // Create exception oop: created by stack-crawling runtime code.
6651 // Created exception is now available to this handler, and is setup
6652 // just prior to jumping to this handler. No code emitted.
// Register-allocation-only node: pins the exception oop to A0; the only
// emission is a block comment for disassembly readability.
6653 instruct CreateException( a0_RegP ex_oop )
6654 %{
6655 match(Set ex_oop (CreateEx));
6657 // use the following format syntax
6658 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6659 ins_encode %{
6660 /* Jin: X86 leaves this function empty */
6661 __ block_comment("CreateException is empty in X86/MIPS");
6662 %}
6663 ins_pipe( empty );
6664 // ins_pipe( pipe_jump );
6665 %}
6668 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6670 - Common try/catch:
6671 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6672 |- V0, V1 are created
6673 |- T9 <= SharedRuntime::exception_handler_for_return_address
6674 `- jr T9
6675 `- the caller's exception_handler
6676 `- jr OptoRuntime::exception_blob
6677 `- here
6678 - Rethrow(e.g. 'unwind'):
6679 * The callee:
6680 |- an exception is triggered during execution
6681 `- exits the callee method through RethrowException node
6682 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6683 `- The callee jumps to OptoRuntime::rethrow_stub()
6684 * In OptoRuntime::rethrow_stub:
6685 |- The VM calls _rethrow_Java to determine the return address in the caller method
6686 `- exits the stub with tailjmpInd
6687 |- pops exception_oop(V0) and exception_pc(V1)
6688 `- jumps to the return address(usually an exception_handler)
6689 * The caller:
6690 `- continues processing the exception_blob with V0/V1
6691 */
6693 /*
6694 Disassembling OptoRuntime::rethrow_stub()
6696 ; locals
6697 0x2d3bf320: addiu sp, sp, 0xfffffff8
6698 0x2d3bf324: sw ra, 0x4(sp)
6699 0x2d3bf328: sw fp, 0x0(sp)
6700 0x2d3bf32c: addu fp, sp, zero
6701 0x2d3bf330: addiu sp, sp, 0xfffffff0
6702 0x2d3bf334: sw ra, 0x8(sp)
6703 0x2d3bf338: sw t0, 0x4(sp)
6704 0x2d3bf33c: sw sp, 0x0(sp)
6706 ; get_thread(S2)
6707 0x2d3bf340: addu s2, sp, zero
6708 0x2d3bf344: srl s2, s2, 12
6709 0x2d3bf348: sll s2, s2, 2
6710 0x2d3bf34c: lui at, 0x2c85
6711 0x2d3bf350: addu at, at, s2
6712 0x2d3bf354: lw s2, 0xffffcc80(at)
6714 0x2d3bf358: lw s0, 0x0(sp)
6715 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6716 0x2d3bf360: sw s2, 0xc(sp)
6718 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6719 0x2d3bf364: lw a0, 0x4(sp)
6720 0x2d3bf368: lw a1, 0xc(sp)
6721 0x2d3bf36c: lw a2, 0x8(sp)
6722 ;; Java_To_Runtime
6723 0x2d3bf370: lui t9, 0x2c34
6724 0x2d3bf374: addiu t9, t9, 0xffff8a48
6725 0x2d3bf378: jalr t9
6726 0x2d3bf37c: nop
6728 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6730 0x2d3bf384: lw s0, 0xc(sp)
6731 0x2d3bf388: sw zero, 0x118(s0)
6732 0x2d3bf38c: sw zero, 0x11c(s0)
6733 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6734 0x2d3bf394: addu s2, s0, zero
6735 0x2d3bf398: sw zero, 0x144(s2)
6736 0x2d3bf39c: lw s0, 0x4(s2)
6737 0x2d3bf3a0: addiu s4, zero, 0x0
6738 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6739 0x2d3bf3a8: nop
6740 0x2d3bf3ac: addiu sp, sp, 0x10
6741 0x2d3bf3b0: addiu sp, sp, 0x8
6742 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6743 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6744 0x2d3bf3bc: lui at, 0x2b48
6745 0x2d3bf3c0: lw at, 0x100(at)
6747 ; tailjmpInd: Restores exception_oop & exception_pc
6748 0x2d3bf3c4: addu v1, ra, zero
6749 0x2d3bf3c8: addu v0, s1, zero
6750 0x2d3bf3cc: jr s3
6751 0x2d3bf3d0: nop
6752 ; Exception:
6753 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6754 0x2d3bf3d8: addiu s1, s1, 0x40
6755 0x2d3bf3dc: addiu s2, zero, 0x0
6756 0x2d3bf3e0: addiu sp, sp, 0x10
6757 0x2d3bf3e4: addiu sp, sp, 0x8
6758 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6759 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6760 0x2d3bf3f0: lui at, 0x2b48
6761 0x2d3bf3f4: lw at, 0x100(at)
6762 ; TailCalljmpInd
6763 __ push(RA); ; to be used in generate_forward_exception()
6764 0x2d3bf3f8: addu t7, s2, zero
6765 0x2d3bf3fc: jr s1
6766 0x2d3bf400: nop
6767 */
6768 // Rethrow exception:
6769 // The exception oop will come in the first argument position.
6770 // Then JUMP (not call) to the rethrow stub code.
// Emits a runtime-call relocation so the patchable 48-bit stub address can
// be relocated, then jumps (jr, not jal) to OptoRuntime::rethrow_stub.
// The trailing nop fills the branch-delay slot.
6771 instruct RethrowException()
6772 %{
6773 match(Rethrow);
6775 // use the following format syntax
6776 format %{ "JMP rethrow_stub #@RethrowException" %}
6777 ins_encode %{
6778 __ block_comment("@ RethrowException");
6780 cbuf.set_insts_mark();
6781 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6783 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6784 __ patchable_set48(T9, (jlong)OptoRuntime::rethrow_stub());
6785 __ jr(T9);
6786 __ nop();
6787 %}
6788 ins_pipe( pipe_jump );
6789 %}
// Pointer-vs-NULL conditional branch (eq/ne only).
// Fix: the old code bound 'Label &L = *($labl$$label);' and then tested
// 'if (&L)' — binding a reference through a possibly-NULL pointer is
// undefined behavior and '&L' is formally always true. Keep the label as a
// pointer and test it before dereferencing. The trailing nop fills the
// MIPS branch-delay slot.
6791 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6792 match(If cmp (CmpP op1 zero));
6793 effect(USE labl);
6795 ins_cost(180);
6796 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6798 ins_encode %{
6799 Register op1 = $op1$$Register;
6800 Register op2 = R0;
6801 Label* L = $labl$$label;
6802 int flag = $cmp$$cmpcode;
6804 switch(flag)
6805 {
6806 case 0x01: //equal
6807 if (L != NULL)
6808 __ beq(op1, op2, *L);
6809 else
6810 __ beq(op1, op2, (int)0);
6811 break;
6812 case 0x02: //not_equal
6813 if (L != NULL)
6814 __ bne(op1, op2, *L);
6815 else
6816 __ bne(op1, op2, (int)0);
6817 break;
6818 /*
6819 case 0x03: //above
6820 __ sltu(AT, op2, op1);
6821 if (L != NULL)
6822 __ bne(R0, AT, *L);
6823 else
6824 __ bne(R0, AT, (int)0);
6825 break;
6826 case 0x04: //above_equal
6827 __ sltu(AT, op1, op2);
6828 if (L != NULL)
6829 __ beq(AT, R0, *L);
6830 else
6831 __ beq(AT, R0, (int)0);
6832 break;
6833 case 0x05: //below
6834 __ sltu(AT, op1, op2);
6835 if (L != NULL)
6836 __ bne(R0, AT, *L);
6837 else
6838 __ bne(R0, AT, (int)0);
6839 break;
6840 case 0x06: //below_equal
6841 __ sltu(AT, op2, op1);
6842 if (L != NULL)
6843 __ beq(AT, R0, *L);
6844 else
6845 __ beq(AT, R0, (int)0);
6846 break;
6847 */
6848 default:
6849 Unimplemented();
6850 }
6851 __ nop();
6852 %}
6854 ins_pc_relative(1);
6855 ins_pipe( pipe_alu_branch );
6856 %}
// Narrow-oop-decoded-pointer vs NULL branch (eq/ne), valid only when the
// compressed-oop base is NULL and the shift is 0 (decode is the identity,
// so the narrow register can be compared against R0 directly).
// Fix: test the label pointer instead of binding a reference through a
// possibly-NULL pointer and testing 'if (&L)' (undefined behavior).
6858 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
6859 match(If cmp (CmpP (DecodeN op1) zero));
6860 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6861 effect(USE labl);
6863 ins_cost(180);
6864 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
6866 ins_encode %{
6867 Register op1 = $op1$$Register;
6868 Register op2 = R0;
6869 Label* L = $labl$$label;
6870 int flag = $cmp$$cmpcode;
6872 switch(flag)
6873 {
6874 case 0x01: //equal
6875 if (L != NULL)
6876 __ beq(op1, op2, *L);
6877 else
6878 __ beq(op1, op2, (int)0);
6879 break;
6880 case 0x02: //not_equal
6881 if (L != NULL)
6882 __ bne(op1, op2, *L);
6883 else
6884 __ bne(op1, op2, (int)0);
6885 break;
6886 default:
6887 Unimplemented();
6888 }
6889 __ nop(); // branch-delay slot
6890 %}
6892 ins_pc_relative(1);
6893 ins_pipe( pipe_alu_branch );
6894 %}
// Pointer register-register conditional branch; unsigned above/below cases
// synthesize the condition into AT with sltu, then branch on AT.
// Fix: test the label pointer instead of binding a reference through a
// possibly-NULL pointer and testing 'if (&L)' (undefined behavior).
6897 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6898 match(If cmp (CmpP op1 op2));
6899 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6900 effect(USE labl);
6902 ins_cost(200);
6903 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6905 ins_encode %{
6906 Register op1 = $op1$$Register;
6907 Register op2 = $op2$$Register;
6908 Label* L = $labl$$label;
6909 int flag = $cmp$$cmpcode;
6911 switch(flag)
6912 {
6913 case 0x01: //equal
6914 if (L != NULL)
6915 __ beq(op1, op2, *L);
6916 else
6917 __ beq(op1, op2, (int)0);
6918 break;
6919 case 0x02: //not_equal
6920 if (L != NULL)
6921 __ bne(op1, op2, *L);
6922 else
6923 __ bne(op1, op2, (int)0);
6924 break;
6925 case 0x03: //above
6926 __ sltu(AT, op2, op1);
6927 if (L != NULL)
6928 __ bne(R0, AT, *L);
6929 else
6930 __ bne(R0, AT, (int)0);
6931 break;
6932 case 0x04: //above_equal
6933 __ sltu(AT, op1, op2);
6934 if (L != NULL)
6935 __ beq(AT, R0, *L);
6936 else
6937 __ beq(AT, R0, (int)0);
6938 break;
6939 case 0x05: //below
6940 __ sltu(AT, op1, op2);
6941 if (L != NULL)
6942 __ bne(R0, AT, *L);
6943 else
6944 __ bne(R0, AT, (int)0);
6945 break;
6946 case 0x06: //below_equal
6947 __ sltu(AT, op2, op1);
6948 if (L != NULL)
6949 __ beq(AT, R0, *L);
6950 else
6951 __ beq(AT, R0, (int)0);
6952 break;
6953 default:
6954 Unimplemented();
6955 }
6956 __ nop(); // branch-delay slot
6957 %}
6959 ins_pc_relative(1);
6960 ins_pipe( pipe_alu_branch );
6961 %}
// Compressed-oop vs NULL conditional branch (eq/ne only).
// Fix: test the label pointer instead of binding a reference through a
// possibly-NULL pointer and testing 'if (&L)' (undefined behavior).
6963 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6964 match(If cmp (CmpN op1 null));
6965 effect(USE labl);
6967 ins_cost(180);
6968 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6969 "BP$cmp $labl @ cmpN_null_branch" %}
6970 ins_encode %{
6971 Register op1 = $op1$$Register;
6972 Register op2 = R0;
6973 Label* L = $labl$$label;
6974 int flag = $cmp$$cmpcode;
6976 switch(flag)
6977 {
6978 case 0x01: //equal
6979 if (L != NULL)
6980 __ beq(op1, op2, *L);
6981 else
6982 __ beq(op1, op2, (int)0);
6983 break;
6984 case 0x02: //not_equal
6985 if (L != NULL)
6986 __ bne(op1, op2, *L);
6987 else
6988 __ bne(op1, op2, (int)0);
6989 break;
6990 default:
6991 Unimplemented();
6992 }
6993 __ nop(); // branch-delay slot
6994 %}
6995 //TODO: pipe_branchP or create pipe_branchN LEE
6996 ins_pc_relative(1);
6997 ins_pipe( pipe_alu_branch );
6998 %}
// Compressed-oop register-register conditional branch; unsigned relational
// cases synthesize the condition into AT with sltu, then branch on AT.
// Fix: test the label pointer instead of binding a reference through a
// possibly-NULL pointer and testing 'if (&L)' (undefined behavior).
7000 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7001 match(If cmp (CmpN op1 op2));
7002 effect(USE labl);
7004 ins_cost(180);
7005 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7006 "BP$cmp $labl" %}
7007 ins_encode %{
7008 Register op1_reg = $op1$$Register;
7009 Register op2_reg = $op2$$Register;
7010 Label* L = $labl$$label;
7011 int flag = $cmp$$cmpcode;
7013 switch(flag)
7014 {
7015 case 0x01: //equal
7016 if (L != NULL)
7017 __ beq(op1_reg, op2_reg, *L);
7018 else
7019 __ beq(op1_reg, op2_reg, (int)0);
7020 break;
7021 case 0x02: //not_equal
7022 if (L != NULL)
7023 __ bne(op1_reg, op2_reg, *L);
7024 else
7025 __ bne(op1_reg, op2_reg, (int)0);
7026 break;
7027 case 0x03: //above
7028 __ sltu(AT, op2_reg, op1_reg);
7029 if (L != NULL)
7030 __ bne(R0, AT, *L);
7031 else
7032 __ bne(R0, AT, (int)0);
7033 break;
7034 case 0x04: //above_equal
7035 __ sltu(AT, op1_reg, op2_reg);
7036 if (L != NULL)
7037 __ beq(AT, R0, *L);
7038 else
7039 __ beq(AT, R0, (int)0);
7040 break;
7041 case 0x05: //below
7042 __ sltu(AT, op1_reg, op2_reg);
7043 if (L != NULL)
7044 __ bne(R0, AT, *L);
7045 else
7046 __ bne(R0, AT, (int)0);
7047 break;
7048 case 0x06: //below_equal
7049 __ sltu(AT, op2_reg, op1_reg);
7050 if (L != NULL)
7051 __ beq(AT, R0, *L);
7052 else
7053 __ beq(AT, R0, (int)0);
7054 break;
7055 default:
7056 Unimplemented();
7057 }
7058 __ nop(); // branch-delay slot
7059 %}
7060 ins_pc_relative(1);
7061 ins_pipe( pipe_alu_branch );
7062 %}
// Branch on unsigned integer compare of two registers (CmpU).
// Ordered conditions compute sltu into AT (clobbered) and branch on AT.
// '&L' non-null means the label is bound; otherwise an offset-0 branch is
// emitted for later patching. Trailing nop = branch delay slot.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned compare of a register against a general immediate.
// The immediate is first materialized into AT; ordered conditions then
// overwrite AT with the sltu result (AT is clobbered throughout).
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    __ move(AT, val);  // materialize immediate before the compare
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed integer compare of two registers (CmpI).
// Ordered conditions compute slt (signed) into AT (clobbered) and branch
// on AT. '&L' non-null means the label is bound; otherwise emit an
// offset-0 branch for later patching. Trailing nop = branch delay slot.
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed compare of a register against zero. Cheaper (cost 170)
// than the generic immediate form because MIPS has direct compare-with-zero
// branches (bgtz/bgez/bltz/blez) — no AT scratch needed.
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    // int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    //__ move(AT, val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed compare of a register against a general immediate.
// The immediate is materialized into AT; ordered conditions then overwrite
// AT with the slt result (AT clobbered throughout). Costlier (200) than the
// zero/simm16 special cases below which avoid the move.
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    __ move(AT, val);  // materialize immediate before the compare
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on UNSIGNED compare of a register against zero. Several conditions
// degenerate: x >= 0 is always true (unconditional branch), x < 0 is never
// true (no code), x <= 0 is x == 0.
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        // unsigned x > 0 is simply x != 0
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // unsigned x >= 0 is always true: unconditional branch
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // unsigned x < 0 is never true: emit nothing.
        // NOTE(review): this 'return' also skips the delay-slot nop below —
        // fine only because no branch was emitted; confirm.
        return;
        break;
      case 0x06: //below_equal
        // unsigned x <= 0 is x == 0
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned compare against a 16-bit immediate. Where the operand
// order permits (above_equal/below), sltiu encodes the immediate directly;
// the other cases materialize it into AT first. AT is clobbered.
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);  // immediate fits: no move needed
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);  // immediate fits: no move needed
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long (64-bit) compare of two registers. Ordered
// conditions compute slt into AT (clobbered).
// Unlike the 32-bit variants above, each case fills its own delay slot
// via __ delayed()->nop() instead of a single trailing nop.
// '&target' non-null means the label is bound; otherwise emit an offset-0
// branch for later patching.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);
  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;
      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;
      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;
      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long compare against an immediate, implemented as
// AT = src1 - val (daddiu) followed by a sign/zero test on AT.
// The immL16_sub operand presumably guarantees that -val fits in a signed
// 16-bit daddiu immediate — TODO confirm against the operand definition.
// AT is clobbered. '&L' non-null means the label is bound; otherwise emit
// an offset-0 branch for later patching. Trailing nop = branch delay slot.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    __ daddiu(AT, op1, -1 * val);  // AT = src1 - val; compare reduces to sign test
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed int compare against an immediate, implemented as
// AT = src1 - val (addiu32, 32-bit add) followed by a sign/zero test on AT.
// The immI16_sub operand presumably guarantees that -val fits in a signed
// 16-bit addiu immediate without overflow — TODO confirm against the
// operand definition.
// AT is clobbered. '&L' non-null means the label is bound; otherwise emit
// an offset-0 branch for later patching. Trailing nop = branch delay slot.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    __ addiu32(AT, op1, -1 * val);  // AT = src1 - val; compare reduces to sign test
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long compare against zero using the direct
// compare-with-zero branches where possible.
// NOTE(review): the 'less' case uses slt+bne (clobbering AT) where
// bltz(opr1_reg, ...) would match the other cases; behavior is equivalent.
// The single delay-slot nop is emitted after the switch via delayed()->nop().
// '&target' non-null means the label is bound; otherwise emit an offset-0
// branch for later patching.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);
  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;
      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;
      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;
      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;
      default:
        Unimplemented();
    }
    __ delayed()->nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
7779 //FIXME
// Branch on single-precision float compare. Each case sets the FP condition
// flag with a c_*_s compare and branches on it with bc1t/bc1f. The choice
// between ordered (eq) and unordered (ule/ult) predicates determines how
// NaN operands steer the branch for each condition code — see the NaN note
// on the double variant below.
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on double-precision float compare; mirrors branchConF_reg_reg but
// with the _d compare forms. The not_equal case deliberately uses the
// ordered c_eq_d (see the 2016/4/19 note) so that NaN != NaN evaluates true.
// '&L' non-null means the label is bound; otherwise emit an offset-0 branch
// for later patching. Trailing nop = branch delay slot.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7912 // Call Runtime Instruction
// Direct call into the VM runtime. The actual call sequence lives in the
// Java_To_Runtime encoding class (defined elsewhere in this file); per the
// change this file carries, it uses a general jal for java runtime calls.
// ins_alignment(16) keeps the call site 16-byte aligned.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);
  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
7926 //------------------------MemBar Instructions-------------------------------
7927 //Memory barrier flavors
// Acquire barrier: emits no code (size 0). Presumably relies on the
// preceding load's ordering on this implementation — confirm against the
// port's memory-model assumptions.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);
  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emits a full MIPS 'sync' instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);
  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire as part of monitor entry: empty — the ordering is provided by the
// CAS in the preceding FastLock (per the format string).
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);
  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: emits no code (size 0), matching membar_acquire above.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);
  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emits a full MIPS 'sync' instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);
  format %{ "MEMBAR @ store_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Release as part of monitor exit: empty — the release is performed inside
// FastUnlock (per the format string).
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);
  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Volatile barrier: a full 'sync', skipped entirely on uniprocessors where
// no cross-CPU ordering is required.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);
  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return; // Not needed on single CPU
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier elided when Matcher::post_store_load_barrier proves a
// prior barrier already covers this point; competes with membar_volatile
// above via its predicate and zero cost.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);
  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// StoreStore barrier: emits no code (size 0). Presumably relies on this
// implementation not reordering stores — confirm against the port's
// memory-model assumptions.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
8030 //----------Move Instructions--------------------------------------------------
// Reinterpret a long register as a pointer: a plain register move, elided
// when the allocator already placed src and dst in the same register.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// Reinterpret a pointer register as a long: a plain register move, elided
// when src and dst coincide. Mirror of castX2P above.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));
  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// Bit-pattern move of a float register into an int register (mfc1);
// no conversion is performed.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-pattern move of an int register into a float register (mtc1);
// no conversion is performed.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-pattern move of a double register into a long register (dmfc1, the
// 64-bit doubleword form); no conversion is performed.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-pattern move of a long register into a double register (dmtc1, the
// 64-bit doubleword form); no conversion is performed.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);
    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
8115 //----------Conditional Move---------------------------------------------------
8116 // Conditional move
// Conditional move of an int guarded by a signed int compare.
// AT is clobbered: eq/ne use a 32-bit subtract (subu32) so AT is zero iff
// equal; the ordered cases use slt. movz moves src into dst when AT == 0,
// movn when AT != 0; dst is untouched when the condition fails.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of an int guarded by an unsigned pointer compare.
// AT is clobbered: eq/ne use subu (full-width subtract, as pointers are
// 64-bit); ordered cases use sltu. movz/movn move src into dst when AT is
// zero / non-zero respectively.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of an int guarded by an unsigned narrow-oop compare.
// AT is clobbered: eq/ne use subu32 (narrow oops are 32-bit); ordered cases
// use sltu. movz/movn move src into dst when AT is zero / non-zero.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer guarded by an unsigned narrow-oop compare.
// Same scheme as cmovI_cmpN_reg_reg: AT clobbered, subu32 for eq/ne,
// sltu for ordered cases, movz/movn on AT.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop guarded by an unsigned pointer compare.
// AT is clobbered: eq/ne use subu (full-width, pointers are 64-bit);
// ordered cases use sltu. movz/movn move src into dst on AT zero/non-zero.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer guarded by a double compare. Each case sets
// the FP condition flag (c_*_d) and uses movt/movf (move on FP flag
// true/false) — AT is not used here.
// NOTE(review): the cases mix ordered (c_ole/c_olt for greater/greater_equal)
// and unordered (c_ult/c_ule for less/less_equal) predicates, giving
// asymmetric NaN behavior across conditions — verify this matches the
// intended CMove semantics for unordered inputs.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop guarded by an unsigned narrow-oop
// compare. AT is clobbered: subu32 (32-bit) for eq/ne, sltu for ordered
// cases; movz/movn move src into dst on AT zero/non-zero.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of an int guarded by an UNSIGNED int compare (CmpU).
// AT is clobbered: eq/ne use subu, ordered cases use sltu; movz/movn move
// src into dst on AT zero/non-zero.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;
    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Branchless CMoveI on a SIGNED long compare (CmpL):
//   dst = src iff (tmp1 cop tmp2). slt gives the signed ordering;
// movz/movn select on AT. Clobbers AT.
8547 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8548 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8549 ins_cost(80);
8550 format %{
8551 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8552 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8553 %}
8554 ins_encode %{
8555 Register opr1 = as_Register($tmp1$$reg);
8556 Register opr2 = as_Register($tmp2$$reg);
8557 Register dst = $dst$$Register;
8558 Register src = $src$$Register;
8559 int flag = $cop$$cmpcode;
8561 switch(flag)
8562 {
8563 case 0x01: //equal
8564 __ subu(AT, opr1, opr2);
8565 __ movz(dst, src, AT);
8566 break;
8568 case 0x02: //not_equal
8569 __ subu(AT, opr1, opr2);
8570 __ movn(dst, src, AT);
8571 break;
8573 case 0x03: //greater
8574 __ slt(AT, opr2, opr1);
8575 __ movn(dst, src, AT);
8576 break;
8578 case 0x04: //greater_equal
8579 __ slt(AT, opr1, opr2);
8580 __ movz(dst, src, AT);
8581 break;
8583 case 0x05: //less
8584 __ slt(AT, opr1, opr2);
8585 __ movn(dst, src, AT);
8586 break;
8588 case 0x06: //less_equal
8589 __ slt(AT, opr2, opr1);
8590 __ movz(dst, src, AT);
8591 break;
8593 default:
8594 Unimplemented();
8595 }
8596 %}
8598 ins_pipe( pipe_slow );
8599 %}
// Branchless CMoveP on a SIGNED long compare (CmpL):
//   dst = src iff (tmp1 cop tmp2). Same shape as cmovI_cmpL_reg_reg but
// for pointer destination registers. Clobbers AT.
8601 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8602 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8603 ins_cost(80);
8604 format %{
8605 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8606 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8607 %}
8608 ins_encode %{
8609 Register opr1 = as_Register($tmp1$$reg);
8610 Register opr2 = as_Register($tmp2$$reg);
8611 Register dst = $dst$$Register;
8612 Register src = $src$$Register;
8613 int flag = $cop$$cmpcode;
8615 switch(flag)
8616 {
8617 case 0x01: //equal
8618 __ subu(AT, opr1, opr2);
8619 __ movz(dst, src, AT);
8620 break;
8622 case 0x02: //not_equal
8623 __ subu(AT, opr1, opr2);
8624 __ movn(dst, src, AT);
8625 break;
8627 case 0x03: //greater
8628 __ slt(AT, opr2, opr1);
8629 __ movn(dst, src, AT);
8630 break;
8632 case 0x04: //greater_equal
8633 __ slt(AT, opr1, opr2);
8634 __ movz(dst, src, AT);
8635 break;
8637 case 0x05: //less
8638 __ slt(AT, opr1, opr2);
8639 __ movn(dst, src, AT);
8640 break;
8642 case 0x06: //less_equal
8643 __ slt(AT, opr2, opr1);
8644 __ movz(dst, src, AT);
8645 break;
8647 default:
8648 Unimplemented();
8649 }
8650 %}
8652 ins_pipe( pipe_slow );
8653 %}
// Branchless CMoveI on a double compare (CmpD): dst = src iff
// (tmp1 cop tmp2). The FP compare sets the FCC bit; movt/movf select on
// it. Ordered (c_o*) vs unordered (c_u*) predicates are chosen so that a
// NaN operand does NOT take the move for >, >=, and it DOES follow the
// branch-style "unordered" treatment for <, <= — mirrors
// branchConD_reg_reg (see the 2016/4/19 note below).
8655 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8656 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8657 ins_cost(80);
8658 format %{
8659 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8660 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8661 %}
8662 ins_encode %{
8663 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8664 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8665 Register dst = as_Register($dst$$reg);
8666 Register src = as_Register($src$$reg);
8668 int flag = $cop$$cmpcode;
8670 switch(flag)
8671 {
8672 case 0x01: //equal
8673 __ c_eq_d(reg_op1, reg_op2);
8674 __ movt(dst, src);
8675 break;
8676 case 0x02: //not_equal
8677 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8678 __ c_eq_d(reg_op1, reg_op2);
8679 __ movf(dst, src);
8680 break;
8681 case 0x03: //greater: !(op1 <= op2, ordered)
8682 __ c_ole_d(reg_op1, reg_op2);
8683 __ movf(dst, src);
8684 break;
8685 case 0x04: //greater_equal: !(op1 < op2, ordered)
8686 __ c_olt_d(reg_op1, reg_op2);
8687 __ movf(dst, src);
8688 break;
8689 case 0x05: //less: op1 < op2 or unordered
8690 __ c_ult_d(reg_op1, reg_op2);
8691 __ movt(dst, src);
8692 break;
8693 case 0x06: //less_equal: op1 <= op2 or unordered
8694 __ c_ule_d(reg_op1, reg_op2);
8695 __ movt(dst, src);
8696 break;
8697 default:
8698 Unimplemented();
8699 }
8700 %}
8702 ins_pipe( pipe_slow );
8703 %}
// Branchless CMoveP on an UNSIGNED pointer compare (CmpP):
//   dst = src iff (tmp1 cop tmp2). 64-bit subu/sltu; clobbers AT.
8706 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8707 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8708 ins_cost(80);
8709 format %{
8710 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8711 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8712 %}
8713 ins_encode %{
8714 Register op1 = $tmp1$$Register;
8715 Register op2 = $tmp2$$Register;
8716 Register dst = $dst$$Register;
8717 Register src = $src$$Register;
8718 int flag = $cop$$cmpcode;
8720 switch(flag)
8721 {
8722 case 0x01: //equal
8723 __ subu(AT, op1, op2);
8724 __ movz(dst, src, AT);
8725 break;
8727 case 0x02: //not_equal
8728 __ subu(AT, op1, op2);
8729 __ movn(dst, src, AT);
8730 break;
8732 case 0x03: //above
8733 __ sltu(AT, op2, op1);
8734 __ movn(dst, src, AT);
8735 break;
8737 case 0x04: //above_equal
8738 __ sltu(AT, op1, op2);
8739 __ movz(dst, src, AT);
8740 break;
8742 case 0x05: //below
8743 __ sltu(AT, op1, op2);
8744 __ movn(dst, src, AT);
8745 break;
8747 case 0x06: //below_equal
8748 __ sltu(AT, op2, op1);
8749 __ movz(dst, src, AT);
8750 break;
8752 default:
8753 Unimplemented();
8754 }
8755 %}
8757 ins_pipe( pipe_slow );
8758 %}
// Branchless CMoveP on a SIGNED int compare (CmpI):
//   dst = src iff (tmp1 cop tmp2). Clobbers AT.
// The case labels originally said above/below (unsigned terms), but cop
// is a signed cmpOp and slt is the signed compare — labels corrected.
8760 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8761 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8762 ins_cost(80);
8763 format %{
8764 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8765 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8766 %}
8767 ins_encode %{
8768 Register op1 = $tmp1$$Register;
8769 Register op2 = $tmp2$$Register;
8770 Register dst = $dst$$Register;
8771 Register src = $src$$Register;
8772 int flag = $cop$$cmpcode;
8774 switch(flag)
8775 {
8776 case 0x01: //equal
8777 __ subu32(AT, op1, op2);
8778 __ movz(dst, src, AT);
8779 break;
8781 case 0x02: //not_equal
8782 __ subu32(AT, op1, op2);
8783 __ movn(dst, src, AT);
8784 break;
8786 case 0x03: //greater (signed; slt)
8787 __ slt(AT, op2, op1);
8788 __ movn(dst, src, AT);
8789 break;
8791 case 0x04: //greater_equal (signed)
8792 __ slt(AT, op1, op2);
8793 __ movz(dst, src, AT);
8794 break;
8796 case 0x05: //less (signed)
8797 __ slt(AT, op1, op2);
8798 __ movn(dst, src, AT);
8799 break;
8801 case 0x06: //less_equal (signed)
8802 __ slt(AT, op2, op1);
8803 __ movz(dst, src, AT);
8804 break;
8806 default:
8807 Unimplemented();
8808 }
8809 %}
8811 ins_pipe( pipe_slow );
8812 %}
// Branchless CMoveN on a SIGNED int compare (CmpI):
//   dst = src iff (tmp1 cop tmp2). Clobbers AT.
// Case labels corrected from unsigned terms (above/below) to the signed
// terms actually implemented (cmpOp + slt).
8814 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8815 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8816 ins_cost(80);
8817 format %{
8818 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
8819 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
8820 %}
8821 ins_encode %{
8822 Register op1 = $tmp1$$Register;
8823 Register op2 = $tmp2$$Register;
8824 Register dst = $dst$$Register;
8825 Register src = $src$$Register;
8826 int flag = $cop$$cmpcode;
8828 switch(flag)
8829 {
8830 case 0x01: //equal
8831 __ subu32(AT, op1, op2);
8832 __ movz(dst, src, AT);
8833 break;
8835 case 0x02: //not_equal
8836 __ subu32(AT, op1, op2);
8837 __ movn(dst, src, AT);
8838 break;
8840 case 0x03: //greater (signed; slt)
8841 __ slt(AT, op2, op1);
8842 __ movn(dst, src, AT);
8843 break;
8845 case 0x04: //greater_equal (signed)
8846 __ slt(AT, op1, op2);
8847 __ movz(dst, src, AT);
8848 break;
8850 case 0x05: //less (signed)
8851 __ slt(AT, op1, op2);
8852 __ movn(dst, src, AT);
8853 break;
8855 case 0x06: //less_equal (signed)
8856 __ slt(AT, op2, op1);
8857 __ movz(dst, src, AT);
8858 break;
8860 default:
8861 Unimplemented();
8862 }
8863 %}
8865 ins_pipe( pipe_slow );
8866 %}
// Branchless CMoveL on a SIGNED int compare (CmpI):
//   dst = src iff (tmp1 cop tmp2). Clobbers AT.
8869 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8870 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8871 ins_cost(80);
8872 format %{
8873 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
8874 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
8875 %}
8877 ins_encode %{
8878 Register op1 = $tmp1$$Register;
8879 Register op2 = $tmp2$$Register;
8880 Register dst = as_Register($dst$$reg);
8881 Register src = as_Register($src$$reg);
8882 int flag = $cop$$cmpcode;
8884 switch(flag)
8885 {
8886 case 0x01: //equal
8887 __ subu32(AT, op1, op2);
8888 __ movz(dst, src, AT);
8889 break;
8891 case 0x02: //not_equal
8892 __ subu32(AT, op1, op2);
8893 __ movn(dst, src, AT);
8894 break;
8896 case 0x03: //greater
8897 __ slt(AT, op2, op1);
8898 __ movn(dst, src, AT);
8899 break;
8901 case 0x04: //greater_equal
8902 __ slt(AT, op1, op2);
8903 __ movz(dst, src, AT);
8904 break;
8906 case 0x05: //less
8907 __ slt(AT, op1, op2);
8908 __ movn(dst, src, AT);
8909 break;
8911 case 0x06: //less_equal
8912 __ slt(AT, op2, op1);
8913 __ movz(dst, src, AT);
8914 break;
8916 default:
8917 Unimplemented();
8918 }
8919 %}
8921 ins_pipe( pipe_slow );
8922 %}
// Branchless CMoveL on a SIGNED long compare (CmpL):
//   dst = src iff (tmp1 cop tmp2). 64-bit subu/slt; clobbers AT.
8924 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8925 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8926 ins_cost(80);
8927 format %{
8928 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8929 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8930 %}
8931 ins_encode %{
8932 Register opr1 = as_Register($tmp1$$reg);
8933 Register opr2 = as_Register($tmp2$$reg);
8934 Register dst = as_Register($dst$$reg);
8935 Register src = as_Register($src$$reg);
8936 int flag = $cop$$cmpcode;
8938 switch(flag)
8939 {
8940 case 0x01: //equal
8941 __ subu(AT, opr1, opr2);
8942 __ movz(dst, src, AT);
8943 break;
8945 case 0x02: //not_equal
8946 __ subu(AT, opr1, opr2);
8947 __ movn(dst, src, AT);
8948 break;
8950 case 0x03: //greater
8951 __ slt(AT, opr2, opr1);
8952 __ movn(dst, src, AT);
8953 break;
8955 case 0x04: //greater_equal
8956 __ slt(AT, opr1, opr2);
8957 __ movz(dst, src, AT);
8958 break;
8960 case 0x05: //less
8961 __ slt(AT, opr1, opr2);
8962 __ movn(dst, src, AT);
8963 break;
8965 case 0x06: //less_equal
8966 __ slt(AT, opr2, opr1);
8967 __ movz(dst, src, AT);
8968 break;
8970 default:
8971 Unimplemented();
8972 }
8973 %}
8975 ins_pipe( pipe_slow );
8976 %}
// Branchless CMoveL on an UNSIGNED compressed-oop compare (CmpN):
//   dst = src iff (tmp1 cop tmp2). subu32/sltu; clobbers AT.
8978 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8979 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8980 ins_cost(80);
8981 format %{
8982 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8983 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
8984 %}
8985 ins_encode %{
8986 Register op1 = $tmp1$$Register;
8987 Register op2 = $tmp2$$Register;
8988 Register dst = $dst$$Register;
8989 Register src = $src$$Register;
8990 int flag = $cop$$cmpcode;
8992 switch(flag)
8993 {
8994 case 0x01: //equal
8995 __ subu32(AT, op1, op2);
8996 __ movz(dst, src, AT);
8997 break;
8999 case 0x02: //not_equal
9000 __ subu32(AT, op1, op2);
9001 __ movn(dst, src, AT);
9002 break;
9004 case 0x03: //above
9005 __ sltu(AT, op2, op1);
9006 __ movn(dst, src, AT);
9007 break;
9009 case 0x04: //above_equal
9010 __ sltu(AT, op1, op2);
9011 __ movz(dst, src, AT);
9012 break;
9014 case 0x05: //below
9015 __ sltu(AT, op1, op2);
9016 __ movn(dst, src, AT);
9017 break;
9019 case 0x06: //below_equal
9020 __ sltu(AT, op2, op1);
9021 __ movz(dst, src, AT);
9022 break;
9024 default:
9025 Unimplemented();
9026 }
9027 %}
9029 ins_pipe( pipe_slow );
9030 %}
// Branchless CMoveL on a double compare (CmpD): dst = src iff
// (tmp1 cop tmp2), selecting on the FCC bit via movt/movf. Same
// ordered/unordered predicate choice as cmovI_cmpD_reg_reg (NaN falls
// toward the "less" side for <, <= and is excluded for >, >=).
9033 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
9034 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9035 ins_cost(80);
9036 format %{
9037 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
9038 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
9039 %}
9040 ins_encode %{
9041 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9042 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9043 Register dst = as_Register($dst$$reg);
9044 Register src = as_Register($src$$reg);
9046 int flag = $cop$$cmpcode;
9048 switch(flag)
9049 {
9050 case 0x01: //equal
9051 __ c_eq_d(reg_op1, reg_op2);
9052 __ movt(dst, src);
9053 break;
9054 case 0x02: //not_equal
9055 __ c_eq_d(reg_op1, reg_op2);
9056 __ movf(dst, src);
9057 break;
9058 case 0x03: //greater
9059 __ c_ole_d(reg_op1, reg_op2);
9060 __ movf(dst, src);
9061 break;
9062 case 0x04: //greater_equal
9063 __ c_olt_d(reg_op1, reg_op2);
9064 __ movf(dst, src);
9065 break;
9066 case 0x05: //less
9067 __ c_ult_d(reg_op1, reg_op2);
9068 __ movt(dst, src);
9069 break;
9070 case 0x06: //less_equal
9071 __ c_ule_d(reg_op1, reg_op2);
9072 __ movt(dst, src);
9073 break;
9074 default:
9075 Unimplemented();
9076 }
9077 %}
9079 ins_pipe( pipe_slow );
9080 %}
// CMoveD on a double compare. FPU-to-FPU conditional moves are done with
// a short branch over mov_d (branch when the condition is FALSE, skip the
// move) instead of movt/movf, since the destination is an FP register.
// Each branch carries an explicit nop delay slot.
9082 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
9083 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9084 ins_cost(200);
9085 format %{
9086 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
9087 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
9088 %}
9089 ins_encode %{
9090 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9091 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9092 FloatRegister dst = as_FloatRegister($dst$$reg);
9093 FloatRegister src = as_FloatRegister($src$$reg);
9095 int flag = $cop$$cmpcode;
9097 Label L;
9099 switch(flag)
9100 {
9101 case 0x01: //equal
9102 __ c_eq_d(reg_op1, reg_op2);
9103 __ bc1f(L);
9104 __ nop();
9105 __ mov_d(dst, src);
9106 __ bind(L);
9107 break;
9108 case 0x02: //not_equal
9109 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
9110 __ c_eq_d(reg_op1, reg_op2);
9111 __ bc1t(L);
9112 __ nop();
9113 __ mov_d(dst, src);
9114 __ bind(L);
9115 break;
9116 case 0x03: //greater
9117 __ c_ole_d(reg_op1, reg_op2);
9118 __ bc1t(L);
9119 __ nop();
9120 __ mov_d(dst, src);
9121 __ bind(L);
9122 break;
9123 case 0x04: //greater_equal
9124 __ c_olt_d(reg_op1, reg_op2);
9125 __ bc1t(L);
9126 __ nop();
9127 __ mov_d(dst, src);
9128 __ bind(L);
9129 break;
9130 case 0x05: //less
9131 __ c_ult_d(reg_op1, reg_op2);
9132 __ bc1f(L);
9133 __ nop();
9134 __ mov_d(dst, src);
9135 __ bind(L);
9136 break;
9137 case 0x06: //less_equal
9138 __ c_ule_d(reg_op1, reg_op2);
9139 __ bc1f(L);
9140 __ nop();
9141 __ mov_d(dst, src);
9142 __ bind(L);
9143 break;
9144 default:
9145 Unimplemented();
9146 }
9147 %}
9149 ins_pipe( pipe_slow );
9150 %}
// CMoveF on a SIGNED int compare (CmpI). FP destination, so the select
// is a branch-over-mov_s: branch past the move when the condition is
// FALSE. Clobbers AT for the slt-based orderings; explicit nop delay
// slots after each branch.
9152 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9153 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9154 ins_cost(200);
9155 format %{
9156 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
9157 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
9158 %}
9160 ins_encode %{
9161 Register op1 = $tmp1$$Register;
9162 Register op2 = $tmp2$$Register;
9163 FloatRegister dst = as_FloatRegister($dst$$reg);
9164 FloatRegister src = as_FloatRegister($src$$reg);
9165 int flag = $cop$$cmpcode;
9166 Label L;
9168 switch(flag)
9169 {
9170 case 0x01: //equal
9171 __ bne(op1, op2, L);
9172 __ nop();
9173 __ mov_s(dst, src);
9174 __ bind(L);
9175 break;
9176 case 0x02: //not_equal
9177 __ beq(op1, op2, L);
9178 __ nop();
9179 __ mov_s(dst, src);
9180 __ bind(L);
9181 break;
9182 case 0x03: //greater
9183 __ slt(AT, op2, op1);
9184 __ beq(AT, R0, L);
9185 __ nop();
9186 __ mov_s(dst, src);
9187 __ bind(L);
9188 break;
9189 case 0x04: //greater_equal
9190 __ slt(AT, op1, op2);
9191 __ bne(AT, R0, L);
9192 __ nop();
9193 __ mov_s(dst, src);
9194 __ bind(L);
9195 break;
9196 case 0x05: //less
9197 __ slt(AT, op1, op2);
9198 __ beq(AT, R0, L);
9199 __ nop();
9200 __ mov_s(dst, src);
9201 __ bind(L);
9202 break;
9203 case 0x06: //less_equal
9204 __ slt(AT, op2, op1);
9205 __ bne(AT, R0, L);
9206 __ nop();
9207 __ mov_s(dst, src);
9208 __ bind(L);
9209 break;
9210 default:
9211 Unimplemented();
9212 }
9213 %}
9215 ins_pipe( pipe_slow );
9216 %}
// CMoveD on a SIGNED int compare (CmpI) — same branch-over-mov_d scheme
// as cmovF_cmpI_reg_reg but for double registers. Clobbers AT.
9218 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9219 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9220 ins_cost(200);
9221 format %{
9222 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
9223 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
9224 %}
9226 ins_encode %{
9227 Register op1 = $tmp1$$Register;
9228 Register op2 = $tmp2$$Register;
9229 FloatRegister dst = as_FloatRegister($dst$$reg);
9230 FloatRegister src = as_FloatRegister($src$$reg);
9231 int flag = $cop$$cmpcode;
9232 Label L;
9234 switch(flag)
9235 {
9236 case 0x01: //equal
9237 __ bne(op1, op2, L);
9238 __ nop();
9239 __ mov_d(dst, src);
9240 __ bind(L);
9241 break;
9242 case 0x02: //not_equal
9243 __ beq(op1, op2, L);
9244 __ nop();
9245 __ mov_d(dst, src);
9246 __ bind(L);
9247 break;
9248 case 0x03: //greater
9249 __ slt(AT, op2, op1);
9250 __ beq(AT, R0, L);
9251 __ nop();
9252 __ mov_d(dst, src);
9253 __ bind(L);
9254 break;
9255 case 0x04: //greater_equal
9256 __ slt(AT, op1, op2);
9257 __ bne(AT, R0, L);
9258 __ nop();
9259 __ mov_d(dst, src);
9260 __ bind(L);
9261 break;
9262 case 0x05: //less
9263 __ slt(AT, op1, op2);
9264 __ beq(AT, R0, L);
9265 __ nop();
9266 __ mov_d(dst, src);
9267 __ bind(L);
9268 break;
9269 case 0x06: //less_equal
9270 __ slt(AT, op2, op1);
9271 __ bne(AT, R0, L);
9272 __ nop();
9273 __ mov_d(dst, src);
9274 __ bind(L);
9275 break;
9276 default:
9277 Unimplemented();
9278 }
9279 %}
9281 ins_pipe( pipe_slow );
9282 %}
// CMoveD on a pointer compare (CmpP) — branch-over-mov_d scheme.
// Clobbers AT.
// NOTE(review): the ordering cases use signed slt on pointer values while
// other CmpP rules here (e.g. cmovP_cmpP_reg_reg) use sltu; for the
// eq/ne cases this is irrelevant, confirm whether ordered pointer
// CMoveD can actually be generated.
9284 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9285 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9286 ins_cost(200);
9287 format %{
9288 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9289 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9290 %}
9292 ins_encode %{
9293 Register op1 = $tmp1$$Register;
9294 Register op2 = $tmp2$$Register;
9295 FloatRegister dst = as_FloatRegister($dst$$reg);
9296 FloatRegister src = as_FloatRegister($src$$reg);
9297 int flag = $cop$$cmpcode;
9298 Label L;
9300 switch(flag)
9301 {
9302 case 0x01: //equal
9303 __ bne(op1, op2, L);
9304 __ nop();
9305 __ mov_d(dst, src);
9306 __ bind(L);
9307 break;
9308 case 0x02: //not_equal
9309 __ beq(op1, op2, L);
9310 __ nop();
9311 __ mov_d(dst, src);
9312 __ bind(L);
9313 break;
9314 case 0x03: //greater
9315 __ slt(AT, op2, op1);
9316 __ beq(AT, R0, L);
9317 __ nop();
9318 __ mov_d(dst, src);
9319 __ bind(L);
9320 break;
9321 case 0x04: //greater_equal
9322 __ slt(AT, op1, op2);
9323 __ bne(AT, R0, L);
9324 __ nop();
9325 __ mov_d(dst, src);
9326 __ bind(L);
9327 break;
9328 case 0x05: //less
9329 __ slt(AT, op1, op2);
9330 __ beq(AT, R0, L);
9331 __ nop();
9332 __ mov_d(dst, src);
9333 __ bind(L);
9334 break;
9335 case 0x06: //less_equal
9336 __ slt(AT, op2, op1);
9337 __ bne(AT, R0, L);
9338 __ nop();
9339 __ mov_d(dst, src);
9340 __ bind(L);
9341 break;
9342 default:
9343 Unimplemented();
9344 }
9345 %}
9347 ins_pipe( pipe_slow );
9348 %}
9350 //FIXME
// Branchless CMoveI on a float compare (CmpF): dst = src iff
// (tmp1 cop tmp2), selecting on FCC via movt/movf. Same
// ordered/unordered predicate choice as the double variants.
9351 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9352 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9353 ins_cost(80);
9354 format %{
9355 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9356 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9357 %}
9359 ins_encode %{
9360 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9361 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9362 Register dst = $dst$$Register;
9363 Register src = $src$$Register;
9364 int flag = $cop$$cmpcode;
9366 switch(flag)
9367 {
9368 case 0x01: //equal
9369 __ c_eq_s(reg_op1, reg_op2);
9370 __ movt(dst, src);
9371 break;
9372 case 0x02: //not_equal
9373 __ c_eq_s(reg_op1, reg_op2);
9374 __ movf(dst, src);
9375 break;
9376 case 0x03: //greater
9377 __ c_ole_s(reg_op1, reg_op2);
9378 __ movf(dst, src);
9379 break;
9380 case 0x04: //greater_equal
9381 __ c_olt_s(reg_op1, reg_op2);
9382 __ movf(dst, src);
9383 break;
9384 case 0x05: //less
9385 __ c_ult_s(reg_op1, reg_op2);
9386 __ movt(dst, src);
9387 break;
9388 case 0x06: //less_equal
9389 __ c_ule_s(reg_op1, reg_op2);
9390 __ movt(dst, src);
9391 break;
9392 default:
9393 Unimplemented();
9394 }
9395 %}
9396 ins_pipe( pipe_slow );
9397 %}
// CMoveF on a float compare — branch-over-mov_s: FP compare sets FCC,
// bc1f/bc1t branch past the move when the condition is FALSE. Explicit
// nop delay slots after each FP branch.
9399 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9400 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9401 ins_cost(200);
9402 format %{
9403 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9404 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9405 %}
9407 ins_encode %{
9408 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9409 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9410 FloatRegister dst = $dst$$FloatRegister;
9411 FloatRegister src = $src$$FloatRegister;
9412 Label L;
9413 int flag = $cop$$cmpcode;
9415 switch(flag)
9416 {
9417 case 0x01: //equal
9418 __ c_eq_s(reg_op1, reg_op2);
9419 __ bc1f(L);
9420 __ nop();
9421 __ mov_s(dst, src);
9422 __ bind(L);
9423 break;
9424 case 0x02: //not_equal
9425 __ c_eq_s(reg_op1, reg_op2);
9426 __ bc1t(L);
9427 __ nop();
9428 __ mov_s(dst, src);
9429 __ bind(L);
9430 break;
9431 case 0x03: //greater
9432 __ c_ole_s(reg_op1, reg_op2);
9433 __ bc1t(L);
9434 __ nop();
9435 __ mov_s(dst, src);
9436 __ bind(L);
9437 break;
9438 case 0x04: //greater_equal
9439 __ c_olt_s(reg_op1, reg_op2);
9440 __ bc1t(L);
9441 __ nop();
9442 __ mov_s(dst, src);
9443 __ bind(L);
9444 break;
9445 case 0x05: //less
9446 __ c_ult_s(reg_op1, reg_op2);
9447 __ bc1f(L);
9448 __ nop();
9449 __ mov_s(dst, src);
9450 __ bind(L);
9451 break;
9452 case 0x06: //less_equal
9453 __ c_ule_s(reg_op1, reg_op2);
9454 __ bc1f(L);
9455 __ nop();
9456 __ mov_s(dst, src);
9457 __ bind(L);
9458 break;
9459 default:
9460 Unimplemented();
9461 }
9462 %}
9463 ins_pipe( pipe_slow );
9464 %}
9466 // Manifest a CmpL result in an integer register. Very painful.
9467 // This is the test to avoid.
// Three-way long compare: dst = -1 / 0 / +1 for src1 < / == / > src2.
// The bltz branch's delay slot pre-loads dst = -1 (taken on less-than);
// on fall-through dst = 1, then movz zeroes it when the difference is 0.
// Clobbers AT.
9468 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9469 match(Set dst (CmpL3 src1 src2));
9470 ins_cost(1000);
9471 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9472 ins_encode %{
9473 Register opr1 = as_Register($src1$$reg);
9474 Register opr2 = as_Register($src2$$reg);
9475 Register dst = as_Register($dst$$reg);
9477 Label Done;
9479 __ subu(AT, opr1, opr2);
9480 __ bltz(AT, Done);
9481 __ delayed()->daddiu(dst, R0, -1);
9483 __ move(dst, 1);
9484 __ movz(dst, R0, AT);
9486 __ bind(Done);
9487 %}
9488 ins_pipe( pipe_slow );
9489 %}
9491 //
9492 // less_result = -1
9493 // greater_result = 1
9494 // equal_result = 0
9495 // nan_result = -1
9496 //
// Three-way float compare, NaN yields -1 (see the result-table comment
// above): c_ult covers "less OR unordered", so NaN takes the -1 path via
// the delay slot; otherwise dst = 1, zeroed by movt when equal.
9497 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9498 match(Set dst (CmpF3 src1 src2));
9499 ins_cost(1000);
9500 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9501 ins_encode %{
9502 FloatRegister src1 = as_FloatRegister($src1$$reg);
9503 FloatRegister src2 = as_FloatRegister($src2$$reg);
9504 Register dst = as_Register($dst$$reg);
9506 Label Done;
9508 __ c_ult_s(src1, src2);
9509 __ bc1t(Done);
9510 __ delayed()->daddiu(dst, R0, -1);
9512 __ c_eq_s(src1, src2);
9513 __ move(dst, 1);
9514 __ movt(dst, R0);
9516 __ bind(Done);
9517 %}
9518 ins_pipe( pipe_slow );
9519 %}
// Three-way double compare — same scheme as cmpF3_reg_reg (NaN -> -1).
9521 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9522 match(Set dst (CmpD3 src1 src2));
9523 ins_cost(1000);
9524 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9525 ins_encode %{
9526 FloatRegister src1 = as_FloatRegister($src1$$reg);
9527 FloatRegister src2 = as_FloatRegister($src2$$reg);
9528 Register dst = as_Register($dst$$reg);
9530 Label Done;
9532 __ c_ult_d(src1, src2);
9533 __ bc1t(Done);
9534 __ delayed()->daddiu(dst, R0, -1);
9536 __ c_eq_d(src1, src2);
9537 __ move(dst, 1);
9538 __ movt(dst, R0);
9540 __ bind(Done);
9541 %}
9542 ins_pipe( pipe_slow );
9543 %}
// Zero-fill $cnt doublewords starting at $base by storing R0 in a simple
// store/decrement loop. Clobbers T9 (remaining count) and AT (cursor).
// NOTE(review): the 2012/9/21 comment below says $cnt is in doublewords
// (matching the per-iteration 8-byte store), which contradicts the older
// "number of bytes" comment — the doubleword reading matches the code.
9545 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9546 match(Set dummy (ClearArray cnt base));
9547 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9548 ins_encode %{
9549 //Assume cnt is the number of bytes in an array to be cleared,
9550 //and base points to the starting address of the array.
9551 Register base = $base$$Register;
9552 Register num = $cnt$$Register;
9553 Label Loop, done;
9555 /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
9556 __ move(T9, num); /* T9 = words */
9557 __ beq(T9, R0, done);
9558 __ nop();
9559 __ move(AT, base);
9561 __ bind(Loop);
9562 __ sd(R0, Address(AT, 0));
9563 __ daddi(AT, AT, wordSize);
9564 __ daddi(T9, T9, -1);
9565 __ bne(T9, R0, Loop);
9566 __ delayed()->nop();
9567 __ bind(done);
9568 %}
9569 ins_pipe( pipe_slow );
9570 %}
// StrComp intrinsic: lexicographic UTF-16 compare of str1[0..cnt1) and
// str2[0..cnt2). result starts as cnt1-cnt2 (returned when the shorter
// string is a prefix of the longer); on the first differing char, result
// becomes the char difference. Kills all four input registers; uses AT
// and cnt2 as scratch inside the loop; branch delay slots do the
// char-load / pointer-advance work.
9572 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9573 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9574 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9576 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9577 ins_encode %{
9578 // Get the first character position in both strings
9579 // [8] char array, [12] offset, [16] count
9580 Register str1 = $str1$$Register;
9581 Register str2 = $str2$$Register;
9582 Register cnt1 = $cnt1$$Register;
9583 Register cnt2 = $cnt2$$Register;
9584 Register result = $result$$Register;
9586 Label L, Loop, haveResult, done;
9588 // compute the and difference of lengths (in result)
9589 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9591 // compute the shorter length (in cnt1)
9592 __ slt(AT, cnt2, cnt1);
9593 __ movn(cnt1, cnt2, AT);
9595 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9596 __ bind(Loop); // Loop begin
9597 __ beq(cnt1, R0, done);
9598 __ delayed()->lhu(AT, str1, 0);; // delay slot: load current char of str1
9600 // compare current character
9601 __ lhu(cnt2, str2, 0);
9602 __ bne(AT, cnt2, haveResult);
9603 __ delayed()->addi(str1, str1, 2);
9604 __ addi(str2, str2, 2);
9605 __ b(Loop);
9606 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
9608 __ bind(haveResult);
9609 __ subu(result, AT, cnt2); // result = first differing char diff
9611 __ bind(done);
9612 %}
9614 ins_pipe( pipe_slow );
9615 %}
9617 // intrinsic optimization
// StrEquals intrinsic: result = 1 iff the cnt UTF-16 chars at str1 and
// str2 are identical (or str1 == str2 aliases the same char[]).
// Kills str1/str2/cnt and tmp; uses AT as scratch.
// NOTE(review): the beq branches at 9635/9639 have no delayed() marker —
// the following daddiu appears intended as the delay slot (setting
// result=1 on the taken path); confirm this assembler's branch/delay-slot
// convention.
9618 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9619 match(Set result (StrEquals (Binary str1 str2) cnt));
9620 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9622 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9623 ins_encode %{
9624 // Get the first character position in both strings
9625 // [8] char array, [12] offset, [16] count
9626 Register str1 = $str1$$Register;
9627 Register str2 = $str2$$Register;
9628 Register cnt = $cnt$$Register;
9629 Register tmp = $temp$$Register;
9630 Register result = $result$$Register;
9632 Label Loop, done;
9635 __ beq(str1, str2, done); // same char[] ?
9636 __ daddiu(result, R0, 1);
9638 __ bind(Loop); // Loop begin
9639 __ beq(cnt, R0, done);
9640 __ daddiu(result, R0, 1); // count == 0
9642 // compare current character
9643 __ lhu(AT, str1, 0);;
9644 __ lhu(tmp, str2, 0);
9645 __ bne(AT, tmp, done);
9646 __ delayed()->daddi(result, R0, 0); // mismatch: result = 0 in delay slot
9647 __ addi(str1, str1, 2);
9648 __ addi(str2, str2, 2);
9649 __ b(Loop);
9650 __ delayed()->addi(cnt, cnt, -1); // Loop end
9652 __ bind(done);
9653 %}
9655 ins_pipe( pipe_slow );
9656 %}
9658 //----------Arithmetic Instructions-------------------------------------------
9659 //----------Addition Instructions---------------------------------------------
// 32-bit integer add: dst = src1 + src2 (addu32, no overflow trap).
9660 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9661 match(Set dst (AddI src1 src2));
9663 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9664 ins_encode %{
9665 Register dst = $dst$$Register;
9666 Register src1 = $src1$$Register;
9667 Register src2 = $src2$$Register;
9668 __ addu32(dst, src1, src2);
9669 %}
9670 ins_pipe( ialu_regI_regI );
9671 %}
// 32-bit integer add with immediate: use addiu32 when the constant fits
// a signed 16-bit immediate, otherwise materialize it in AT first.
// Clobbers AT in the large-immediate path.
9673 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9674 match(Set dst (AddI src1 src2));
9676 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9677 ins_encode %{
9678 Register dst = $dst$$Register;
9679 Register src1 = $src1$$Register;
9680 int imm = $src2$$constant;
9682 if(Assembler::is_simm16(imm)) {
9683 __ addiu32(dst, src1, imm);
9684 } else {
9685 __ move(AT, imm);
9686 __ addu32(dst, src1, AT);
9687 }
9688 %}
9689 ins_pipe( ialu_regI_regI );
9690 %}
// Pointer add: dst = src1 + src2 (64-bit daddu).
9692 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9693 match(Set dst (AddP src1 src2));
9695 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9697 ins_encode %{
9698 Register dst = $dst$$Register;
9699 Register src1 = $src1$$Register;
9700 Register src2 = $src2$$Register;
9701 __ daddu(dst, src1, src2);
9702 %}
9704 ins_pipe( ialu_regI_regI );
9705 %}
// Pointer add with an int offset widened to long (ConvI2L is free here:
// 32-bit values are kept sign-extended in registers).
9707 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9708 match(Set dst (AddP src1 (ConvI2L src2)));
9710 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9712 ins_encode %{
9713 Register dst = $dst$$Register;
9714 Register src1 = $src1$$Register;
9715 Register src2 = $src2$$Register;
9716 __ daddu(dst, src1, src2);
9717 %}
9719 ins_pipe( ialu_regI_regI );
9720 %}
// Pointer add with long immediate: daddiu for simm16 constants,
// otherwise set64 materializes the constant in AT. Clobbers AT in the
// large-immediate path.
9722 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9723 match(Set dst (AddP src1 src2));
9725 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9726 ins_encode %{
9727 Register src1 = $src1$$Register;
9728 long src2 = $src2$$constant;
9729 Register dst = $dst$$Register;
9731 if(Assembler::is_simm16(src2)) {
9732 __ daddiu(dst, src1, src2);
9733 } else {
9734 __ set64(AT, src2);
9735 __ daddu(dst, src1, AT);
9736 }
9737 %}
9738 ins_pipe( ialu_regI_imm16 );
9739 %}
9741 // Add Long Register with Register
9742 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9743 match(Set dst (AddL src1 src2));
9744 ins_cost(200);
9745 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9747 ins_encode %{
9748 Register dst_reg = as_Register($dst$$reg);
9749 Register src1_reg = as_Register($src1$$reg);
9750 Register src2_reg = as_Register($src2$$reg);
9752 __ daddu(dst_reg, src1_reg, src2_reg); // 64-bit add, no overflow trap
9753 %}
9755 ins_pipe( ialu_regL_regL );
9756 %}
// Long add with a 16-bit immediate (immL16 guarantees simm16 fit, so a
// single daddiu suffices).
9758 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9759 %{
9760 match(Set dst (AddL src1 src2));
9762 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9763 ins_encode %{
9764 Register dst_reg = as_Register($dst$$reg);
9765 Register src1_reg = as_Register($src1$$reg);
9766 int src2_imm = $src2$$constant;
9768 __ daddiu(dst_reg, src1_reg, src2_imm);
9769 %}
9771 ins_pipe( ialu_regL_regL );
9772 %}
// (int -> long) + simm16 immediate; the widening is free since 32-bit
// values are kept sign-extended in registers.
9774 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9775 %{
9776 match(Set dst (AddL (ConvI2L src1) src2));
9778 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9779 ins_encode %{
9780 Register dst_reg = as_Register($dst$$reg);
9781 Register src1_reg = as_Register($src1$$reg);
9782 int src2_imm = $src2$$constant;
9784 __ daddiu(dst_reg, src1_reg, src2_imm);
9785 %}
9787 ins_pipe( ialu_regL_regL );
9788 %}
// (int -> long) + long register; widening is free (sign-extended regs).
9790 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9791 match(Set dst (AddL (ConvI2L src1) src2));
9792 ins_cost(200);
9793 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9795 ins_encode %{
9796 Register dst_reg = as_Register($dst$$reg);
9797 Register src1_reg = as_Register($src1$$reg);
9798 Register src2_reg = as_Register($src2$$reg);
9800 __ daddu(dst_reg, src1_reg, src2_reg);
9801 %}
9803 ins_pipe( ialu_regL_regL );
9804 %}
// (int -> long) + (int -> long); both widenings are free.
9806 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9807 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9808 ins_cost(200);
9809 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9811 ins_encode %{
9812 Register dst_reg = as_Register($dst$$reg);
9813 Register src1_reg = as_Register($src1$$reg);
9814 Register src2_reg = as_Register($src2$$reg);
9816 __ daddu(dst_reg, src1_reg, src2_reg);
9817 %}
9819 ins_pipe( ialu_regL_regL );
9820 %}
// Long register + (int -> long); widening is free.
9822 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9823 match(Set dst (AddL src1 (ConvI2L src2)));
9824 ins_cost(200);
9825 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9827 ins_encode %{
9828 Register dst_reg = as_Register($dst$$reg);
9829 Register src1_reg = as_Register($src1$$reg);
9830 Register src2_reg = as_Register($src2$$reg);
9832 __ daddu(dst_reg, src1_reg, src2_reg);
9833 %}
9835 ins_pipe( ialu_regL_regL );
9836 %}
9838 //----------Subtraction Instructions-------------------------------------------
9839 // Integer Subtraction Instructions
// 32-bit integer subtract, register - register (subu32 = non-trapping sub).
9840 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9841 match(Set dst (SubI src1 src2));
9842 ins_cost(100);
9844 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9845 ins_encode %{
9846 Register dst = $dst$$Register;
9847 Register src1 = $src1$$Register;
9848 Register src2 = $src2$$Register;
9849 __ subu32(dst, src1, src2);
9850 %}
9851 ins_pipe( ialu_regI_regI );
9852 %}
// 32-bit subtract of an immediate, implemented as addiu32 with the negated
// constant (MIPS has no subtract-immediate instruction). immI16_sub is
// constrained so that -constant still fits the signed 16-bit field.
9854 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9855 match(Set dst (SubI src1 src2));
9856 ins_cost(80);
9858 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9859 ins_encode %{
9860 Register dst = $dst$$Register;
9861 Register src1 = $src1$$Register;
9862 __ addiu32(dst, src1, -1 * $src2$$constant);
9863 %}
9864 ins_pipe( ialu_regI_regI );
9865 %}
// Integer negation: 0 - src, encoded as subu32 from the hardwired zero reg.
9867 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9868 match(Set dst (SubI zero src));
9869 ins_cost(80);
9871 format %{ "neg $dst, $src #@negI_Reg" %}
9872 ins_encode %{
9873 Register dst = $dst$$Register;
9874 Register src = $src$$Register;
9875 __ subu32(dst, R0, src);
9876 %}
9877 ins_pipe( ialu_regI_regI );
9878 %}
// Long negation: 0 - src via 64-bit subu from the zero register.
9880 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9881 match(Set dst (SubL zero src));
9882 ins_cost(80);
9884 format %{ "neg $dst, $src #@negL_Reg" %}
9885 ins_encode %{
9886 Register dst = $dst$$Register;
9887 Register src = $src$$Register;
9888 __ subu(dst, R0, src);
9889 %}
// NOTE(review): pipe class is ialu_regI_regI although this is a 64-bit op;
// other long instructs here use ialu_regL_regL — confirm this is intentional.
9890 ins_pipe( ialu_regI_regI );
9891 %}
// Long subtract of an immediate, as daddiu with the negated constant
// (no 64-bit subtract-immediate exists). immL16_sub keeps -constant in range.
9893 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9894 match(Set dst (SubL src1 src2));
9895 ins_cost(80);
9897 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9898 ins_encode %{
9899 Register dst = $dst$$Register;
9900 Register src1 = $src1$$Register;
9901 __ daddiu(dst, src1, -1 * $src2$$constant);
9902 %}
9903 ins_pipe( ialu_regI_regI );
9904 %}
9906 // Subtract Long Register with Register.
// Long subtract, register - register (64-bit non-trapping subu).
9907 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9908 match(Set dst (SubL src1 src2));
9909 ins_cost(100);
9910 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9911 ins_encode %{
9912 Register dst = as_Register($dst$$reg);
9913 Register src1 = as_Register($src1$$reg);
9914 Register src2 = as_Register($src2$$reg);
9916 __ subu(dst, src1, src2);
9917 %}
9918 ins_pipe( ialu_regL_regL );
9919 %}
// Long subtract where the subtrahend is a sign-extended int register.
9921 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9922 match(Set dst (SubL src1 (ConvI2L src2)));
9923 ins_cost(100);
9924 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9925 ins_encode %{
9926 Register dst = as_Register($dst$$reg);
9927 Register src1 = as_Register($src1$$reg);
9928 Register src2 = as_Register($src2$$reg);
9930 __ subu(dst, src1, src2);
9931 %}
9932 ins_pipe( ialu_regL_regL );
9933 %}
// Long subtract where the minuend is a sign-extended int register.
9935 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9936 match(Set dst (SubL (ConvI2L src1) src2));
9937 ins_cost(200);
9938 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9939 ins_encode %{
9940 Register dst = as_Register($dst$$reg);
9941 Register src1 = as_Register($src1$$reg);
9942 Register src2 = as_Register($src2$$reg);
9944 __ subu(dst, src1, src2);
9945 %}
9946 ins_pipe( ialu_regL_regL );
9947 %}
// Long subtract where both operands are sign-extended int registers.
9949 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9950 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9951 ins_cost(200);
9952 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9953 ins_encode %{
9954 Register dst = as_Register($dst$$reg);
9955 Register src1 = as_Register($src1$$reg);
9956 Register src2 = as_Register($src2$$reg);
9958 __ subu(dst, src1, src2);
9959 %}
9960 ins_pipe( ialu_regL_regL );
9961 %}
9963 // Integer MOD with Register
// 32-bit integer remainder: div writes quotient to LO and remainder to HI,
// so the remainder is fetched with mfhi.
9964 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9965 match(Set dst (ModI src1 src2));
9966 ins_cost(300);
9967 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9968 ins_encode %{
9969 Register dst = $dst$$Register;
9970 Register src1 = $src1$$Register;
9971 Register src2 = $src2$$Register;
// The Loongson gsmod path is deliberately disabled (if (0)) — see the
// measurement note below; only the div+mfhi path is ever emitted.
9973 //if (UseLoongsonISA) {
9974 if (0) {
9975 // 2016.08.10
9976 // Experiments show that gsmod is slower that div+mfhi.
9977 // So I just disable it here.
9978 __ gsmod(dst, src1, src2);
9979 } else {
9980 __ div(src1, src2);
9981 __ mfhi(dst);
9982 }
9983 %}
9985 //ins_pipe( ialu_mod );
9986 ins_pipe( ialu_regI_regI );
9987 %}
// 64-bit remainder: single gsdmod on Loongson, otherwise ddiv + mfhi
// (remainder lands in the HI register).
9989 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9990 match(Set dst (ModL src1 src2));
9991 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
9993 ins_encode %{
9994 Register dst = as_Register($dst$$reg);
9995 Register op1 = as_Register($src1$$reg);
9996 Register op2 = as_Register($src2$$reg);
9998 if (UseLoongsonISA) {
9999 __ gsdmod(dst, op1, op2);
10000 } else {
10001 __ ddiv(op1, op2);
10002 __ mfhi(dst);
10003 }
10004 %}
10005 ins_pipe( pipe_slow );
10006 %}
// 32-bit multiply with a three-operand mul (result written directly to dst).
10008 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10009 match(Set dst (MulI src1 src2));
10011 ins_cost(300);
10012 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10013 ins_encode %{
10014 Register src1 = $src1$$Register;
10015 Register src2 = $src2$$Register;
10016 Register dst = $dst$$Register;
10018 __ mul(dst, src1, src2);
10019 %}
10020 ins_pipe( ialu_mult );
10021 %}
// Fused multiply-add: dst = src1*src2 + src3, using the HI/LO accumulator —
// src3 is seeded into LO, madd accumulates the product, mflo reads the sum.
10023 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
10024 match(Set dst (AddI (MulI src1 src2) src3));
10026 ins_cost(999);
10027 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
10028 ins_encode %{
10029 Register src1 = $src1$$Register;
10030 Register src2 = $src2$$Register;
10031 Register src3 = $src3$$Register;
10032 Register dst = $dst$$Register;
10034 __ mtlo(src3);
10035 __ madd(src1, src2);
10036 __ mflo(dst);
10037 %}
10038 ins_pipe( ialu_mult );
10039 %}
// 32-bit integer divide. The divisor is checked against zero with a trap
// instruction because MIPS div itself never raises an exception.
10041 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10042 match(Set dst (DivI src1 src2));
10044 ins_cost(300);
10045 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10046 ins_encode %{
10047 Register src1 = $src1$$Register;
10048 Register src2 = $src2$$Register;
10049 Register dst = $dst$$Register;
10051 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10052 We must trap an exception manually. */
// teq traps if src2 == R0 (i.e. divide by zero); 0x7 is the trap code.
10053 __ teq(R0, src2, 0x7);
10055 if (UseLoongsonISA) {
10056 __ gsdiv(dst, src1, src2);
10057 } else {
10058 __ div(src1, src2);
// Two nops separate div from mflo before reading the quotient from LO.
10060 __ nop();
10061 __ nop();
10062 __ mflo(dst);
10063 }
10064 %}
10065 ins_pipe( ialu_mod );
10066 %}
// Single-precision float divide (IEEE semantics handle div-by-zero: no trap).
10068 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10069 match(Set dst (DivF src1 src2));
10071 ins_cost(300);
10072 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10073 ins_encode %{
10074 FloatRegister src1 = $src1$$FloatRegister;
10075 FloatRegister src2 = $src2$$FloatRegister;
10076 FloatRegister dst = $dst$$FloatRegister;
10078 /* Here do we need to trap an exception manually ? */
10079 __ div_s(dst, src1, src2);
10080 %}
10081 ins_pipe( pipe_slow );
10082 %}
// Double-precision float divide (IEEE semantics: no explicit zero check).
10084 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10085 match(Set dst (DivD src1 src2));
10087 ins_cost(300);
10088 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10089 ins_encode %{
10090 FloatRegister src1 = $src1$$FloatRegister;
10091 FloatRegister src2 = $src2$$FloatRegister;
10092 FloatRegister dst = $dst$$FloatRegister;
10094 /* Here do we need to trap an exception manually ? */
10095 __ div_d(dst, src1, src2);
10096 %}
10097 ins_pipe( pipe_slow );
10098 %}
// 64-bit multiply: single gsdmult on Loongson, else dmult + mflo
// (low 64 bits of the product are read from LO).
10100 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10101 match(Set dst (MulL src1 src2));
10102 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10103 ins_encode %{
10104 Register dst = as_Register($dst$$reg);
10105 Register op1 = as_Register($src1$$reg);
10106 Register op2 = as_Register($src2$$reg);
10108 if (UseLoongsonISA) {
10109 __ gsdmult(dst, op1, op2);
10110 } else {
10111 __ dmult(op1, op2);
10112 __ mflo(dst);
10113 }
10114 %}
10115 ins_pipe( pipe_slow );
10116 %}
// 64-bit multiply where the right operand is a sign-extended int register;
// same gsdmult / dmult+mflo selection as mulL_reg_reg.
10118 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10119 match(Set dst (MulL src1 (ConvI2L src2)));
10120 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10121 ins_encode %{
10122 Register dst = as_Register($dst$$reg);
10123 Register op1 = as_Register($src1$$reg);
10124 Register op2 = as_Register($src2$$reg);
10126 if (UseLoongsonISA) {
10127 __ gsdmult(dst, op1, op2);
10128 } else {
10129 __ dmult(op1, op2);
10130 __ mflo(dst);
10131 }
10132 %}
10133 ins_pipe( pipe_slow );
10134 %}
// 64-bit divide: gsddiv on Loongson, else ddiv + mflo (quotient is in LO).
// NOTE(review): unlike divI_Reg_Reg, no teq zero-divisor trap is emitted
// here — confirm the zero check is handled elsewhere for DivL.
10136 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10137 match(Set dst (DivL src1 src2));
10138 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10140 ins_encode %{
10141 Register dst = as_Register($dst$$reg);
10142 Register op1 = as_Register($src1$$reg);
10143 Register op2 = as_Register($src2$$reg);
10145 if (UseLoongsonISA) {
10146 __ gsddiv(dst, op1, op2);
10147 } else {
10148 __ ddiv(op1, op2);
10149 __ mflo(dst);
10150 }
10151 %}
10152 ins_pipe( pipe_slow );
10153 %}
// Single-precision float add.
10155 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10156 match(Set dst (AddF src1 src2));
10157 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10158 ins_encode %{
10159 FloatRegister src1 = as_FloatRegister($src1$$reg);
10160 FloatRegister src2 = as_FloatRegister($src2$$reg);
10161 FloatRegister dst = as_FloatRegister($dst$$reg);
10163 __ add_s(dst, src1, src2);
10164 %}
10165 ins_pipe( fpu_regF_regF );
10166 %}
// Single-precision float subtract.
10168 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10169 match(Set dst (SubF src1 src2));
10170 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10171 ins_encode %{
10172 FloatRegister src1 = as_FloatRegister($src1$$reg);
10173 FloatRegister src2 = as_FloatRegister($src2$$reg);
10174 FloatRegister dst = as_FloatRegister($dst$$reg);
10176 __ sub_s(dst, src1, src2);
10177 %}
10178 ins_pipe( fpu_regF_regF );
10179 %}
// Double-precision float add.
10180 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10181 match(Set dst (AddD src1 src2));
10182 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10183 ins_encode %{
10184 FloatRegister src1 = as_FloatRegister($src1$$reg);
10185 FloatRegister src2 = as_FloatRegister($src2$$reg);
10186 FloatRegister dst = as_FloatRegister($dst$$reg);
10188 __ add_d(dst, src1, src2);
10189 %}
10190 ins_pipe( fpu_regF_regF );
10191 %}
// Double-precision float subtract.
10193 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10194 match(Set dst (SubD src1 src2));
10195 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10196 ins_encode %{
10197 FloatRegister src1 = as_FloatRegister($src1$$reg);
10198 FloatRegister src2 = as_FloatRegister($src2$$reg);
10199 FloatRegister dst = as_FloatRegister($dst$$reg);
10201 __ sub_d(dst, src1, src2);
10202 %}
10203 ins_pipe( fpu_regF_regF );
10204 %}
// Single-precision float negation (sign-bit flip via neg.s).
10206 instruct negF_reg(regF dst, regF src) %{
10207 match(Set dst (NegF src));
10208 format %{ "negF $dst, $src @negF_reg" %}
10209 ins_encode %{
10210 FloatRegister src = as_FloatRegister($src$$reg);
10211 FloatRegister dst = as_FloatRegister($dst$$reg);
10213 __ neg_s(dst, src);
10214 %}
10215 ins_pipe( fpu_regF_regF );
10216 %}
// Double-precision float negation.
10218 instruct negD_reg(regD dst, regD src) %{
10219 match(Set dst (NegD src));
10220 format %{ "negD $dst, $src @negD_reg" %}
10221 ins_encode %{
10222 FloatRegister src = as_FloatRegister($src$$reg);
10223 FloatRegister dst = as_FloatRegister($dst$$reg);
10225 __ neg_d(dst, src);
10226 %}
10227 ins_pipe( fpu_regF_regF );
10228 %}
// Single-precision float multiply.
10231 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10232 match(Set dst (MulF src1 src2));
10233 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10234 ins_encode %{
10235 FloatRegister src1 = $src1$$FloatRegister;
10236 FloatRegister src2 = $src2$$FloatRegister;
10237 FloatRegister dst = $dst$$FloatRegister;
10239 __ mul_s(dst, src1, src2);
10240 %}
10241 ins_pipe( fpu_regF_regF );
10242 %}
// Fused single-precision multiply-add: dst = src1*src2 + src3 via madd.s.
// The very high cost effectively disables selection (see comment below).
10244 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10245 match(Set dst (AddF (MulF src1 src2) src3));
10246 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10247 ins_cost(44444);
10248 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10249 ins_encode %{
10250 FloatRegister src1 = $src1$$FloatRegister;
10251 FloatRegister src2 = $src2$$FloatRegister;
10252 FloatRegister src3 = $src3$$FloatRegister;
10253 FloatRegister dst = $dst$$FloatRegister;
10255 __ madd_s(dst, src1, src2, src3);
10256 %}
10257 ins_pipe( fpu_regF_regF );
10258 %}
10260 // Multiply two double-precision floating point numbers.
// Double-precision float multiply.
10261 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10262 match(Set dst (MulD src1 src2));
10263 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10264 ins_encode %{
10265 FloatRegister src1 = $src1$$FloatRegister;
10266 FloatRegister src2 = $src2$$FloatRegister;
10267 FloatRegister dst = $dst$$FloatRegister;
10269 __ mul_d(dst, src1, src2);
10270 %}
10271 ins_pipe( fpu_regF_regF );
10272 %}
// Fused double-precision multiply-add: dst = src1*src2 + src3 via madd.d.
// Effectively disabled by the huge ins_cost (see comment below).
10274 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10275 match(Set dst (AddD (MulD src1 src2) src3));
10276 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10277 ins_cost(44444);
10278 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10279 ins_encode %{
10280 FloatRegister src1 = $src1$$FloatRegister;
10281 FloatRegister src2 = $src2$$FloatRegister;
10282 FloatRegister src3 = $src3$$FloatRegister;
10283 FloatRegister dst = $dst$$FloatRegister;
10285 __ madd_d(dst, src1, src2, src3);
10286 %}
10287 ins_pipe( fpu_regF_regF );
10288 %}
// Single-precision float absolute value.
10290 instruct absF_reg(regF dst, regF src) %{
10291 match(Set dst (AbsF src));
10292 ins_cost(100);
10293 format %{ "absF $dst, $src @absF_reg" %}
10294 ins_encode %{
10295 FloatRegister src = as_FloatRegister($src$$reg);
10296 FloatRegister dst = as_FloatRegister($dst$$reg);
10298 __ abs_s(dst, src);
10299 %}
10300 ins_pipe( fpu_regF_regF );
10301 %}
10304 // intrinsics for math_native.
10305 // AbsD SqrtD CosD SinD TanD LogD Log10D
// Double-precision float absolute value (math intrinsic AbsD).
10307 instruct absD_reg(regD dst, regD src) %{
10308 match(Set dst (AbsD src));
10309 ins_cost(100);
10310 format %{ "absD $dst, $src @absD_reg" %}
10311 ins_encode %{
10312 FloatRegister src = as_FloatRegister($src$$reg);
10313 FloatRegister dst = as_FloatRegister($dst$$reg);
10315 __ abs_d(dst, src);
10316 %}
10317 ins_pipe( fpu_regF_regF );
10318 %}
// Double-precision square root (math intrinsic SqrtD).
10320 instruct sqrtD_reg(regD dst, regD src) %{
10321 match(Set dst (SqrtD src));
10322 ins_cost(100);
10323 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10324 ins_encode %{
10325 FloatRegister src = as_FloatRegister($src$$reg);
10326 FloatRegister dst = as_FloatRegister($dst$$reg);
10328 __ sqrt_d(dst, src);
10329 %}
10330 ins_pipe( fpu_regF_regF );
10331 %}
// Single-precision square root, matched from the F->D->sqrt->F idiom the
// compiler produces (sqrt.s gives the same correctly-rounded result).
10333 instruct sqrtF_reg(regF dst, regF src) %{
10334 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10335 ins_cost(100);
10336 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10337 ins_encode %{
10338 FloatRegister src = as_FloatRegister($src$$reg);
10339 FloatRegister dst = as_FloatRegister($dst$$reg);
10341 __ sqrt_s(dst, src);
10342 %}
10343 ins_pipe( fpu_regF_regF );
10344 %}
10345 //----------------------------------Logical Instructions----------------------
10346 //__________________________________Integer Logical Instructions-------------
10348 //And Instuctions
10349 // And Register with Immediate
// AND with an arbitrary 32-bit immediate: materialize the constant into the
// scratch register AT, then AND. Narrower immediates match cheaper rules.
10350 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10351 match(Set dst (AndI src1 src2));
10353 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10354 ins_encode %{
10355 Register dst = $dst$$Register;
10356 Register src = $src1$$Register;
10357 int val = $src2$$constant;
10359 __ move(AT, val);
10360 __ andr(dst, src, AT);
10361 %}
10362 ins_pipe( ialu_regI_regI );
10363 %}
// AND with an immediate that fits andi's zero-extended 16-bit field.
10365 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10366 match(Set dst (AndI src1 src2));
10367 ins_cost(60);
10369 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10370 ins_encode %{
10371 Register dst = $dst$$Register;
10372 Register src = $src1$$Register;
10373 int val = $src2$$constant;
10375 __ andi(dst, src, val);
10376 %}
10377 ins_pipe( ialu_regI_regI );
10378 %}
// AND with a contiguous low-bit mask (2^n - 1), implemented as a bit-field
// extract: ext copies `size` low bits and zeroes the rest.
10380 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10381 match(Set dst (AndI src1 mask));
10382 ins_cost(60);
10384 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10385 ins_encode %{
10386 Register dst = $dst$$Register;
10387 Register src = $src1$$Register;
// is_int_mask returns the width (in bits) of the low-bit mask constant.
10388 int size = Assembler::is_int_mask($mask$$constant);
10390 __ ext(dst, src, 0, size);
10391 %}
10392 ins_pipe( ialu_regI_regI );
10393 %}
// Long AND with a contiguous low-bit mask, as a 64-bit extract (dext).
10395 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10396 match(Set dst (AndL src1 mask));
10397 ins_cost(60);
10399 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10400 ins_encode %{
10401 Register dst = $dst$$Register;
10402 Register src = $src1$$Register;
// is_jlong_mask returns the bit-width of the 64-bit low-bit mask constant.
10403 int size = Assembler::is_jlong_mask($mask$$constant);
10405 __ dext(dst, src, 0, size);
10406 %}
10407 ins_pipe( ialu_regI_regI );
10408 %}
// XOR with an immediate fitting xori's zero-extended 16-bit field.
10410 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10411 match(Set dst (XorI src1 src2));
10412 ins_cost(60);
10414 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10415 ins_encode %{
10416 Register dst = $dst$$Register;
10417 Register src = $src1$$Register;
10418 int val = $src2$$constant;
10420 __ xori(dst, src, val);
10421 %}
10422 ins_pipe( ialu_regI_regI );
10423 %}
// Bitwise NOT (x ^ -1) using Loongson's or-not: dst = ~(R0 | src) = ~src.
// Only selected on Loongson 3A2000+ where gsorn exists.
10425 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10426 match(Set dst (XorI src1 M1));
10427 predicate(UseLoongsonISA && Use3A2000);
10428 ins_cost(60);
10430 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10431 ins_encode %{
10432 Register dst = $dst$$Register;
10433 Register src = $src1$$Register;
10435 __ gsorn(dst, R0, src);
10436 %}
10437 ins_pipe( ialu_regI_regI );
10438 %}
// NOT of a long truncated to int: the L2I conversion is implicit (register
// already holds the value), so a single gsorn suffices.
10440 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10441 match(Set dst (XorI (ConvL2I src1) M1));
10442 predicate(UseLoongsonISA && Use3A2000);
10443 ins_cost(60);
10445 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10446 ins_encode %{
10447 Register dst = $dst$$Register;
10448 Register src = $src1$$Register;
10450 __ gsorn(dst, R0, src);
10451 %}
10452 ins_pipe( ialu_regI_regI );
10453 %}
// Long XOR with an immediate fitting xori's zero-extended 16-bit field.
10455 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10456 match(Set dst (XorL src1 src2));
10457 ins_cost(60);
10459 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10460 ins_encode %{
10461 Register dst = $dst$$Register;
10462 Register src = $src1$$Register;
10463 int val = $src2$$constant;
10465 __ xori(dst, src, val);
10466 %}
10467 ins_pipe( ialu_regI_regI );
10468 %}
10470 /*
10471 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10472 match(Set dst (XorL src1 M1));
10473 predicate(UseLoongsonISA);
10474 ins_cost(60);
10476 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10477 ins_encode %{
10478 Register dst = $dst$$Register;
10479 Register src = $src1$$Register;
10481 __ gsorn(dst, R0, src);
10482 %}
10483 ins_pipe( ialu_regI_regI );
10484 %}
10485 */
// Fold (0xFF & LoadB mem) into a single unsigned-byte load (load_UB_enc).
// Fix: the format string printed "lhu" (load halfword unsigned) although the
// emitted instruction is an unsigned BYTE load — disassembly now says "lbu".
10487 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10488 match(Set dst (AndI mask (LoadB mem)));
10489 ins_cost(60);
10491 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10492 ins_encode(load_UB_enc(dst, mem));
10493 ins_pipe( ialu_loadI );
10494 %}
// Mirror of lbu_and_lmask with the mask on the right: (LoadB mem) & 0xFF.
// Fix: format mnemonic corrected from "lhu" to "lbu" to match load_UB_enc,
// which emits an unsigned byte load, not a halfword load.
10496 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10497 match(Set dst (AndI (LoadB mem) mask));
10498 ins_cost(60);
10500 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10501 ins_encode(load_UB_enc(dst, mem));
10502 ins_pipe( ialu_loadI );
10503 %}
// 32-bit AND, register-register.
10505 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10506 match(Set dst (AndI src1 src2));
10508 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10509 ins_encode %{
10510 Register dst = $dst$$Register;
10511 Register src1 = $src1$$Register;
10512 Register src2 = $src2$$Register;
10513 __ andr(dst, src1, src2);
10514 %}
10515 ins_pipe( ialu_regI_regI );
10516 %}
// Fold src1 & ~src2 (src2 XOR -1) into Loongson's single and-not instruction.
10518 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10519 match(Set dst (AndI src1 (XorI src2 M1)));
10520 predicate(UseLoongsonISA && Use3A2000);
10522 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10523 ins_encode %{
10524 Register dst = $dst$$Register;
10525 Register src1 = $src1$$Register;
10526 Register src2 = $src2$$Register;
10528 __ gsandn(dst, src1, src2);
10529 %}
10530 ins_pipe( ialu_regI_regI );
10531 %}
// Fold src1 | ~src2 into Loongson's single or-not instruction.
10533 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10534 match(Set dst (OrI src1 (XorI src2 M1)));
10535 predicate(UseLoongsonISA && Use3A2000);
10537 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10538 ins_encode %{
10539 Register dst = $dst$$Register;
10540 Register src1 = $src1$$Register;
10541 Register src2 = $src2$$Register;
10543 __ gsorn(dst, src1, src2);
10544 %}
10545 ins_pipe( ialu_regI_regI );
10546 %}
// Commuted form: ~src1 & src2 — gsandn operands are swapped accordingly.
10548 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10549 match(Set dst (AndI (XorI src1 M1) src2));
10550 predicate(UseLoongsonISA && Use3A2000);
10552 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10553 ins_encode %{
10554 Register dst = $dst$$Register;
10555 Register src1 = $src1$$Register;
10556 Register src2 = $src2$$Register;
10558 __ gsandn(dst, src2, src1);
10559 %}
10560 ins_pipe( ialu_regI_regI );
10561 %}
// Commuted form: ~src1 | src2 — gsorn operands are swapped accordingly.
10563 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10564 match(Set dst (OrI (XorI src1 M1) src2));
10565 predicate(UseLoongsonISA && Use3A2000);
10567 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10568 ins_encode %{
10569 Register dst = $dst$$Register;
10570 Register src1 = $src1$$Register;
10571 Register src2 = $src2$$Register;
10573 __ gsorn(dst, src2, src1);
10574 %}
10575 ins_pipe( ialu_regI_regI );
10576 %}
10578 // And Long Register with Register
// 64-bit AND, register-register.
10579 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10580 match(Set dst (AndL src1 src2));
10581 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10582 ins_encode %{
10583 Register dst_reg = as_Register($dst$$reg);
10584 Register src1_reg = as_Register($src1$$reg);
10585 Register src2_reg = as_Register($src2$$reg);
10587 __ andr(dst_reg, src1_reg, src2_reg);
10588 %}
10589 ins_pipe( ialu_regL_regL );
10590 %}
// 64-bit AND where the right operand is a sign-extended int register.
10592 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10593 match(Set dst (AndL src1 (ConvI2L src2)));
10594 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10595 ins_encode %{
10596 Register dst_reg = as_Register($dst$$reg);
10597 Register src1_reg = as_Register($src1$$reg);
10598 Register src2_reg = as_Register($src2$$reg);
10600 __ andr(dst_reg, src1_reg, src2_reg);
10601 %}
10602 ins_pipe( ialu_regL_regL );
10603 %}
// Long AND with an immediate fitting andi's zero-extended 16-bit field.
10605 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10606 match(Set dst (AndL src1 src2));
10607 ins_cost(60);
10609 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10610 ins_encode %{
10611 Register dst = $dst$$Register;
10612 Register src = $src1$$Register;
10613 long val = $src2$$constant;
10615 __ andi(dst, src, val);
10616 %}
10617 ins_pipe( ialu_regI_regI );
10618 %}
// (long & smallMask) truncated to int: andi already zeroes the high bits,
// so the L2I conversion costs nothing extra.
10620 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10621 match(Set dst (ConvL2I (AndL src1 src2)));
10622 ins_cost(60);
10624 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10625 ins_encode %{
10626 Register dst = $dst$$Register;
10627 Register src = $src1$$Register;
10628 long val = $src2$$constant;
10630 __ andi(dst, src, val);
10631 %}
10632 ins_pipe( ialu_regI_regI );
10633 %}
10635 /*
10636 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10637 match(Set dst (AndL src1 (XorL src2 M1)));
10638 predicate(UseLoongsonISA);
10640 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10641 ins_encode %{
10642 Register dst = $dst$$Register;
10643 Register src1 = $src1$$Register;
10644 Register src2 = $src2$$Register;
10646 __ gsandn(dst, src1, src2);
10647 %}
10648 ins_pipe( ialu_regI_regI );
10649 %}
10650 */
10652 /*
10653 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10654 match(Set dst (OrL src1 (XorL src2 M1)));
10655 predicate(UseLoongsonISA);
10657 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10658 ins_encode %{
10659 Register dst = $dst$$Register;
10660 Register src1 = $src1$$Register;
10661 Register src2 = $src2$$Register;
10663 __ gsorn(dst, src1, src2);
10664 %}
10665 ins_pipe( ialu_regI_regI );
10666 %}
10667 */
10669 /*
10670 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10671 match(Set dst (AndL (XorL src1 M1) src2));
10672 predicate(UseLoongsonISA);
10674 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10675 ins_encode %{
10676 Register dst = $dst$$Register;
10677 Register src1 = $src1$$Register;
10678 Register src2 = $src2$$Register;
10680 __ gsandn(dst, src2, src1);
10681 %}
10682 ins_pipe( ialu_regI_regI );
10683 %}
10684 */
10686 /*
10687 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10688 match(Set dst (OrL (XorL src1 M1) src2));
10689 predicate(UseLoongsonISA);
10691 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10692 ins_encode %{
10693 Register dst = $dst$$Register;
10694 Register src1 = $src1$$Register;
10695 Register src2 = $src2$$Register;
10697 __ gsorn(dst, src2, src1);
10698 %}
10699 ins_pipe( ialu_regI_regI );
10700 %}
10701 */
// dst &= -8: clear the low 3 bits by inserting zeros (dins from R0, pos 0,
// width 3). Destructive form — matched only when dst is also the source.
10703 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10704 match(Set dst (AndL dst M8));
10705 ins_cost(60);
10707 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10708 ins_encode %{
10709 Register dst = $dst$$Register;
10711 __ dins(dst, R0, 0, 3);
10712 %}
10713 ins_pipe( ialu_regI_regI );
10714 %}
// dst &= -5 (…11111011): clear only bit 2 by inserting a zero at pos 2.
10716 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10717 match(Set dst (AndL dst M5));
10718 ins_cost(60);
10720 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10721 ins_encode %{
10722 Register dst = $dst$$Register;
10724 __ dins(dst, R0, 2, 1);
10725 %}
10726 ins_pipe( ialu_regI_regI );
10727 %}
// dst &= -7 (…11111001): clear bits 1-2 by inserting two zeros at pos 1.
10729 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10730 match(Set dst (AndL dst M7));
10731 ins_cost(60);
10733 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10734 ins_encode %{
10735 Register dst = $dst$$Register;
10737 __ dins(dst, R0, 1, 2);
10738 %}
10739 ins_pipe( ialu_regI_regI );
10740 %}
// dst &= -4: clear the low 2 bits by inserting zeros at pos 0, width 2.
10742 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10743 match(Set dst (AndL dst M4));
10744 ins_cost(60);
10746 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10747 ins_encode %{
10748 Register dst = $dst$$Register;
10750 __ dins(dst, R0, 0, 2);
10751 %}
10752 ins_pipe( ialu_regI_regI );
10753 %}
// dst &= -121 (…10000111): clear bits 3-6 by inserting four zeros at pos 3.
10755 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10756 match(Set dst (AndL dst M121));
10757 ins_cost(60);
10759 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10760 ins_encode %{
10761 Register dst = $dst$$Register;
10763 __ dins(dst, R0, 3, 4);
10764 %}
10765 ins_pipe( ialu_regI_regI );
10766 %}
10768 // Or Long Register with Register
// 64-bit OR, register-register.
10769 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10770 match(Set dst (OrL src1 src2));
10771 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10772 ins_encode %{
10773 Register dst_reg = $dst$$Register;
10774 Register src1_reg = $src1$$Register;
10775 Register src2_reg = $src2$$Register;
10777 __ orr(dst_reg, src1_reg, src2_reg);
10778 %}
10779 ins_pipe( ialu_regL_regL );
10780 %}
// 64-bit OR where the left operand is a pointer reinterpreted as a long
// (CastP2X is a no-op at the register level).
10782 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10783 match(Set dst (OrL (CastP2X src1) src2));
10784 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10785 ins_encode %{
10786 Register dst_reg = $dst$$Register;
10787 Register src1_reg = $src1$$Register;
10788 Register src2_reg = $src2$$Register;
10790 __ orr(dst_reg, src1_reg, src2_reg);
10791 %}
10792 ins_pipe( ialu_regL_regL );
10793 %}
10795 // Xor Long Register with Register
// 64-bit XOR, register-register.
10796 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10797 match(Set dst (XorL src1 src2));
10798 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10799 ins_encode %{
10800 Register dst_reg = as_Register($dst$$reg);
10801 Register src1_reg = as_Register($src1$$reg);
10802 Register src2_reg = as_Register($src2$$reg);
10804 __ xorr(dst_reg, src1_reg, src2_reg);
10805 %}
10806 ins_pipe( ialu_regL_regL );
10807 %}
10809 // Shift Left by 8-bit immediate
// 32-bit shift left by an immediate (sll uses the low 5 bits of shamt).
10810 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10811 match(Set dst (LShiftI src shift));
10813 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10814 ins_encode %{
10815 Register src = $src$$Register;
10816 Register dst = $dst$$Register;
10817 int shamt = $shift$$constant;
10819 __ sll(dst, src, shamt);
10820 %}
10821 ins_pipe( ialu_regI_regI );
10822 %}
// 32-bit shift left of a long truncated to int; sll also re-sign-extends
// the 32-bit result, so no separate truncation is needed.
10824 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10825 match(Set dst (LShiftI (ConvL2I src) shift));
10827 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10828 ins_encode %{
10829 Register src = $src$$Register;
10830 Register dst = $dst$$Register;
10831 int shamt = $shift$$constant;
10833 __ sll(dst, src, shamt);
10834 %}
10835 ins_pipe( ialu_regI_regI );
10836 %}
// (src << 16) & 0xFFFF0000: the AND is redundant after a 16-bit left shift,
// so a single sll by 16 implements the whole pattern.
10838 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10839 match(Set dst (AndI (LShiftI src shift) mask));
10841 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10842 ins_encode %{
10843 Register src = $src$$Register;
10844 Register dst = $dst$$Register;
10846 __ sll(dst, src, 16);
10847 %}
10848 ins_pipe( ialu_regI_regI );
10849 %}
// ((int)(src & 7) << 16) >> 16: since (src & 7) already fits in 3 bits, the
// sign-extending shift pair is a no-op and the whole tree reduces to andi 7.
10851 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10852 %{
10853 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10855 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10856 ins_encode %{
10857 Register src = $src$$Register;
10858 Register dst = $dst$$Register;
10860 __ andi(dst, src, 7);
10861 %}
10862 ins_pipe(ialu_regI_regI);
10863 %}
// ((src1 | imm) << 16) >> 16 where imm is in [0, 32767]: the i2s round-trip
// cannot change the value, so a plain ori implements the whole pattern.
10865 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10866 %{
10867 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10869 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10870 ins_encode %{
10871 Register src = $src1$$Register;
10872 int val = $src2$$constant;
10873 Register dst = $dst$$Register;
10875 __ ori(dst, src, val);
10876 %}
10877 ins_pipe(ialu_regI_regI);
10878 %}
10880 // Logical Shift Left by 16, followed by Arithmetic Shift Right by 16.
10881 // This idiom is used by the compiler for the i2s bytecode.
// int-to-short: (src << 16) >> 16 collapses to the sign-extend-halfword
// instruction seh.
10882 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10883 %{
10884 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10886 format %{ "i2s $dst, $src\t# @i2s" %}
10887 ins_encode %{
10888 Register src = $src$$Register;
10889 Register dst = $dst$$Register;
10891 __ seh(dst, src);
10892 %}
10893 ins_pipe(ialu_regI_regI);
10894 %}
10896 // Logical Shift Left by 24, followed by Arithmetic Shift Right by 24.
10897 // This idiom is used by the compiler for the i2b bytecode.
// int-to-byte: (src << 24) >> 24 collapses to the sign-extend-byte
// instruction seb.
10898 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10899 %{
10900 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10902 format %{ "i2b $dst, $src\t# @i2b" %}
10903 ins_encode %{
10904 Register src = $src$$Register;
10905 Register dst = $dst$$Register;
10907 __ seb(dst, src);
10908 %}
10909 ins_pipe(ialu_regI_regI);
10910 %}
// 32-bit left shift of a truncated long; sll re-sign-extends the result,
// making the explicit ConvL2I free.
10913 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10914 match(Set dst (LShiftI (ConvL2I src) shift));
10916 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10917 ins_encode %{
10918 Register src = $src$$Register;
10919 Register dst = $dst$$Register;
10920 int shamt = $shift$$constant;
10922 __ sll(dst, src, shamt);
10923 %}
10924 ins_pipe( ialu_regI_regI );
10925 %}
10927 // Shift Left by 8-bit immediate
10928 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10929 match(Set dst (LShiftI src shift));
10931 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10932 ins_encode %{
10933 Register src = $src$$Register;
10934 Register dst = $dst$$Register;
10935 Register shamt = $shift$$Register;
10936 __ sllv(dst, src, shamt);
10937 %}
10938 ins_pipe( ialu_regI_regI );
10939 %}
10942 // Shift Left Long by constant. A 64-bit shift amount of 32..63 needs the
// dsll32 encoding (dsll's field is only 5 bits); is_simm(shamt, 5) selects the
// short form, otherwise the amount is reduced modulo 64 and split at 32.
10943 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10944 //predicate(UseNewLongLShift);
10945 match(Set dst (LShiftL src shift));
10946 ins_cost(100);
10947 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10948 ins_encode %{
10949 Register src_reg = as_Register($src$$reg);
10950 Register dst_reg = as_Register($dst$$reg);
10951 int shamt = $shift$$constant;
10953 if (__ is_simm(shamt, 5))
10954 __ dsll(dst_reg, src_reg, shamt);
10955 else
10956 {
10957 int sa = Assembler::low(shamt, 6); // shift amount mod 64, Java long semantics
10958 if (sa < 32) {
10959 __ dsll(dst_reg, src_reg, sa);
10960 } else {
10961 __ dsll32(dst_reg, src_reg, sa - 32);
10962 }
10963 }
10964 %}
10965 ins_pipe( ialu_regL_regL );
10966 %}
// Shift Left Long of a sign-extended int, by constant amount.
// NOTE(review): this rule and salL_convI2L_Reg_imm below have identical match
// rules and identical cost — presumably one is redundant; confirm which one
// ADLC actually selects before removing either.
10968 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10969 //predicate(UseNewLongLShift);
10970 match(Set dst (LShiftL (ConvI2L src) shift));
10971 ins_cost(100);
10972 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10973 ins_encode %{
10974 Register src_reg = as_Register($src$$reg);
10975 Register dst_reg = as_Register($dst$$reg);
10976 int shamt = $shift$$constant;
10978 if (__ is_simm(shamt, 5))
10979 __ dsll(dst_reg, src_reg, shamt);
10980 else
10981 {
10982 int sa = Assembler::low(shamt, 6);
10983 if (sa < 32) {
10984 __ dsll(dst_reg, src_reg, sa);
10985 } else {
10986 __ dsll32(dst_reg, src_reg, sa - 32);
10987 }
10988 }
10989 %}
10990 ins_pipe( ialu_regL_regL );
10991 %}
10993 // Shift Left Long by variable amount; dsllv uses the low 6 bits of the
// shift register, matching Java's (shift & 63) semantics for long.
10994 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10995 //predicate(UseNewLongLShift);
10996 match(Set dst (LShiftL src shift));
10997 ins_cost(100);
10998 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10999 ins_encode %{
11000 Register src_reg = as_Register($src$$reg);
11001 Register dst_reg = as_Register($dst$$reg);
11003 __ dsllv(dst_reg, src_reg, $shift$$Register);
11004 %}
11005 ins_pipe( ialu_regL_regL );
11006 %}
// Shift Left Long of a sign-extended int, by constant amount.
// NOTE(review): duplicate of salL_RegI2L_imm above — see note there.
11008 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
11009 match(Set dst (LShiftL (ConvI2L src) shift));
11010 ins_cost(100);
11011 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
11012 ins_encode %{
11013 Register src_reg = as_Register($src$$reg);
11014 Register dst_reg = as_Register($dst$$reg);
11015 int shamt = $shift$$constant;
11017 if (__ is_simm(shamt, 5)) {
11018 __ dsll(dst_reg, src_reg, shamt);
11019 } else {
11020 int sa = Assembler::low(shamt, 6);
11021 if (sa < 32) {
11022 __ dsll(dst_reg, src_reg, sa);
11023 } else {
11024 __ dsll32(dst_reg, src_reg, sa - 32);
11025 }
11026 }
11027 %}
11028 ins_pipe( ialu_regL_regL );
11029 %}
11031 // Shift Right Long (arithmetic) by constant; amounts >= 32 use the dsra32
// encoding, mirroring the dsll/dsll32 split in the left-shift rules above.
11032 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
11033 match(Set dst (RShiftL src shift));
11034 ins_cost(100);
11035 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
11036 ins_encode %{
11037 Register src_reg = as_Register($src$$reg);
11038 Register dst_reg = as_Register($dst$$reg);
11039 int shamt = ($shift$$constant & 0x3f); // Java long shift: amount mod 64
11040 if (__ is_simm(shamt, 5))
11041 __ dsra(dst_reg, src_reg, shamt);
11042 else {
11043 int sa = Assembler::low(shamt, 6);
11044 if (sa < 32) {
11045 __ dsra(dst_reg, src_reg, sa);
11046 } else {
11047 __ dsra32(dst_reg, src_reg, sa - 32);
11048 }
11049 }
11050 %}
11051 ins_pipe( ialu_regL_regL );
11052 %}
// (long >> [32..63]) narrowed to int: a single dsra32 produces the already
// sign-extended 32-bit result, so the ConvL2I costs nothing extra.
11054 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11055 match(Set dst (ConvL2I (RShiftL src shift)));
11056 ins_cost(100);
11057 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11058 ins_encode %{
11059 Register src_reg = as_Register($src$$reg);
11060 Register dst_reg = as_Register($dst$$reg);
11061 int shamt = $shift$$constant;
11063 __ dsra32(dst_reg, src_reg, shamt - 32);
11064 %}
11065 ins_pipe( ialu_regL_regL );
11066 %}
11068 // Shift Right Long arithmetically by variable amount (dsrav masks to 6 bits).
11069 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11070 //predicate(UseNewLongLShift);
11071 match(Set dst (RShiftL src shift));
11072 ins_cost(100);
11073 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11074 ins_encode %{
11075 Register src_reg = as_Register($src$$reg);
11076 Register dst_reg = as_Register($dst$$reg);
11078 __ dsrav(dst_reg, src_reg, $shift$$Register);
11079 %}
11080 ins_pipe( ialu_regL_regL );
11081 %}
11083 // Shift Right Long logically by variable amount (dsrlv masks to 6 bits).
11084 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11085 match(Set dst (URShiftL src shift));
11086 ins_cost(100);
11087 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11088 ins_encode %{
11089 Register src_reg = as_Register($src$$reg);
11090 Register dst_reg = as_Register($dst$$reg);
11092 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11093 %}
11094 ins_pipe( ialu_regL_regL );
11095 %}
// Logical Shift Right Long by constant 0..31 (fits dsrl's 5-bit field).
11097 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11098 match(Set dst (URShiftL src shift));
11099 ins_cost(80);
11100 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11101 ins_encode %{
11102 Register src_reg = as_Register($src$$reg);
11103 Register dst_reg = as_Register($dst$$reg);
11104 int shamt = $shift$$constant;
11106 __ dsrl(dst_reg, src_reg, shamt);
11107 %}
11108 ins_pipe( ialu_regL_regL );
11109 %}
// (int)(long >>> shift) & 0x7fffffff collapses to a single dext bitfield
// extract of 31 bits starting at `shift`.
11111 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11112 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11113 ins_cost(80);
11114 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11115 ins_encode %{
11116 Register src_reg = as_Register($src$$reg);
11117 Register dst_reg = as_Register($dst$$reg);
11118 int shamt = $shift$$constant;
11120 __ dext(dst_reg, src_reg, shamt, 31);
11121 %}
11122 ins_pipe( ialu_regL_regL );
11123 %}
// Same as slrL_Reg_immI_0_31 but the source is a pointer reinterpreted as
// a long (CastP2X) — e.g. for identity-hash / address arithmetic.
11125 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11126 match(Set dst (URShiftL (CastP2X src) shift));
11127 ins_cost(80);
11128 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11129 ins_encode %{
11130 Register src_reg = as_Register($src$$reg);
11131 Register dst_reg = as_Register($dst$$reg);
11132 int shamt = $shift$$constant;
11134 __ dsrl(dst_reg, src_reg, shamt);
11135 %}
11136 ins_pipe( ialu_regL_regL );
11137 %}
// Logical Shift Right Long by constant 32..63 (dsrl32 encoding).
11139 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11140 match(Set dst (URShiftL src shift));
11141 ins_cost(80);
11142 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11143 ins_encode %{
11144 Register src_reg = as_Register($src$$reg);
11145 Register dst_reg = as_Register($dst$$reg);
11146 int shamt = $shift$$constant;
11148 __ dsrl32(dst_reg, src_reg, shamt - 32);
11149 %}
11150 ins_pipe( ialu_regL_regL );
11151 %}
// (int)(long >>> [33..63]): shifting out more than 32 bits leaves a value
// that already fits in (and is zero-extended into) 32 bits, so a bare dsrl32
// is a correct int result. The predicate requires shift > 32 (not >= 32).
11153 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11154 match(Set dst (ConvL2I (URShiftL src shift)));
11155 predicate(n->in(1)->in(2)->get_int() > 32);
11156 ins_cost(80);
11157 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11158 ins_encode %{
11159 Register src_reg = as_Register($src$$reg);
11160 Register dst_reg = as_Register($dst$$reg);
11161 int shamt = $shift$$constant;
11163 __ dsrl32(dst_reg, src_reg, shamt - 32);
11164 %}
11165 ins_pipe( ialu_regL_regL );
11166 %}
// Pointer-as-long variant of slrL_Reg_immI_32_63.
11168 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11169 match(Set dst (URShiftL (CastP2X src) shift));
11170 ins_cost(80);
11171 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11172 ins_encode %{
11173 Register src_reg = as_Register($src$$reg);
11174 Register dst_reg = as_Register($dst$$reg);
11175 int shamt = $shift$$constant;
11177 __ dsrl32(dst_reg, src_reg, shamt - 32);
11178 %}
11179 ins_pipe( ialu_regL_regL );
11180 %}
11182 // Xor Instructions
11183 // Xor Register with Register
11184 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11185 match(Set dst (XorI src1 src2));
11187 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11189 ins_encode %{
11190 Register dst = $dst$$Register;
11191 Register src1 = $src1$$Register;
11192 Register src2 = $src2$$Register;
11193 __ xorr(dst, src1, src2);
11194 __ sll(dst, dst, 0); /* re-sign-extend low 32 bits: canonical int form on MIPS64 */
11195 %}
11197 ins_pipe( ialu_regI_regI );
11198 %}
11200 // Or Instructions
11201 // Or Register with Register
11202 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11203 match(Set dst (OrI src1 src2));
11205 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11206 ins_encode %{
11207 Register dst = $dst$$Register;
11208 Register src1 = $src1$$Register;
11209 Register src2 = $src2$$Register;
11210 __ orr(dst, src1, src2);
11211 %}
11213 ins_pipe( ialu_regI_regI );
11214 %}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate right by 1 brings bit 0 to the top, then the remaining (rshift-1)
// logical shift finishes the extraction. rshift-1 == 0 emits no srl.
11216 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11217 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11218 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11220 format %{ "rotr $dst, $src, 1 ...\n\t"
11221 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11222 ins_encode %{
11223 Register dst = $dst$$Register;
11224 Register src = $src$$Register;
11225 int rshift = $rshift$$constant;
11227 __ rotr(dst, src, 1);
11228 if (rshift - 1) {
11229 __ srl(dst, dst, rshift - 1);
11230 }
11231 %}
11233 ins_pipe( ialu_regI_regI );
11234 %}
// OrI of a long with a pointer-as-long.
// NOTE(review): operands are mRegL/mRegP but the rule matches OrI (int or) —
// looks inconsistent with the operand types; confirm against the matcher.
11236 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11237 match(Set dst (OrI src1 (CastP2X src2)));
11239 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11240 ins_encode %{
11241 Register dst = $dst$$Register;
11242 Register src1 = $src1$$Register;
11243 Register src2 = $src2$$Register;
11244 __ orr(dst, src1, src2);
11245 %}
11247 ins_pipe( ialu_regI_regI );
11248 %}
11250 // Logical Shift Right by 8-bit immediate
11251 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11252 match(Set dst (URShiftI src shift));
11253 // effect(KILL cr);
11255 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11256 ins_encode %{
11257 Register src = $src$$Register;
11258 Register dst = $dst$$Register;
11259 int shift = $shift$$constant;
11261 __ srl(dst, src, shift);
11262 %}
11263 ins_pipe( ialu_regI_regI );
11264 %}
// (src >>> shift) & contiguous-low-bits-mask collapses to a single ext
// bitfield extract; is_int_mask returns the mask's bit count.
11266 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11267 match(Set dst (AndI (URShiftI src shift) mask));
11269 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11270 ins_encode %{
11271 Register src = $src$$Register;
11272 Register dst = $dst$$Register;
11273 int pos = $shift$$constant;
11274 int size = Assembler::is_int_mask($mask$$constant);
11276 __ ext(dst, src, pos, size);
11277 %}
11278 ins_pipe( ialu_regI_regI );
11279 %}
// Rotate left int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 32 is a rotate right by rshift (MIPS has only rotr).
11281 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11282 %{
11283 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11284 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11286 ins_cost(100);
11287 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11288 ins_encode %{
11289 Register dst = $dst$$Register;
11290 int sa = $rshift$$constant;
11292 __ rotr(dst, dst, sa);
11293 %}
11294 ins_pipe( ialu_regI_regI );
11295 %}
// Rotate left long, expressed as rotate right by rshift (0..31 -> drotr).
11297 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11298 %{
11299 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11300 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11302 ins_cost(100);
11303 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11304 ins_encode %{
11305 Register dst = $dst$$Register;
11306 int sa = $rshift$$constant;
11308 __ drotr(dst, dst, sa);
11309 %}
11310 ins_pipe( ialu_regI_regI );
11311 %}
// Rotate left long, rshift 32..63 -> drotr32 encoding (5-bit field limit).
11313 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11314 %{
11315 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11316 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11318 ins_cost(100);
11319 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11320 ins_encode %{
11321 Register dst = $dst$$Register;
11322 int sa = $rshift$$constant;
11324 __ drotr32(dst, dst, sa - 32);
11325 %}
11326 ins_pipe( ialu_regI_regI );
11327 %}
// Rotate right int (operand order is URShift first here, vs rolI above).
11329 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11330 %{
11331 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11332 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11334 ins_cost(100);
11335 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11336 ins_encode %{
11337 Register dst = $dst$$Register;
11338 int sa = $rshift$$constant;
11340 __ rotr(dst, dst, sa);
11341 %}
11342 ins_pipe( ialu_regI_regI );
11343 %}
// Rotate right long, rshift 0..31.
11345 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11346 %{
11347 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11348 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11350 ins_cost(100);
11351 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11352 ins_encode %{
11353 Register dst = $dst$$Register;
11354 int sa = $rshift$$constant;
11356 __ drotr(dst, dst, sa);
11357 %}
11358 ins_pipe( ialu_regI_regI );
11359 %}
// Rotate right long, rshift 32..63 -> drotr32 encoding.
11361 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11362 %{
11363 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11364 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11366 ins_cost(100);
11367 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11368 ins_encode %{
11369 Register dst = $dst$$Register;
11370 int sa = $rshift$$constant;
11372 __ drotr32(dst, dst, sa - 32);
11373 %}
11374 ins_pipe( ialu_regI_regI );
11375 %}
11377 // Logical Shift Right by variable amount (srlv masks to low 5 bits).
11378 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11379 match(Set dst (URShiftI src shift));
11381 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11382 ins_encode %{
11383 Register src = $src$$Register;
11384 Register dst = $dst$$Register;
11385 Register shift = $shift$$Register;
11386 __ srlv(dst, src, shift);
11387 %}
11388 ins_pipe( ialu_regI_regI );
11389 %}
// Arithmetic Shift Right by constant amount.
11392 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11393 match(Set dst (RShiftI src shift));
11394 // effect(KILL cr);
11396 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11397 ins_encode %{
11398 Register src = $src$$Register;
11399 Register dst = $dst$$Register;
11400 int shift = $shift$$constant;
11401 __ sra(dst, src, shift);
11402 %}
11403 ins_pipe( ialu_regI_regI );
11404 %}
// Arithmetic Shift Right by variable amount (srav masks to low 5 bits).
11406 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11407 match(Set dst (RShiftI src shift));
11408 // effect(KILL cr);
11410 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11411 ins_encode %{
11412 Register src = $src$$Register;
11413 Register dst = $dst$$Register;
11414 Register shift = $shift$$Register;
11415 __ srav(dst, src, shift);
11416 %}
11417 ins_pipe( ialu_regI_regI );
11418 %}
11420 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0. movz conditionally zeroes dst when src is zero;
// when dst aliases src, src is first saved in AT so the daddiu does not
// clobber the value being tested.
11422 instruct convI2B(mRegI dst, mRegI src) %{
11423 match(Set dst (Conv2B src));
11425 ins_cost(100);
11426 format %{ "convI2B $dst, $src @ convI2B" %}
11427 ins_encode %{
11428 Register dst = as_Register($dst$$reg);
11429 Register src = as_Register($src$$reg);
11431 if (dst != src) {
11432 __ daddiu(dst, R0, 1);
11433 __ movz(dst, R0, src); // dst = 0 iff src == 0
11434 } else {
11435 __ move(AT, src); // preserve src before overwriting dst (== src)
11436 __ daddiu(dst, R0, 1);
11437 __ movz(dst, R0, AT);
11438 }
11439 %}
11441 ins_pipe( ialu_regL_regL );
11442 %}
// int -> long: sll with shamt 0 sign-extends the low 32 bits on MIPS64;
// skipped entirely when src and dst are the same register.
11444 instruct convI2L_reg( mRegL dst, mRegI src) %{
11445 match(Set dst (ConvI2L src));
11447 ins_cost(100);
11448 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11449 ins_encode %{
11450 Register dst = as_Register($dst$$reg);
11451 Register src = as_Register($src$$reg);
11453 if(dst != src) __ sll(dst, src, 0);
11454 %}
11455 ins_pipe( ialu_regL_regL );
11456 %}
// long -> int: truncate and sign-extend via sll 0.
11459 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11460 match(Set dst (ConvL2I src));
11462 format %{ "MOV $dst, $src @ convL2I_reg" %}
11463 ins_encode %{
11464 Register dst = as_Register($dst$$reg);
11465 Register src = as_Register($src$$reg);
11467 __ sll(dst, src, 0);
11468 %}
11470 ins_pipe( ialu_regI_regI );
11471 %}
// long -> int -> long round trip: one sll 0 both truncates and re-extends.
11473 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11474 match(Set dst (ConvI2L (ConvL2I src)));
11476 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11477 ins_encode %{
11478 Register dst = as_Register($dst$$reg);
11479 Register src = as_Register($src$$reg);
11481 __ sll(dst, src, 0);
11482 %}
11484 ins_pipe( ialu_regI_regI );
11485 %}
// long -> double: move the 64-bit pattern into the FPU and convert.
11487 instruct convL2D_reg( regD dst, mRegL src ) %{
11488 match(Set dst (ConvL2D src));
11489 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11490 ins_encode %{
11491 Register src = as_Register($src$$reg);
11492 FloatRegister dst = as_FloatRegister($dst$$reg);
11494 __ dmtc1(src, dst);
11495 __ cvt_d_l(dst, dst);
11496 %}
11498 ins_pipe( pipe_slow );
11499 %}
// double -> long, fast path. trunc.l.d saturates out-of-range/NaN inputs to
// max_long; when that sentinel is seen the code distinguishes positive
// overflow (keep max_long), negative overflow (produce min_long) and NaN
// (produce 0) per Java semantics.
11501 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11502 match(Set dst (ConvD2L src));
11503 ins_cost(150);
11504 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11505 ins_encode %{
11506 Register dst = as_Register($dst$$reg);
11507 FloatRegister src = as_FloatRegister($src$$reg);
11509 Label Done;
11511 __ trunc_l_d(F30, src);
11512 // max_long: 0x7fffffffffffffff
11513 // __ set64(AT, 0x7fffffffffffffff);
11514 __ daddiu(AT, R0, -1);
11515 __ dsrl(AT, AT, 1); // AT = max_long built without a constant pool load
11516 __ dmfc1(dst, F30);
11518 __ bne(dst, AT, Done); // in-range result: done
11519 __ delayed()->mtc1(R0, F30);
11521 __ cvt_d_w(F30, F30); // F30 = 0.0 for the sign test below
11522 __ c_ult_d(src, F30);
11523 __ bc1f(Done); // src >= 0.0 (or unordered handled below): keep max_long
11524 __ delayed()->daddiu(T9, R0, -1);
11526 __ c_un_d(src, src); //NaN?
11527 __ subu(dst, T9, AT); // dst = -1 - max_long == min_long
11528 __ movt(dst, R0); // NaN converts to 0
11530 __ bind(Done);
11531 %}
11533 ins_pipe( pipe_slow );
11534 %}
// double -> long, slow path: on an FPU invalid-operation sticky flag
// (FCSR bit 16) fall back to the shared SharedRuntime::d2l runtime call.
11536 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11537 match(Set dst (ConvD2L src));
11538 ins_cost(250);
11539 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11540 ins_encode %{
11541 Register dst = as_Register($dst$$reg);
11542 FloatRegister src = as_FloatRegister($src$$reg);
11544 Label L;
11546 __ c_un_d(src, src); //NaN?
11547 __ bc1t(L);
11548 __ delayed();
11549 __ move(dst, R0); // NaN converts to 0
11551 __ trunc_l_d(F30, src);
11552 __ cfc1(AT, 31); // read FCSR
11553 __ li(T9, 0x10000); // invalid-operation cause bit
11554 __ andr(AT, AT, T9);
11555 __ beq(AT, R0, L); // conversion was exact enough: use trunc result
11556 __ delayed()->dmfc1(dst, F30);
11558 __ mov_d(F12, src); // F12 = first float argument register
11559 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11560 __ move(dst, V0);
11561 __ bind(L);
11562 %}
11564 ins_pipe( pipe_slow );
11565 %}
// float -> int, fast path: trunc result, then force 0 for NaN (movt on the
// c.un comparison flag).
11567 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11568 match(Set dst (ConvF2I src));
11569 ins_cost(150);
11570 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11571 ins_encode %{
11572 Register dreg = $dst$$Register;
11573 FloatRegister fval = $src$$FloatRegister;
11575 __ trunc_w_s(F30, fval);
11576 __ mfc1(dreg, F30);
11577 __ c_un_s(fval, fval); //NaN?
11578 __ movt(dreg, R0);
11579 %}
11581 ins_pipe( pipe_slow );
11582 %}
// float -> int, slow path: NaN -> 0, otherwise trunc; if the FPU raised
// invalid-operation (FCSR bit 16) call SharedRuntime::f2i. V0 is saved
// around the leaf call when it is not the destination (see comment below).
11584 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11585 match(Set dst (ConvF2I src));
11586 ins_cost(250);
11587 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11588 ins_encode %{
11589 Register dreg = $dst$$Register;
11590 FloatRegister fval = $src$$FloatRegister;
11591 Label L;
11593 __ c_un_s(fval, fval); //NaN?
11594 __ bc1t(L);
11595 __ delayed();
11596 __ move(dreg, R0);
11598 __ trunc_w_s(F30, fval);
11600 /* Call SharedRuntime:f2i() to do valid convention */
11601 __ cfc1(AT, 31);
11602 __ li(T9, 0x10000);
11603 __ andr(AT, AT, T9);
11604 __ beq(AT, R0, L);
11605 __ delayed()->mfc1(dreg, F30);
11607 __ mov_s(F12, fval);
11609 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11610 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11611 *
11612 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11613 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11614 */
11615 if(dreg != V0) {
11616 __ push(V0);
11617 }
11618 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11619 if(dreg != V0) {
11620 __ move(dreg, V0);
11621 __ pop(V0);
11622 }
11623 __ bind(L);
11624 %}
11626 ins_pipe( pipe_slow );
11627 %}
// float -> long, fast path: trunc.l.s then force 0 for NaN.
11629 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11630 match(Set dst (ConvF2L src));
11631 ins_cost(150);
11632 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11633 ins_encode %{
11634 Register dreg = $dst$$Register;
11635 FloatRegister fval = $src$$FloatRegister;
11637 __ trunc_l_s(F30, fval);
11638 __ dmfc1(dreg, F30);
11639 __ c_un_s(fval, fval); //NaN?
11640 __ movt(dreg, R0);
11641 %}
11643 ins_pipe( pipe_slow );
11644 %}
// float -> long, slow path: same FCSR invalid-operation fallback pattern as
// convD2L_reg_slow, calling SharedRuntime::f2l.
// NOTE(review): unlike convF2I_reg_slow, V0 is not preserved around the leaf
// call here — presumably safe because dst always receives V0; confirm.
11646 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11647 match(Set dst (ConvF2L src));
11648 ins_cost(250);
11649 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11650 ins_encode %{
11651 Register dst = as_Register($dst$$reg);
11652 FloatRegister fval = $src$$FloatRegister;
11653 Label L;
11655 __ c_un_s(fval, fval); //NaN?
11656 __ bc1t(L);
11657 __ delayed();
11658 __ move(dst, R0);
11660 __ trunc_l_s(F30, fval);
11661 __ cfc1(AT, 31);
11662 __ li(T9, 0x10000);
11663 __ andr(AT, AT, T9);
11664 __ beq(AT, R0, L);
11665 __ delayed()->dmfc1(dst, F30);
11667 __ mov_s(F12, fval);
11668 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11669 __ move(dst, V0);
11670 __ bind(L);
11671 %}
11673 ins_pipe( pipe_slow );
11674 %}
// long -> float: move the 64-bit pattern into the FPU and convert.
// (Removed an unused local `Label L;` — no branch in this encode block.)
11676 instruct convL2F_reg( regF dst, mRegL src ) %{
11677 match(Set dst (ConvL2F src));
11678 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11679 ins_encode %{
11680 FloatRegister dst = $dst$$FloatRegister;
11681 Register src = as_Register($src$$reg);
11684 __ dmtc1(src, dst);
11685 __ cvt_s_l(dst, dst);
11686 %}
11688 ins_pipe( pipe_slow );
11689 %}
// int -> float via FPU register move and convert.
11691 instruct convI2F_reg( regF dst, mRegI src ) %{
11692 match(Set dst (ConvI2F src));
11693 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11694 ins_encode %{
11695 Register src = $src$$Register;
11696 FloatRegister dst = $dst$$FloatRegister;
11698 __ mtc1(src, dst);
11699 __ cvt_s_w(dst, dst);
11700 %}
11702 ins_pipe( fpu_regF_regF );
11703 %}
// CmpLTMask against zero: arithmetic shift by 31 replicates the sign bit,
// giving -1 when p < 0 and 0 otherwise.
11705 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11706 match(Set dst (CmpLTMask p zero));
11707 ins_cost(100);
11709 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11710 ins_encode %{
11711 Register src = $p$$Register;
11712 Register dst = $dst$$Register;
11714 __ sra(dst, src, 31);
11715 %}
11716 ins_pipe( pipe_slow );
11717 %}
// General CmpLTMask: slt produces 0/1, negation turns 1 into all-ones mask.
11720 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11721 match(Set dst (CmpLTMask p q));
11722 ins_cost(400);
11724 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11725 ins_encode %{
11726 Register p = $p$$Register;
11727 Register q = $q$$Register;
11728 Register dst = $dst$$Register;
11730 __ slt(dst, p, q);
11731 __ subu(dst, R0, dst); // 0 -> 0, 1 -> -1 (all ones)
11732 %}
11733 ins_pipe( pipe_slow );
11734 %}
// Pointer -> boolean: same 0/1 materialization as convI2B, including the
// AT spill when dst aliases src.
11736 instruct convP2B(mRegI dst, mRegP src) %{
11737 match(Set dst (Conv2B src));
11739 ins_cost(100);
11740 format %{ "convP2B $dst, $src @ convP2B" %}
11741 ins_encode %{
11742 Register dst = as_Register($dst$$reg);
11743 Register src = as_Register($src$$reg);
11745 if (dst != src) {
11746 __ daddiu(dst, R0, 1);
11747 __ movz(dst, R0, src); // dst = 0 iff src == NULL
11748 } else {
11749 __ move(AT, src); // preserve src before overwriting dst (== src)
11750 __ daddiu(dst, R0, 1);
11751 __ movz(dst, R0, AT);
11752 }
11753 %}
11755 ins_pipe( ialu_regL_regL );
11756 %}
// int -> double via FPU register move and convert.
11759 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11760 match(Set dst (ConvI2D src));
11761 format %{ "conI2D $dst, $src @convI2D_reg" %}
11762 ins_encode %{
11763 Register src = $src$$Register;
11764 FloatRegister dst = $dst$$FloatRegister;
11765 __ mtc1(src, dst);
11766 __ cvt_d_w(dst, dst);
11767 %}
11768 ins_pipe( fpu_regF_regF );
11769 %}
// float -> double widening conversion (always exact).
11771 instruct convF2D_reg_reg(regD dst, regF src) %{
11772 match(Set dst (ConvF2D src));
11773 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11774 ins_encode %{
11775 FloatRegister dst = $dst$$FloatRegister;
11776 FloatRegister src = $src$$FloatRegister;
11778 __ cvt_d_s(dst, src);
11779 %}
11780 ins_pipe( fpu_regF_regF );
11781 %}
// double -> float narrowing conversion.
11783 instruct convD2F_reg_reg(regF dst, regD src) %{
11784 match(Set dst (ConvD2F src));
11785 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11786 ins_encode %{
11787 FloatRegister dst = $dst$$FloatRegister;
11788 FloatRegister src = $src$$FloatRegister;
11790 __ cvt_s_d(dst, src);
11791 %}
11792 ins_pipe( fpu_regF_regF );
11793 %}
11795 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path mirrors convD2L_reg_fast with 32-bit operations: trunc.w.d
// saturates to max_int; on that sentinel, distinguish positive overflow
// (max_int), negative overflow (min_int) and NaN (0).
11796 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11797 match(Set dst (ConvD2I src));
11799 ins_cost(150);
11800 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11802 ins_encode %{
11803 FloatRegister src = $src$$FloatRegister;
11804 Register dst = $dst$$Register;
11806 Label Done;
11808 __ trunc_w_d(F30, src);
11809 // max_int: 2147483647
11810 __ move(AT, 0x7fffffff);
11811 __ mfc1(dst, F30);
11813 __ bne(dst, AT, Done); // in-range result: done
11814 __ delayed()->mtc1(R0, F30);
11816 __ cvt_d_w(F30, F30); // F30 = 0.0 for the sign test
11817 __ c_ult_d(src, F30);
11818 __ bc1f(Done); // src >= 0.0: keep max_int
11819 __ delayed()->addiu(T9, R0, -1);
11821 __ c_un_d(src, src); //NaN?
11822 __ subu32(dst, T9, AT); // dst = -1 - max_int == min_int
11823 __ movt(dst, R0); // NaN converts to 0
11825 __ bind(Done);
11826 %}
11827 ins_pipe( pipe_slow );
11828 %}
// double -> int, slow path: FCSR invalid-operation fallback to
// SharedRuntime::d2i, same pattern as convD2L_reg_slow.
11830 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11831 match(Set dst (ConvD2I src));
11833 ins_cost(250);
11834 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11836 ins_encode %{
11837 FloatRegister src = $src$$FloatRegister;
11838 Register dst = $dst$$Register;
11839 Label L;
11841 __ trunc_w_d(F30, src);
11842 __ cfc1(AT, 31); // read FCSR
11843 __ li(T9, 0x10000); // invalid-operation cause bit
11844 __ andr(AT, AT, T9);
11845 __ beq(AT, R0, L);
11846 __ delayed()->mfc1(dst, F30);
11848 __ mov_d(F12, src);
11849 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11850 __ move(dst, V0);
11851 __ bind(L);
11853 %}
11854 ins_pipe( pipe_slow );
11855 %}
11857 // Convert oop pointer into compressed form
// May-be-null variant: encode_heap_oop handles a NULL source.
11858 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11859 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11860 match(Set dst (EncodeP src));
11861 format %{ "encode_heap_oop $dst,$src" %}
11862 ins_encode %{
11863 Register src = $src$$Register;
11864 Register dst = $dst$$Register;
11865 if (src != dst) {
11866 __ move(dst, src); // encode_heap_oop works in place on dst
11867 }
11868 __ encode_heap_oop(dst);
11869 %}
11870 ins_pipe( ialu_regL_regL );
11871 %}
// Known-not-null variant: skips the NULL check inside the encoder.
11873 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11874 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11875 match(Set dst (EncodeP src));
11876 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11877 ins_encode %{
11878 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11879 %}
11880 ins_pipe( ialu_regL_regL );
11881 %}
// Decompress a narrow oop that may be null.
11883 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11884 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11885 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11886 match(Set dst (DecodeN src));
11887 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11888 ins_encode %{
11889 Register s = $src$$Register;
11890 Register d = $dst$$Register;
11891 if (s != d) {
11892 __ move(d, s); // decode_heap_oop works in place on d
11893 }
11894 __ decode_heap_oop(d);
11895 %}
11896 ins_pipe( ialu_regL_regL );
11897 %}
// Decompress a narrow oop known to be non-null (or a constant).
11899 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11900 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11901 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11902 match(Set dst (DecodeN src));
11903 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11904 ins_encode %{
11905 Register s = $src$$Register;
11906 Register d = $dst$$Register;
11907 if (s != d) {
11908 __ decode_heap_oop_not_null(d, s); // two-register form avoids the move
11909 } else {
11910 __ decode_heap_oop_not_null(d);
11911 }
11912 %}
11913 ins_pipe( ialu_regL_regL );
11914 %}
// Compress a klass pointer (klass pointers are never null).
11916 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11917 match(Set dst (EncodePKlass src));
11918 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11919 ins_encode %{
11920 __ encode_klass_not_null($dst$$Register, $src$$Register);
11921 %}
11922 ins_pipe( ialu_regL_regL );
11923 %}
// Decompress a narrow klass pointer.
11925 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11926 match(Set dst (DecodeNKlass src));
11927 format %{ "decode_heap_klass_not_null $dst,$src" %}
11928 ins_encode %{
11929 Register s = $src$$Register;
11930 Register d = $dst$$Register;
11931 if (s != d) {
11932 __ decode_klass_not_null(d, s);
11933 } else {
11934 __ decode_klass_not_null(d);
11935 }
11936 %}
11937 ins_pipe( ialu_regL_regL );
11938 %}
11940 //FIXME
// Load the current JavaThread pointer. With OPT_THREAD the thread lives in
// a dedicated register (TREG); otherwise it is fetched via get_thread().
11941 instruct tlsLoadP(mRegP dst) %{
11942 match(Set dst (ThreadLocal));
11944 ins_cost(0);
11945 format %{ " get_thread in $dst #@tlsLoadP" %}
11946 ins_encode %{
11947 Register dst = $dst$$Register;
11948 #ifdef OPT_THREAD
11949 __ move(dst, TREG);
11950 #else
11951 __ get_thread(dst);
11952 #endif
11953 %}
11955 ins_pipe( ialu_loadI );
11956 %}
// CheckCastPP is a no-op at the machine level: the cast exists only for the
// optimizer's type system, so no code is emitted.
// (Fixed typo in the debug format string: "chekCastPP" -> "checkCastPP".)
11959 instruct checkCastPP( mRegP dst ) %{
11960 match(Set dst (CheckCastPP dst));
11962 format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
11963 ins_encode( /*empty encoding*/ );
11964 ins_pipe( empty );
11965 %}
// CastPP is a no-op at the machine level (type-system only).
11967 instruct castPP(mRegP dst)
11968 %{
11969 match(Set dst (CastPP dst));
11971 size(0);
11972 format %{ "# castPP of $dst" %}
11973 ins_encode(/* empty encoding */);
11974 ins_pipe(empty);
11975 %}
// CastII is a no-op at the machine level (type-system only).
11977 instruct castII( mRegI dst ) %{
11978 match(Set dst (CastII dst));
11979 format %{ "#castII of $dst empty encoding" %}
11980 ins_encode( /*empty encoding*/ );
11981 ins_cost(0);
11982 ins_pipe( empty );
11983 %}
11985 // Return Instruction
11986 // Remove the return address & jump to it.
// jr RA plus the mandatory branch-delay-slot nop.
11987 instruct Ret() %{
11988 match(Return);
11989 format %{ "RET #@Ret" %}
11991 ins_encode %{
11992 __ jr(RA);
11993 __ nop(); // branch delay slot
11994 %}
11996 ins_pipe( pipe_jump );
11997 %}
11999 /*
12000 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12001 instruct jumpXtnd(mRegL switch_val) %{
12002 match(Jump switch_val);
12004 ins_cost(350);
12006 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12007 "jr T9\n\t"
12008 "nop" %}
12009 ins_encode %{
12010 Register table_base = $constanttablebase;
12011 int con_offset = $constantoffset;
12012 Register switch_reg = $switch_val$$Register;
12014 if (UseLoongsonISA) {
12015 if (Assembler::is_simm(con_offset, 8)) {
12016 __ gsldx(T9, table_base, switch_reg, con_offset);
12017 } else if (Assembler::is_simm16(con_offset)) {
12018 __ daddu(T9, table_base, switch_reg);
12019 __ ld(T9, T9, con_offset);
12020 } else {
12021 __ move(T9, con_offset);
12022 __ daddu(AT, table_base, switch_reg);
12023 __ gsldx(T9, AT, T9, 0);
12024 }
12025 } else {
12026 if (Assembler::is_simm16(con_offset)) {
12027 __ daddu(T9, table_base, switch_reg);
12028 __ ld(T9, T9, con_offset);
12029 } else {
12030 __ move(T9, con_offset);
12031 __ daddu(AT, table_base, switch_reg);
12032 __ daddu(AT, T9, AT);
12033 __ ld(T9, AT, 0);
12034 }
12035 }
12037 __ jr(T9);
12038 __ nop();
12040 %}
12041 ins_pipe(pipe_jump);
12042 %}
12043 */
12045 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label; if the label is not yet bound
// (&L is null at this point) a zero-displacement branch is emitted and
// patched later. The nop fills the branch delay slot.
12046 instruct jmpDir(label labl) %{
12047 match(Goto);
12048 effect(USE labl);
12050 ins_cost(300);
12051 format %{ "JMP $labl #@jmpDir" %}
12053 ins_encode %{
12054 Label &L = *($labl$$label);
12055 if(&L) // non-null label pointer: branch to it directly
12056 __ b(L);
12057 else
12058 __ b(int(0)); // placeholder displacement, fixed up when bound
12059 __ nop(); // branch delay slot
12060 %}
12062 ins_pipe( pipe_jump );
12063 ins_pc_relative(1);
12064 %}
12068 // Tail Jump; remove the return address; jump to target.
12069 // TailCall above leaves the return address around.
12070 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12071 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12072 // "restore" before this instruction (in Epilogue), we need to materialize it
12073 // in %i0.
12074 //FIXME
//FIXME
// Tail jump used by the rethrow stub: forwards the pending exception.
// Moves the exception oop into V0 and the (return) pc into V1, then
// jumps to the target without leaving a return address.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *   [stubGenerator_mips.cpp] generate_forward_exception()
     *   [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // RA still holds the caller's return address at this point; it
    // becomes the "exception pc" consumed by the exception blob.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();    // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
12099 // ============================================================================
12100 // Procedure Call/Return Instructions
12101 // Call Java Static Instruction
12102 // Note: If this code changes, the corresponding ret_addr_offset() and
12103 // compute_padding() functions will have to be adjusted.
// Call Java Static Instruction.
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  // Emission is shared with other call sites via the Java_Static_Call
  // encode class (defined earlier in this file).
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12115 // Call Java Dynamic Instruction
12116 // Note: If this code changes, the corresponding ret_addr_offset() and
12117 // compute_padding() functions will have to be adjusted.
// Call Java Dynamic Instruction (inline-cache dispatch).
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  // Java_Dynamic_Call loads the IC holder and performs the call; see
  // the encode class for details.
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Call a C runtime leaf routine that uses no floating point
// (no safepoint, no oop map needed).
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  // Align the call site; the return-address bookkeeping assumes this.
  ins_alignment(16);
%}
12142 // Prefetch instructions.
12144 instruct prefetchrNTA( memory mem ) %{
12145 match(PrefetchRead mem);
12146 ins_cost(125);
12148 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12149 ins_encode %{
12150 int base = $mem$$base;
12151 int index = $mem$$index;
12152 int scale = $mem$$scale;
12153 int disp = $mem$$disp;
12155 if( index != 0 ) {
12156 if (scale == 0) {
12157 __ daddu(AT, as_Register(base), as_Register(index));
12158 } else {
12159 __ dsll(AT, as_Register(index), scale);
12160 __ daddu(AT, as_Register(base), AT);
12161 }
12162 } else {
12163 __ move(AT, as_Register(base));
12164 }
12165 if( Assembler::is_simm16(disp) ) {
12166 __ daddiu(AT, as_Register(base), disp);
12167 __ daddiu(AT, AT, disp);
12168 } else {
12169 __ move(T9, disp);
12170 __ daddu(AT, as_Register(base), T9);
12171 }
12172 __ pref(0, AT, 0); //hint: 0:load
12173 %}
12174 ins_pipe(pipe_slow);
12175 %}
12177 instruct prefetchwNTA( memory mem ) %{
12178 match(PrefetchWrite mem);
12179 ins_cost(125);
12180 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12181 ins_encode %{
12182 int base = $mem$$base;
12183 int index = $mem$$index;
12184 int scale = $mem$$scale;
12185 int disp = $mem$$disp;
12187 if( index != 0 ) {
12188 if (scale == 0) {
12189 __ daddu(AT, as_Register(base), as_Register(index));
12190 } else {
12191 __ dsll(AT, as_Register(index), scale);
12192 __ daddu(AT, as_Register(base), AT);
12193 }
12194 } else {
12195 __ move(AT, as_Register(base));
12196 }
12197 if( Assembler::is_simm16(disp) ) {
12198 __ daddiu(AT, as_Register(base), disp);
12199 __ daddiu(AT, AT, disp);
12200 } else {
12201 __ move(T9, disp);
12202 __ daddu(AT, as_Register(base), T9);
12203 }
12204 __ pref(1, AT, 0); //hint: 1:store
12205 %}
12206 ins_pipe(pipe_slow);
12207 %}
12209 // Prefetch instructions for allocation.
// Prefetch for allocation: implemented as a byte load whose result is
// discarded by targeting R0 (the architectural zero register), which
// brings the line into the cache without changing architectural state.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Destination is R0 so the loaded byte is thrown away.
    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx does base+index+disp addressing in one instruction.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
12274 // Call runtime without safepoint
// Call runtime without safepoint.
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  // Align the call site; the return-address bookkeeping assumes this.
  ins_alignment(16);
%}
12287 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned) into an int register.
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Char (16-bit unsigned) with implicit zero-extension to long;
// the unsigned load already produces the correct 64-bit value, so the
// same encode class as loadUS is reused.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12306 // Store Char (16bit unsigned)
// Store Char (16-bit) from a register.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store Char (16-bit) zero; avoids materializing the constant.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
12326 instruct loadConF0(regF dst, immF0 zero) %{
12327 match(Set dst zero);
12328 ins_cost(100);
12330 format %{ "mov $dst, zero @ loadConF0\n"%}
12331 ins_encode %{
12332 FloatRegister dst = $dst$$FloatRegister;
12334 __ mtc1(R0, dst);
12335 %}
12336 ins_pipe( fpu_loadF );
12337 %}
// Load a float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the 16-bit immediate of lwc1.
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Materialize the offset and use indexed addressing.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12364 instruct loadConD0(regD dst, immD0 zero) %{
12365 match(Set dst zero);
12366 ins_cost(100);
12368 format %{ "mov $dst, zero @ loadConD0"%}
12369 ins_encode %{
12370 FloatRegister dst = as_FloatRegister($dst$$reg);
12372 __ dmtc1(R0, dst);
12373 %}
12374 ins_pipe( fpu_loadF );
12375 %}
// Load a double constant from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the 16-bit immediate of ldc1.
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Materialize the offset and use indexed addressing.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12400 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0 to memory by storing integer zero (R0) as a word —
// no FPU register needed.  The branches below select the cheapest
// addressing sequence for each combination of index/scale/disp and
// the availability of the Loongson extended (gs*) instructions.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gsswx takes base+index+disp directly (disp must fit 8 bits).
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // Fold base+index into AT, then use sw's 16-bit immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large displacement: fold index+disp into AT first.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12487 // Load Double
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12497 // Load Double - UNaligned
// Load Double - UNaligned.  Currently shares the aligned encode class;
// a dedicated ldl/ldr sequence would be more efficient.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0 to memory.  Materializes 0.0 in scratch FPU
// register F30 (mtc1 of integer zero followed by int->double convert)
// and stores it; the branches pick the cheapest addressing sequence
// for each index/scale/disp combination, using the Loongson gs*
// indexed stores when available.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // F30 := (double) 0  — F30 is used as a scratch FPU register here.
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gssdxc1 takes base+index+disp directly (disp must fit 8 bits).
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // Fold base+index into AT, then use sdc1's 16-bit immediate.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large displacement: fold index+disp into AT first.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12597 instruct loadSSI(mRegI dst, stackSlotI src)
12598 %{
12599 match(Set dst src);
12601 ins_cost(125);
12602 format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
12603 ins_encode %{
12604 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
12605 __ lw($dst$$Register, SP, $src$$disp);
12606 %}
12607 ins_pipe(ialu_loadI);
12608 %}
12610 instruct storeSSI(stackSlotI dst, mRegI src)
12611 %{
12612 match(Set dst src);
12614 ins_cost(100);
12615 format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
12616 ins_encode %{
12617 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
12618 __ sw($src$$Register, SP, $dst$$disp);
12619 %}
12620 ins_pipe(ialu_storeI);
12621 %}
12623 instruct loadSSL(mRegL dst, stackSlotL src)
12624 %{
12625 match(Set dst src);
12627 ins_cost(125);
12628 format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
12629 ins_encode %{
12630 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
12631 __ ld($dst$$Register, SP, $src$$disp);
12632 %}
12633 ins_pipe(ialu_loadI);
12634 %}
12636 instruct storeSSL(stackSlotL dst, mRegL src)
12637 %{
12638 match(Set dst src);
12640 ins_cost(100);
12641 format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
12642 ins_encode %{
12643 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
12644 __ sd($src$$Register, SP, $dst$$disp);
12645 %}
12646 ins_pipe(ialu_storeI);
12647 %}
12649 instruct loadSSP(mRegP dst, stackSlotP src)
12650 %{
12651 match(Set dst src);
12653 ins_cost(125);
12654 format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
12655 ins_encode %{
12656 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
12657 __ ld($dst$$Register, SP, $src$$disp);
12658 %}
12659 ins_pipe(ialu_loadI);
12660 %}
12662 instruct storeSSP(stackSlotP dst, mRegP src)
12663 %{
12664 match(Set dst src);
12666 ins_cost(100);
12667 format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
12668 ins_encode %{
12669 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
12670 __ sd($src$$Register, SP, $dst$$disp);
12671 %}
12672 ins_pipe(ialu_storeI);
12673 %}
12675 instruct loadSSF(regF dst, stackSlotF src)
12676 %{
12677 match(Set dst src);
12679 ins_cost(125);
12680 format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
12681 ins_encode %{
12682 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
12683 __ lwc1($dst$$FloatRegister, SP, $src$$disp);
12684 %}
12685 ins_pipe(ialu_loadI);
12686 %}
12688 instruct storeSSF(stackSlotF dst, regF src)
12689 %{
12690 match(Set dst src);
12692 ins_cost(100);
12693 format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
12694 ins_encode %{
12695 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
12696 __ swc1($src$$FloatRegister, SP, $dst$$disp);
12697 %}
12698 ins_pipe(fpu_storeF);
12699 %}
12701 // Use the same format since predicate() can not be used here.
12702 instruct loadSSD(regD dst, stackSlotD src)
12703 %{
12704 match(Set dst src);
12706 ins_cost(125);
12707 format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
12708 ins_encode %{
12709 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
12710 __ ldc1($dst$$FloatRegister, SP, $src$$disp);
12711 %}
12712 ins_pipe(ialu_loadI);
12713 %}
12715 instruct storeSSD(stackSlotD dst, regD src)
12716 %{
12717 match(Set dst src);
12719 ins_cost(100);
12720 format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
12721 ins_encode %{
12722 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
12723 __ sdc1($src$$FloatRegister, SP, $dst$$disp);
12724 %}
12725 ins_pipe(fpu_storeF);
12726 %}
12728 instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
12729 match( Set cr (FastLock object box) );
12730 effect( TEMP tmp, TEMP scr, USE_KILL box );
12731 ins_cost(300);
12732 format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
12733 ins_encode %{
12734 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
12735 %}
12737 ins_pipe( pipe_slow );
12738 ins_pc_relative(1);
12739 %}
12741 instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
12742 match( Set cr (FastUnlock object box) );
12743 effect( TEMP tmp, USE_KILL box );
12744 ins_cost(300);
12745 format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
12746 ins_encode %{
12747 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
12748 %}
12750 ins_pipe( pipe_slow );
12751 ins_pc_relative(1);
12752 %}
12754 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate: byte store with ordering, emitted via
// the store_B_immI_enc_sync encode class (the "_sync" suffix suggests
// it adds a memory barrier — confirm against the encode class).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12765 // Die now
12766 instruct ShouldNotReachHere( )
12767 %{
12768 match(Halt);
12769 ins_cost(300);
12771 // Use the following format syntax
12772 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12773 ins_encode %{
12774 // Here we should emit illtrap !
12776 __ stop("in ShoudNotReachHere");
12778 %}
12779 ins_pipe( pipe_jump );
12780 %}
12782 instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
12783 %{
12784 predicate(Universe::narrow_oop_shift() == 0);
12785 match(Set dst mem);
12787 ins_cost(110);
12788 format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
12789 ins_encode %{
12790 Register dst = $dst$$Register;
12791 Register base = as_Register($mem$$base);
12792 int disp = $mem$$disp;
12794 __ daddiu(dst, base, disp);
12795 %}
12796 ins_pipe( ialu_regI_imm16 );
12797 %}
12799 instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
12800 %{
12801 match(Set dst mem);
12803 ins_cost(110);
12804 format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
12805 ins_encode %{
12806 Register dst = $dst$$Register;
12807 Register base = as_Register($mem$$base);
12808 Register index = as_Register($mem$$index);
12809 int scale = $mem$$scale;
12810 int disp = $mem$$disp;
12812 if (scale == 0) {
12813 __ daddu(AT, base, index);
12814 __ daddiu(dst, AT, disp);
12815 } else {
12816 __ dsll(AT, index, scale);
12817 __ daddu(AT, base, AT);
12818 __ daddiu(dst, AT, disp);
12819 }
12820 %}
12822 ins_pipe( ialu_regI_imm16 );
12823 %}
12825 instruct leaPIdxScale(mRegP dst, indIndexScale mem)
12826 %{
12827 match(Set dst mem);
12829 ins_cost(110);
12830 format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
12831 ins_encode %{
12832 Register dst = $dst$$Register;
12833 Register base = as_Register($mem$$base);
12834 Register index = as_Register($mem$$index);
12835 int scale = $mem$$scale;
12837 if (scale == 0) {
12838 __ daddu(dst, base, index);
12839 } else {
12840 __ dsll(AT, index, scale);
12841 __ daddu(dst, base, AT);
12842 }
12843 %}
12845 ins_pipe( ialu_regI_imm16 );
12846 %}
12848 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back edge: compare two int registers and branch.
// cmpcode values 0x03-0x06 are synthesized with slt into AT followed
// by a branch on AT; every path ends with a nop in the delay slot.
// A NULL label (presumably during instruction sizing — TODO confirm)
// gets a zero-offset branch of identical size.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // op1 > op2  <=>  op2 < op1
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        // op1 >= op2  <=>  !(op1 < op2)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        // op1 <= op2  <=>  !(op2 < op1)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();    // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back edge against a small immediate: computes
// AT = src1 - src2 with a single addiu and branches on the sign/zero
// of the difference.  The immI16_sub operand is assumed to guarantee
// that -src2 fits addiu's immediate field — confirm at its definition.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT = op1 - op2 (32-bit arithmetic).
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();    // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12975 /*
12976 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12977 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12978 match(CountedLoopEnd cop cmp);
12979 effect(USE labl);
12981 ins_cost(300);
12982 format %{ "J$cop,u $labl\t# Loop end" %}
12983 size(6);
12984 opcode(0x0F, 0x80);
12985 ins_encode( Jcc( cop, labl) );
12986 ins_pipe( pipe_jump );
12987 ins_pc_relative(1);
12988 %}
12990 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12991 match(CountedLoopEnd cop cmp);
12992 effect(USE labl);
12994 ins_cost(200);
12995 format %{ "J$cop,u $labl\t# Loop end" %}
12996 opcode(0x0F, 0x80);
12997 ins_encode( Jcc( cop, labl) );
12998 ins_pipe( pipe_jump );
12999 ins_pc_relative(1);
13000 %}
13001 */
13003 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the pseudo-flag left in AT by a flag-producing instruction
// (e.g. storeIConditional sets AT non-zero on success, zero on
// failure).  Note the branch sense is inverted relative to the
// cmpcode: "equal" branches when AT != 0.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop    $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();    // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13038 // ============================================================================
13039 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13040 // array for an instance of the superklass. Set a hidden internal cache on a
13041 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13042 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary
// superklass array for an instance of the superklass.  Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()).  Return NZ for a miss or zero for a hit.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13054 // Conditional-store of an int value.
13055 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional-store of an int value (LL/SC loop).  On exit AT holds
// 0xFF on success and 0 on failure; jmpCon_flags consumes that
// convention.  Only base+disp addressing is supported — an index
// register trips the stop() below.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

//    int     base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int     disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Optional barrier before the LL/SC pair, controlled by
      // UseSyncLevel.
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      // Mismatch: take the failure exit with AT = 0 (delay slot).
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // sc wrote 0 into AT if the store failed; retry.  The delay slot
      // sets AT = 0xFF, which becomes the success flag when the branch
      // is not taken.
      __ beq(AT, R0, again);
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13095 // Conditional-store of a long value.
13096 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13097 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13098 %{
13099 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13100 effect(KILL oldval);
13102 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13103 ins_encode%{
13104 Register oldval = $oldval$$Register;
13105 Register newval = $newval$$Register;
13106 Address addr((Register)$mem$$base, $mem$$disp);
13108 int index = $mem$$index;
13109 int scale = $mem$$scale;
13110 int disp = $mem$$disp;
13112 guarantee(Assembler::is_simm16(disp), "");
13114 if( index != 0 ) {
13115 __ stop("in storeIConditional: index != 0");
13116 } else {
13117 __ cmpxchg(newval, addr, oldval);
13118 }
13119 %}
13120 ins_pipe( long_memory_op );
13121 %}
// Compare-and-swap of a 32-bit value.  cmpxchg32 leaves its
// success/failure indication in AT, which is copied into res.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;    // unused; kept from the original encoding

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13146 //FIXME:
//FIXME:
// Compare-and-swap of a 64-bit pointer.  cmpxchg leaves its
// success/failure indication in AT, which is copied into res.
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;    // unused; kept from the original encoding

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// Compare-and-swap of a narrow (compressed) oop.  oldval is
// sign-extended in place before the compare (hence KILL oldval).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;    // unused; kept from the original encoding

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparison.
     */
    // sll with shift 0 sign-extends the low 32 bits on MIPS64.
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13190 //----------Max and Min--------------------------------------------------------
13191 // Min Instructions
13192 ////
13193 // *** Min and Max using the conditional move are slower than the
13194 // *** branch version on a Pentium III.
13195 // // Conditional move for min
13196 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13197 // effect( USE_DEF op2, USE op1, USE cr );
13198 // format %{ "CMOVlt $op2,$op1\t! min" %}
13199 // opcode(0x4C,0x0F);
13200 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13201 // ins_pipe( pipe_cmov_reg );
13202 //%}
13203 //
13204 //// Min Register with Register (P6 version)
13205 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13206 // predicate(VM_Version::supports_cmov() );
13207 // match(Set op2 (MinI op1 op2));
13208 // ins_cost(200);
13209 // expand %{
13210 // eFlagsReg cr;
13211 // compI_eReg(cr,op1,op2);
13212 // cmovI_reg_lt(op2,op1,cr);
13213 // %}
13214 //%}
// Min Register with Register (generic version)
// dst := min(dst, src), branch-free: slt sets AT = (src < dst), and movn
// conditionally replaces dst with src when AT is non-zero.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    // AT = 1 iff src < dst; then dst = src when AT != 0.
    __ slt(AT, $src$$Register, $dst$$Register);
    __ movn($dst$$Register, $src$$Register, AT);
  %}

  ins_pipe( pipe_slow );
%}
13235 // Max Register with Register
13236 // *** Min and Max using the conditional move are slower than the
13237 // *** branch version on a Pentium III.
13238 // // Conditional move for max
13239 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13240 // effect( USE_DEF op2, USE op1, USE cr );
13241 // format %{ "CMOVgt $op2,$op1\t! max" %}
13242 // opcode(0x4F,0x0F);
13243 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13244 // ins_pipe( pipe_cmov_reg );
13245 //%}
13246 //
13247 // // Max Register with Register (P6 version)
13248 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13249 // predicate(VM_Version::supports_cmov() );
13250 // match(Set op2 (MaxI op1 op2));
13251 // ins_cost(200);
13252 // expand %{
13253 // eFlagsReg cr;
13254 // compI_eReg(cr,op1,op2);
13255 // cmovI_reg_gt(op2,op1,cr);
13256 // %}
13257 //%}
// Max Register with Register (generic version)
// dst := max(dst, src), branch-free: slt sets AT = (dst < src), and movn
// conditionally replaces dst with src when AT is non-zero.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    // AT = 1 iff dst < src; then dst = src when AT != 0.
    __ slt(AT, $dst$$Register, $src$$Register);
    __ movn($dst$$Register, $src$$Register, AT);
  %}

  ins_pipe( pipe_slow );
%}
// dst := max(dst, 0). Cheaper special case of MaxI against the zero
// register: if dst < 0 (AT set), clamp dst to 0 via movn.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = 1 iff dst < 0; then dst = 0 when AT != 0.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);
  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: matches AndL with the 0xFFFFFFFF
// mask and emits a single dext (extract bits [0,32) into dst).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Combine two 32-bit ints into one long: src2 goes to the high half,
// src1 to the low half. Matches (src1 & 0xFFFFFFFF) | (src2 << 32).
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The three cases order the insert/extract so that a source register
    // aliasing dst is never clobbered before it is read.
    if (src1 == dst) {
      // Low half already in place: just insert src2 into bits [32,64).
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move dst's low word up to the high half, then insert src1 low.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // dst distinct from both sources: extract low, insert high.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}

// Zero-extend convert int to long
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// L2I followed by zero-extending I2L collapses to the same single dext.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  // load_N_enc emits the zero-extending 32-bit load directly.
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Same as loadUI2L_rmask, but with the mask on the left of the AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction

// Safepoint poll with the polling-page address already in a register.
// The load is tagged with relocInfo::poll_type so the VM can recognize the
// faulting PC when the polling page is protected.
// NOTE: predicate(false) — this form is never selected by the matcher.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);
    // Reading the polling page faults when a safepoint is pending.
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}

// Safepoint poll that materializes the polling-page address itself (into T9)
// and then performs the relocated poll load.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    // Reading the polling page faults when a safepoint is pending.
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op here: ins_encode() emits nothing and the cost is 0,
// so the node is matched away without generating code.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// RoundDouble is likewise a zero-cost, zero-code no-op.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// Count leading zeros of an int with the clz instruction; guarded by the
// UseCountLeadingZerosInstruction flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count leading zeros of a long with the 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count trailing zeros of an int; guarded by UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are GS (Godson-specific) instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long with the GS (Godson-specific) dctz
// instruction; guarded by UseCountTrailingZerosInstruction.
// Fixed: the format string said "dcto" while the emitted instruction is dctz.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FP register (vecD); reuse the
// double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}

// Store vectors (8 bytes long)
// Mirror of loadV8: store the vecD via the double-store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes using the DSP replv.ob
// instruction (available when Use3A2000); cheaper than the generic form.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic byte replicate: successive dins/dinsu doublings spread the low
// byte across all 64 bits, then move to the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    // Double the replicated width each step: 8 -> 16 -> 32 -> 64 bits.
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a byte immediate into all 8 lanes via the DSP repl.ob
// instruction (Use3A2000 only).
// NOTE(review): unlike Repl4S_imm_DSP below there is no is_simm range check
// before repl_ob — presumably the byte immediate always fits; confirm.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic byte-immediate replicate: same dins doubling as Repl8B, starting
// from the materialized immediate.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    // Double the replicated width each step: 8 -> 16 -> 32 -> 64 bits.
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicating zero needs only a single move of R0 into the FP register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicating -1: nor(AT, R0, R0) produces all-ones, then move to FP.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes using the DSP replv.qh
// instruction (available when Use3A2000).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic short replicate: dins/dinsu doublings spread the low halfword
// across all 64 bits (16 -> 32 -> 64).
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a short immediate via DSP. repl_qh only takes a 10-bit signed
// immediate, so larger constants are materialized with li32 and replicated
// through replv_qh instead.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic short-immediate replicate: materialize, then double 16 -> 32 -> 64.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicating zero: single move of R0 into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicating -1: nor(AT, R0, R0) produces all-ones, then move to FP.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// Insert src into both 32-bit halves of AT, then move to the FP register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    // Low half, then high half; both halves of AT are written.
    __ dins(AT, $src$$Register, 0, 32);
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an integer (4 byte) scalar immediate into both halves of the
// vector: materialize the 32-bit constant with li32, duplicate it into the
// upper half with dinsu, then move to the FP register.
// Fixed: the header comment claimed a constant-table load and the format
// string ("li32 AT, [$con], 32" / "dinsu AT, AT") did not match the emitted
// code; both now reflect the actual sequence.
// NOTE(review): tmp (A7) is killed but never referenced in the encoding —
// presumably a leftover from an earlier implementation; confirm before
// removing the effect.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "li32 AT, [$con]\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ li32(AT, val);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar zero to be vector
// Single move of R0 into the FP register covers both lanes.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate integer (4 byte) scalar -1 to be vector
// nor(AT, R0, R0) yields all-ones across both lanes.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs the two single sources into one paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate float (4 byte) scalar zero to be vector
// +0.0f in both lanes is all-zero bits, so moving R0 suffices.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

// Floats vector add
// Two-address form: dst += src using the paired-single add.
// NOTE(review): this form uses pipe_slow while the three-address form below
// uses fpu_regF_regF — possibly intentional, confirm.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Three-address paired-single add: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// --------------------------------- SUB --------------------------------------

// Floats vector sub
// Two-address paired-single subtract: dst -= src.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// --------------------------------- MUL --------------------------------------

// Floats vector mul
// Two-address paired-single multiply: dst *= src.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Three-address paired-single multiply: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13821 // --------------------------------- DIV --------------------------------------
13822 // MIPS do not have div.ps
13825 //----------PEEPHOLE RULES-----------------------------------------------------
13826 // These must follow all instruction definitions as they use the names
13827 // defined in the instructions definitions.
13828 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13830 //
13831 // peepconstraint %{
13832 // (instruction_number.operand_name relational_op instruction_number.operand_name
13833 // [, ...] );
13834 // // instruction numbers are zero-based using left to right order in peepmatch
13835 //
13836 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13837 // // provide an instruction_number.operand_name for each operand that appears
13838 // // in the replacement instruction's match rule
13839 //
13840 // ---------VM FLAGS---------------------------------------------------------
13841 //
13842 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13843 //
13844 // Each peephole rule is given an identifying number starting with zero and
13845 // increasing by one in the order seen by the parser. An individual peephole
13846 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13847 // on the command-line.
13848 //
13849 // ---------CURRENT LIMITATIONS----------------------------------------------
13850 //
13851 // Only match adjacent instructions in same basic block
13852 // Only equality constraints
13853 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13854 // Only one replacement instruction
13855 //
13856 // ---------EXAMPLE----------------------------------------------------------
13857 //
13858 // // pertinent parts of existing instructions in architecture description
13859 // instruct movI(eRegI dst, eRegI src) %{
13860 // match(Set dst (CopyI src));
13861 // %}
13862 //
13863 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13864 // match(Set dst (AddI dst src));
13865 // effect(KILL cr);
13866 // %}
13867 //
13868 // // Change (inc mov) to lea
13869 // peephole %{
// // increment preceded by register-register move
13871 // peepmatch ( incI_eReg movI );
13872 // // require that the destination register of the increment
13873 // // match the destination register of the move
13874 // peepconstraint ( 0.dst == 1.dst );
13875 // // construct a replacement instruction that sets
13876 // // the destination to ( move's source register + one )
13877 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13878 // %}
13879 //
13880 // Implementation no longer uses movX instructions since
13881 // machine-independent system no longer uses CopyX nodes.
13882 //
13883 // peephole %{
13884 // peepmatch ( incI_eReg movI );
13885 // peepconstraint ( 0.dst == 1.dst );
13886 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13887 // %}
13888 //
13889 // peephole %{
13890 // peepmatch ( decI_eReg movI );
13891 // peepconstraint ( 0.dst == 1.dst );
13892 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13893 // %}
13894 //
13895 // peephole %{
13896 // peepmatch ( addI_eReg_imm movI );
13897 // peepconstraint ( 0.dst == 1.dst );
13898 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13899 // %}
13900 //
13901 // peephole %{
13902 // peepmatch ( addP_eReg_imm movP );
13903 // peepconstraint ( 0.dst == 1.dst );
13904 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13905 // %}
13907 // // Change load of spilled value to only a spill
13908 // instruct storeI(memory mem, eRegI src) %{
13909 // match(Set mem (StoreI mem src));
13910 // %}
13911 //
13912 // instruct loadI(eRegI dst, memory mem) %{
13913 // match(Set dst (LoadI mem));
13914 // %}
13915 //
13916 //peephole %{
13917 // peepmatch ( loadI storeI );
13918 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13919 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13920 //%}
13922 //----------SMARTSPILL RULES---------------------------------------------------
13923 // These must follow all instruction definitions as they use the names
13924 // defined in the instructions definitions.