Tue, 28 Feb 2017 10:29:54 -0500
[C2] Remove storeL_imm & storeL_reg_atomic in mips_64.ad
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
// General Registers
// Integer Registers
// Each 64-bit register is described as a lo/hi pair (Rn, Rn_H) sharing one
// encoding; the allocator treats the pair as one long/pointer register.
reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
// S0-S7 are callee-saved (Save-On-Entry) per the MIPS o64/n64 ABI.
reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());

// Special Registers
// K0/K1 are reserved for the kernel; GP/SP/FP/RA are never allocatable.
reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());

// Floating registers.
// All FP registers are caller-saved (Save-On-Call).
reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());

//S6 is used for get_thread(S6)
//S5 is used for heapbase of compressed oop
// Integer-register allocation order. Earlier entries are preferred by the
// register allocator; SP/FP/GP/RA are listed last and are never allocated.
// NOTE: a comma was missing after "GP, GP_H", which breaks adlc parsing of
// this alloc_class; fixed below.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H,
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
);
// Floating-point allocation order (register pairs Fn, Fn_H).
// NOTE(review): F12-F19 (the FP argument registers) are deliberately listed
// late so temporaries are preferred over argument registers — confirm against
// SharedRuntime::java_calling_convention before reordering.
alloc_class chunk1( F0, F0_H,
                    F1, F1_H,
                    F2, F2_H,
                    F3, F3_H,
                    F4, F4_H,
                    F5, F5_H,
                    F6, F6_H,
                    F7, F7_H,
                    F8, F8_H,
                    F9, F9_H,
                    F10, F10_H,
                    F11, F11_H,
                    F20, F20_H,
                    F21, F21_H,
                    F22, F22_H,
                    F23, F23_H,
                    F24, F24_H,
                    F25, F25_H,
                    F26, F26_H,
                    F27, F27_H,
                    F28, F28_H,
                    F19, F19_H,
                    F18, F18_H,
                    F17, F17_H,
                    F16, F16_H,
                    F15, F15_H,
                    F14, F14_H,
                    F13, F13_H,
                    F12, F12_H,
                    F29, F29_H,
                    F30, F30_H,
                    F31, F31_H);

// The flags register lives in its own chunk.
alloc_class chunk2(MIPS_FLAG);
// Single-register classes, used by instruction patterns that require one
// specific register (e.g. a fixed ABI register).

// Callee-saved registers.
reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
reg_class s0_reg( S0 );
reg_class s1_reg( S1 );
reg_class s2_reg( S2 );
reg_class s3_reg( S3 );
reg_class s4_reg( S4 );
reg_class s5_reg( S5 );
reg_class s6_reg( S6 );
reg_class s7_reg( S7 );

// Caller-saved temporaries.
reg_class t_reg( T0, T1, T2, T3, T8, T9 );
reg_class t0_reg( T0 );
reg_class t1_reg( T1 );
reg_class t2_reg( T2 );
reg_class t3_reg( T3 );
reg_class t8_reg( T8 );
reg_class t9_reg( T9 );

// Argument registers.
reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
reg_class a0_reg( A0 );
reg_class a1_reg( A1 );
reg_class a2_reg( A2 );
reg_class a3_reg( A3 );
reg_class a4_reg( A4 );
reg_class a5_reg( A5 );
reg_class a6_reg( A6 );
reg_class a7_reg( A7 );

// Return-value registers.
reg_class v0_reg( V0 );
reg_class v1_reg( V1 );

// Stack and frame pointers (full 64-bit pairs).
reg_class sp_reg( SP, SP_H );
reg_class fp_reg( FP, FP_H );

// Condition-flags class.
reg_class mips_flags(MIPS_FLAG);

// Long (64-bit) variants: each class names the lo/hi pair of one register.
reg_class v0_long_reg( V0, V0_H );
reg_class v1_long_reg( V1, V1_H );
reg_class a0_long_reg( A0, A0_H );
reg_class a1_long_reg( A1, A1_H );
reg_class a2_long_reg( A2, A2_H );
reg_class a3_long_reg( A3, A3_H );
reg_class a4_long_reg( A4, A4_H );
reg_class a5_long_reg( A5, A5_H );
reg_class a6_long_reg( A6, A6_H );
reg_class a7_long_reg( A7, A7_H );
reg_class t0_long_reg( T0, T0_H );
reg_class t1_long_reg( T1, T1_H );
reg_class t2_long_reg( T2, T2_H );
reg_class t3_long_reg( T3, T3_H );
reg_class t8_long_reg( T8, T8_H );
reg_class t9_long_reg( T9, T9_H );
reg_class s0_long_reg( S0, S0_H );
reg_class s1_long_reg( S1, S1_H );
reg_class s2_long_reg( S2, S2_H );
reg_class s3_long_reg( S3, S3_H );
reg_class s4_long_reg( S4, S4_H );
reg_class s5_long_reg( S5, S5_H );
reg_class s6_long_reg( S6, S6_H );
reg_class s7_long_reg( S7, S7_H );

// General-purpose int registers available to the allocator
// (excludes S5/S6 which are reserved — see comments above alloc_class chunk0).
reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );

// As int_reg but without the argument registers A0-A7.
reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
// Pointer registers (64-bit pairs); same set as int_reg minus V1.
reg_class p_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T8, T8_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
);

// p_reg without T8, for patterns that use T8 as a scratch register.
reg_class no_T8_p_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
);

// Long (64-bit) register class; identical membership to p_reg.
reg_class long_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T8, T8_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
);
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Allocatable single-precision FP registers. F30 is excluded (used as a
// temporary in D2I — see the comments above). A comma was missing between
// F17 and F18, which breaks adlc parsing; fixed below.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
// Allocatable double-precision FP registers (lo/hi pairs).
// F30/F30_H is excluded: it is used as a temporary in D2I conversion
// (see the 2012/8/23 comment above flt_reg).
reg_class dbl_reg( F0, F0_H,
                   F1, F1_H,
                   F2, F2_H,
                   F3, F3_H,
                   F4, F4_H,
                   F5, F5_H,
                   F6, F6_H,
                   F7, F7_H,
                   F8, F8_H,
                   F9, F9_H,
                   F10, F10_H,
                   F11, F11_H,
                   F12, F12_H,
                   F13, F13_H,
                   F14, F14_H,
                   F15, F15_H,
                   F16, F16_H,
                   F17, F17_H,
                   F18, F18_H,
                   F19, F19_H,
                   F20, F20_H,
                   F21, F21_H,
                   F22, F22_H,
                   F23, F23_H,
                   F24, F24_H,
                   F25, F25_H,
                   F26, F26_H,
                   F27, F27_H,
                   F28, F28_H,
                   F29, F29_H,
                   F31, F31_H);

// FP argument registers for the first float/double arguments.
reg_class flt_arg0( F12 );
reg_class dbl_arg0( F12, F12_H );
reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
definitions %{
// Instruction-cost constants used by the matcher to choose between rules.
int_def DEFAULT_COST ( 100, 100);
int_def HUGE_COST (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);

// Branches are even more expensive.
int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
// we use jr instruction to construct call, so more expensive
// by yjl 2/28/2006
int_def CALL_COST ( 500, DEFAULT_COST * 5);
/*
int_def EQUAL ( 1, 1 );
int_def NOT_EQUAL ( 2, 2 );
int_def GREATER ( 3, 3 );
int_def GREATER_EQUAL ( 4, 4 );
int_def LESS ( 5, 5 );
int_def LESS_EQUAL ( 6, 6 );
*/
%}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Trampoline-stub bookkeeping queried by Compile::shorten_branches.
// This platform emits no call trampolines, so both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Exception/deopt handler stub emission and sizing. The emit_* methods are
// defined in the source %{ %} block below.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Handler sizes are rounded to 16 bytes so the stubs stay patchable
  // without spanning cache lines.
  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call be deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call be deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
// Relocation/index shorthands used by the emit routines below.
#define NO_INDEX 0
#define RELOC_IMM64 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand

// Conventional HotSpot shorthand: route assembler calls through the
// local MacroAssembler instance named _masm.
#define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
574 __ jr(T9);
575 __ delayed()->nop();
576 __ align(16);
577 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
578 __ end_a_stub();
579 return offset;
580 }
582 // Emit deopt handler code.
583 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
584 /*
585 // Note that the code buffer's insts_mark is always relative to insts.
586 // That's why we must use the macroassembler to generate a handler.
587 MacroAssembler _masm(&cbuf);
588 address base = __ start_a_stub(size_deopt_handler());
589 if (base == NULL) return 0; // CodeBuffer::expand failed
590 int offset = __ offset();
592 #ifdef _LP64
593 address the_pc = (address) __ pc();
594 Label next;
595 // push a "the_pc" on the stack without destroying any registers
596 // as they all may be live.
598 // push address of "next"
599 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
600 __ bind(next);
601 // adjust it so it matches "the_pc"
602 __ subptr(Address(rsp, 0), __ offset() - offset);
603 #else
604 InternalAddress here(__ pc());
605 __ pushptr(here.addr());
606 #endif
608 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
609 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
610 __ end_a_stub();
611 return offset;
612 */
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a handler.
615 MacroAssembler _masm(&cbuf);
616 address base =
617 __ start_a_stub(size_deopt_handler());
619 // FIXME
620 if (base == NULL) return 0; // CodeBuffer::expand failed
621 int offset = __ offset();
623 __ block_comment("; emit_deopt_handler");
625 cbuf.set_insts_mark();
626 __ relocate(relocInfo::runtime_call_type);
628 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
629 __ jalr(T9);
630 __ delayed()->nop();
631 __ align(16);
632 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
633 __ end_a_stub();
634 return offset;
635 }
638 const bool Matcher::match_rule_supported(int opcode) {
639 if (!has_match_rule(opcode))
640 return false;
642 switch (opcode) {
643 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
644 case Op_CountLeadingZerosI:
645 case Op_CountLeadingZerosL:
646 if (!UseCountLeadingZerosInstruction)
647 return false;
648 break;
649 case Op_CountTrailingZerosI:
650 case Op_CountTrailingZerosL:
651 if (!UseCountTrailingZerosInstruction)
652 return false;
653 break;
654 }
656 return true; // Per default match rules are supported.
657 }
//FIXME
// emit call stub, compiled java to interpreter
// Emits the static-call stub: li48 of the methodOop into S3 (patched later),
// then a relocated jump whose target is resolved by the IC machinery.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */

  // static stub relocation also tags the methodOop in the code-stream.
  __ li48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;   // placeholder target, patched on resolution
  __ li48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
// size of call stub, compiled java to interpreter
// Must cover the li48 (4 instructions) plus the call sequence emitted by
// emit_java_to_interp above, rounded to the 16-byte stub alignment.
uint size_java_to_interp() {
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}

// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp() {
  return 16;  // in emit_java_to_interp + in Java_Static_Call
}
714 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
715 if( Assembler::is_simm16(offset) ) return true;
716 else
717 {
718 assert(false, "Not implemented yet !" );
719 Unimplemented();
720 }
721 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// NOTE(review): comment below is an x86 leftover ("SSE2"); the effect here
// is simply that float/double cmove is not profitable on this port.
// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Narrow oops never need a complex (base+index+offset) address on this port.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Max vector size in bytes. 0 if not supported.
// This port only supports 8-byte (64-bit) vectors.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: only Op_VecD (8-byte) vectors exist here.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case  8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are unsupported on this port.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// divmodI/divmodL are not matched on this port, so the projection masks
// below must never be requested.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Map a register number to its FPU save-area offset.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer argument registers: T0 (receiver convention) and A0-A7.
  if (    reg == T0_num || reg == T0_H_num
       || reg == A0_num || reg == A0_H_num
       || reg == A1_num || reg == A1_H_num
       || reg == A2_num || reg == A2_H_num
       || reg == A3_num || reg == A3_H_num
       || reg == A4_num || reg == A4_H_num
       || reg == A5_num || reg == A5_H_num
       || reg == A6_num || reg == A6_H_num
       || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point argument registers: F12-F19.
  if (    reg == F12_num || reg == F12_H_num
       || reg == F13_num || reg == F13_H_num
       || reg == F14_num || reg == F14_H_num
       || reg == F15_num || reg == F15_H_num
       || reg == F16_num || reg == F16_H_num
       || reg == F17_num || reg == F17_H_num
       || reg == F18_num || reg == F18_H_num
       || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}

// Any argument register may be used to spill.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Use hardware division rather than magic-number expansion for long
// division by constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL
// (divmodL is not matched on this port; must never be requested.)
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in FP across a method-handle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// Each compute_padding below returns how many bytes of padding must be
// inserted before current_offset to align the call sequence listed in
// the instruction comments.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  //li64 <--- skip: the 6-instruction IC-klass load precedes the call
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  current_offset += 4 * 6;  // skip li64
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray: arrays shorter than this are cleared inline.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// The ecx parameter to rep stos for the ClearArray node is in dwords.
const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// Since MIPS doesn't have absolute addressing, it needs the polling page
// address materialized into a register and passed in.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
977 // !!!!! Special hack to get all type of calls to specify the byte offset
978 // from the start of the call to the point where the return address
979 // will point.
980 int MachCallStaticJavaNode::ret_addr_offset() {
981 assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
982 //The value ought to be 16 bytes.
983 //lui
984 //ori
985 //dsll
986 //ori
987 //jalr
988 //nop
989 return NativeCall::instruction_size;
990 }
992 int MachCallDynamicJavaNode::ret_addr_offset() {
993 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
995 // return NativeCall::instruction_size;
996 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
997 //The value ought to be 4 + 16 bytes.
998 //lui IC_Klass,
999 //ori IC_Klass,
1000 //dsll IC_Klass
1001 //ori IC_Klass
1002 //lui T9
1003 //ori T9
1004 //dsll T9
1005 //ori T9
1006 //jalr T9
1007 //nop
1008 return 6 * 4 + NativeCall::instruction_size;
1010 }
1012 /*
1013 // EMIT_OPCODE()
1014 void emit_opcode(CodeBuffer &cbuf, int code) {
1015 *(cbuf.code_end()) = (unsigned char)code;
1016 cbuf.set_code_end(cbuf.code_end() + 1);
1017 }
1018 */
// Emit a 32-bit datum into the instruction stream, recording a relocation
// of the given type and format at the current instruction mark.
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
        int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}
1026 //=============================================================================
1028 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1029 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1030 static enum RC rc_class( OptoReg::Name reg ) {
1031 if( !OptoReg::is_valid(reg) ) return rc_bad;
1032 if (OptoReg::is_stack(reg)) return rc_stack;
1033 VMReg r = OptoReg::as_VMReg(reg);
1034 if (r->is_Register()) return rc_int;
1035 assert(r->is_FloatRegister(), "must be");
1036 return rc_float;
1037 }
1039 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
1040 // Get registers to move
1041 OptoReg::Name src_second = ra_->get_reg_second(in(1));
1042 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1043 OptoReg::Name dst_second = ra_->get_reg_second(this );
1044 OptoReg::Name dst_first = ra_->get_reg_first(this );
1046 enum RC src_second_rc = rc_class(src_second);
1047 enum RC src_first_rc = rc_class(src_first);
1048 enum RC dst_second_rc = rc_class(dst_second);
1049 enum RC dst_first_rc = rc_class(dst_first);
1051 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1053 // Generate spill code!
1054 int size = 0;
1056 if( src_first == dst_first && src_second == dst_second )
1057 return 0; // Self copy, no move
1059 if (src_first_rc == rc_stack) {
1060 // mem ->
1061 if (dst_first_rc == rc_stack) {
1062 // mem -> mem
1063 assert(src_second != dst_first, "overlap");
1064 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1065 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1066 // 64-bit
1067 int src_offset = ra_->reg2offset(src_first);
1068 int dst_offset = ra_->reg2offset(dst_first);
1069 if (cbuf) {
1070 MacroAssembler _masm(cbuf);
1071 __ ld(AT, Address(SP, src_offset));
1072 __ sd(AT, Address(SP, dst_offset));
1073 #ifndef PRODUCT
1074 } else {
1075 if(!do_size){
1076 if (size != 0) st->print("\n\t");
1077 st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
1078 "sd AT, [SP + #%d]",
1079 src_offset, dst_offset);
1080 }
1081 #endif
1082 }
1083 size += 8;
1084 } else {
1085 // 32-bit
1086 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1087 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1088 // No pushl/popl, so:
1089 int src_offset = ra_->reg2offset(src_first);
1090 int dst_offset = ra_->reg2offset(dst_first);
1091 if (cbuf) {
1092 MacroAssembler _masm(cbuf);
1093 __ lw(AT, Address(SP, src_offset));
1094 __ sw(AT, Address(SP, dst_offset));
1095 #ifndef PRODUCT
1096 } else {
1097 if(!do_size){
1098 if (size != 0) st->print("\n\t");
1099 st->print("lw AT, [SP + #%d] spill 2\n\t"
1100 "sw AT, [SP + #%d]\n\t",
1101 src_offset, dst_offset);
1102 }
1103 #endif
1104 }
1105 size += 8;
1106 }
1107 return size;
1108 } else if (dst_first_rc == rc_int) {
1109 // mem -> gpr
1110 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1111 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1112 // 64-bit
1113 int offset = ra_->reg2offset(src_first);
1114 if (cbuf) {
1115 MacroAssembler _masm(cbuf);
1116 __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1117 #ifndef PRODUCT
1118 } else {
1119 if(!do_size){
1120 if (size != 0) st->print("\n\t");
1121 st->print("ld %s, [SP + #%d]\t# spill 3",
1122 Matcher::regName[dst_first],
1123 offset);
1124 }
1125 #endif
1126 }
1127 size += 4;
1128 } else {
1129 // 32-bit
1130 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1131 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1132 int offset = ra_->reg2offset(src_first);
1133 if (cbuf) {
1134 MacroAssembler _masm(cbuf);
1135 if (this->ideal_reg() == Op_RegI)
1136 __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1137 else
1138 __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1139 #ifndef PRODUCT
1140 } else {
1141 if(!do_size){
1142 if (size != 0) st->print("\n\t");
1143 if (this->ideal_reg() == Op_RegI)
1144 st->print("lw %s, [SP + #%d]\t# spill 4",
1145 Matcher::regName[dst_first],
1146 offset);
1147 else
1148 st->print("lwu %s, [SP + #%d]\t# spill 5",
1149 Matcher::regName[dst_first],
1150 offset);
1151 }
1152 #endif
1153 }
1154 size += 4;
1155 }
1156 return size;
1157 } else if (dst_first_rc == rc_float) {
1158 // mem-> xmm
1159 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1160 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1161 // 64-bit
1162 int offset = ra_->reg2offset(src_first);
1163 if (cbuf) {
1164 MacroAssembler _masm(cbuf);
1165 __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1166 #ifndef PRODUCT
1167 } else {
1168 if(!do_size){
1169 if (size != 0) st->print("\n\t");
1170 st->print("ldc1 %s, [SP + #%d]\t# spill 6",
1171 Matcher::regName[dst_first],
1172 offset);
1173 }
1174 #endif
1175 }
1176 size += 4;
1177 } else {
1178 // 32-bit
1179 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1180 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1181 int offset = ra_->reg2offset(src_first);
1182 if (cbuf) {
1183 MacroAssembler _masm(cbuf);
1184 __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1185 #ifndef PRODUCT
1186 } else {
1187 if(!do_size){
1188 if (size != 0) st->print("\n\t");
1189 st->print("lwc1 %s, [SP + #%d]\t# spill 7",
1190 Matcher::regName[dst_first],
1191 offset);
1192 }
1193 #endif
1194 }
1195 size += 4;
1196 }
1197 return size;
1198 }
1199 } else if (src_first_rc == rc_int) {
1200 // gpr ->
1201 if (dst_first_rc == rc_stack) {
1202 // gpr -> mem
1203 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1204 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1205 // 64-bit
1206 int offset = ra_->reg2offset(dst_first);
1207 if (cbuf) {
1208 MacroAssembler _masm(cbuf);
1209 __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1210 #ifndef PRODUCT
1211 } else {
1212 if(!do_size){
1213 if (size != 0) st->print("\n\t");
1214 st->print("sd %s, [SP + #%d] # spill 8",
1215 Matcher::regName[src_first],
1216 offset);
1217 }
1218 #endif
1219 }
1220 size += 4;
1221 } else {
1222 // 32-bit
1223 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1224 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1225 int offset = ra_->reg2offset(dst_first);
1226 if (cbuf) {
1227 MacroAssembler _masm(cbuf);
1228 __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1229 #ifndef PRODUCT
1230 } else {
1231 if(!do_size){
1232 if (size != 0) st->print("\n\t");
1233 st->print("sw %s, [SP + #%d]\t# spill 9",
1234 Matcher::regName[src_first], offset);
1235 }
1236 #endif
1237 }
1238 size += 4;
1239 }
1240 return size;
1241 } else if (dst_first_rc == rc_int) {
1242 // gpr -> gpr
1243 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1244 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1245 // 64-bit
1246 if (cbuf) {
1247 MacroAssembler _masm(cbuf);
1248 __ move(as_Register(Matcher::_regEncode[dst_first]),
1249 as_Register(Matcher::_regEncode[src_first]));
1250 #ifndef PRODUCT
1251 } else {
1252 if(!do_size){
1253 if (size != 0) st->print("\n\t");
1254 st->print("move(64bit) %s <-- %s\t# spill 10",
1255 Matcher::regName[dst_first],
1256 Matcher::regName[src_first]);
1257 }
1258 #endif
1259 }
1260 size += 4;
1261 return size;
1262 } else {
1263 // 32-bit
1264 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1265 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1266 if (cbuf) {
1267 MacroAssembler _masm(cbuf);
1268 if (this->ideal_reg() == Op_RegI)
1269 __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
1270 else
1271 __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
1273 #ifndef PRODUCT
1274 } else {
1275 if(!do_size){
1276 if (size != 0) st->print("\n\t");
1277 st->print("move(32-bit) %s <-- %s\t# spill 11",
1278 Matcher::regName[dst_first],
1279 Matcher::regName[src_first]);
1280 }
1281 #endif
1282 }
1283 size += 4;
1284 return size;
1285 }
1286 } else if (dst_first_rc == rc_float) {
1287 // gpr -> xmm
1288 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1289 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1290 // 64-bit
1291 if (cbuf) {
1292 MacroAssembler _masm(cbuf);
1293 __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
1294 #ifndef PRODUCT
1295 } else {
1296 if(!do_size){
1297 if (size != 0) st->print("\n\t");
1298 st->print("dmtc1 %s, %s\t# spill 12",
1299 Matcher::regName[dst_first],
1300 Matcher::regName[src_first]);
1301 }
1302 #endif
1303 }
1304 size += 4;
1305 } else {
1306 // 32-bit
1307 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1308 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1309 if (cbuf) {
1310 MacroAssembler _masm(cbuf);
1311 __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
1312 #ifndef PRODUCT
1313 } else {
1314 if(!do_size){
1315 if (size != 0) st->print("\n\t");
1316 st->print("mtc1 %s, %s\t# spill 13",
1317 Matcher::regName[dst_first],
1318 Matcher::regName[src_first]);
1319 }
1320 #endif
1321 }
1322 size += 4;
1323 }
1324 return size;
1325 }
1326 } else if (src_first_rc == rc_float) {
1327 // xmm ->
1328 if (dst_first_rc == rc_stack) {
1329 // xmm -> mem
1330 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1331 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1332 // 64-bit
1333 int offset = ra_->reg2offset(dst_first);
1334 if (cbuf) {
1335 MacroAssembler _masm(cbuf);
1336 __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
1337 #ifndef PRODUCT
1338 } else {
1339 if(!do_size){
1340 if (size != 0) st->print("\n\t");
1341 st->print("sdc1 %s, [SP + #%d]\t# spill 14",
1342 Matcher::regName[src_first],
1343 offset);
1344 }
1345 #endif
1346 }
1347 size += 4;
1348 } else {
1349 // 32-bit
1350 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1351 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1352 int offset = ra_->reg2offset(dst_first);
1353 if (cbuf) {
1354 MacroAssembler _masm(cbuf);
1355 __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
1356 #ifndef PRODUCT
1357 } else {
1358 if(!do_size){
1359 if (size != 0) st->print("\n\t");
1360 st->print("swc1 %s, [SP + #%d]\t# spill 15",
1361 Matcher::regName[src_first],
1362 offset);
1363 }
1364 #endif
1365 }
1366 size += 4;
1367 }
1368 return size;
1369 } else if (dst_first_rc == rc_int) {
1370 // xmm -> gpr
1371 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1372 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1373 // 64-bit
1374 if (cbuf) {
1375 MacroAssembler _masm(cbuf);
1376 __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1377 #ifndef PRODUCT
1378 } else {
1379 if(!do_size){
1380 if (size != 0) st->print("\n\t");
1381 st->print("dmfc1 %s, %s\t# spill 16",
1382 Matcher::regName[dst_first],
1383 Matcher::regName[src_first]);
1384 }
1385 #endif
1386 }
1387 size += 4;
1388 } else {
1389 // 32-bit
1390 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1391 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1392 if (cbuf) {
1393 MacroAssembler _masm(cbuf);
1394 __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1395 #ifndef PRODUCT
1396 } else {
1397 if(!do_size){
1398 if (size != 0) st->print("\n\t");
1399 st->print("mfc1 %s, %s\t# spill 17",
1400 Matcher::regName[dst_first],
1401 Matcher::regName[src_first]);
1402 }
1403 #endif
1404 }
1405 size += 4;
1406 }
1407 return size;
1408 } else if (dst_first_rc == rc_float) {
1409 // xmm -> xmm
1410 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1411 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1412 // 64-bit
1413 if (cbuf) {
1414 MacroAssembler _masm(cbuf);
1415 __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1416 #ifndef PRODUCT
1417 } else {
1418 if(!do_size){
1419 if (size != 0) st->print("\n\t");
1420 st->print("mov_d %s <-- %s\t# spill 18",
1421 Matcher::regName[dst_first],
1422 Matcher::regName[src_first]);
1423 }
1424 #endif
1425 }
1426 size += 4;
1427 } else {
1428 // 32-bit
1429 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1430 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1431 if (cbuf) {
1432 MacroAssembler _masm(cbuf);
1433 __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1434 #ifndef PRODUCT
1435 } else {
1436 if(!do_size){
1437 if (size != 0) st->print("\n\t");
1438 st->print("mov_s %s <-- %s\t# spill 19",
1439 Matcher::regName[dst_first],
1440 Matcher::regName[src_first]);
1441 }
1442 #endif
1443 }
1444 size += 4;
1445 }
1446 return size;
1447 }
1448 }
1450 assert(0," foo ");
1451 Unimplemented();
1452 return size;
1454 }
#ifndef PRODUCT
// Pretty-print the spill copy; no code is emitted and no size is accumulated.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Byte size of the spill copy, computed without emitting anything.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1470 //=============================================================================
1471 #
#ifndef PRODUCT
// Print a breakpoint pseudo-instruction.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; deferred to the generic hard-way computation.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1489 //=============================================================================
1490 #ifndef PRODUCT
1491 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1492 Compile *C = ra_->C;
1493 int framesize = C->frame_size_in_bytes();
1495 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1497 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1498 st->cr(); st->print("\t");
1499 if (UseLoongsonISA) {
1500 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1501 } else {
1502 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1503 st->cr(); st->print("\t");
1504 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1505 }
1507 if( do_polling() && C->is_method_compilation() ) {
1508 st->print("Poll Safepoint # MachEpilogNode");
1509 }
1510 }
1511 #endif
// Emit the method epilogue: pop the frame, reload RA/FP from the slots just
// above the restored SP, then touch the polling page so a pending safepoint
// can stop the thread at return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP then sit at negative offsets from SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson load-pair: restore RA and FP with one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  // The epilog in a RuntimeStub should not contain a safepoint.
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the full 64-bit polling-page address, then poll.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    // Split the polling-page address across lui + the load's displacement.
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Size of the epilogue; too many variables, so compute it the hard way.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case relocation count for the epilogue.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1555 int MachEpilogNode::safepoint_offset() const { return 0; }
1557 //=============================================================================
#ifndef PRODUCT
// Print the box-lock address materialization (see emit below).
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One addi instruction (4 bytes).
// NOTE(review): assumes the monitor-box offset fits the addi immediate and
// emit() stays a single instruction -- confirm if large frames are possible.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1572 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1573 MacroAssembler _masm(&cbuf);
1574 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1575 int reg = ra_->get_encode(this);
1577 __ addi(as_Register(reg), SP, offset);
1578 /*
1579 if( offset >= 128 ) {
1580 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1581 emit_rm(cbuf, 0x2, reg, 0x04);
1582 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1583 emit_d32(cbuf, offset);
1584 }
1585 else {
1586 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1587 emit_rm(cbuf, 0x1, reg, 0x04);
1588 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1589 emit_d8(cbuf, offset);
1590 }
1591 */
1592 }
1595 //static int sizeof_FFree_Float_Stack_All = -1;
1597 int MachCallRuntimeNode::ret_addr_offset() {
1598 //lui
1599 //ori
1600 //dsll
1601 //ori
1602 //jalr
1603 //nop
1604 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1605 return NativeCall::instruction_size;
1606 // return 16;
1607 }
1613 //=============================================================================
#ifndef PRODUCT
// Print the NOP padding; _count is the number of 4-byte NOP instructions.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1620 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1621 MacroAssembler _masm(&cbuf);
1622 int i = 0;
1623 for(i = 0; i < _count; i++)
1624 __ nop();
1625 }
// Each NOP is one 4-byte instruction.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for NOP padding.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1634 //=============================================================================
1636 //=============================================================================
#ifndef PRODUCT
// Print the Unverified Entry Point: inline-cache klass check with a jump to
// the IC-miss stub on mismatch.  Mirrors MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified Entry Point: compare the receiver's klass (receiver arrives in
// T0) against the inline-cache klass; on mismatch, tail-jump to the IC-miss
// stub via T9.  Falls through to the verified entry point on a hit.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);

  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();    // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size of the UEP; variable due to alignment, so compute it the hard way.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1680 //=============================================================================
1682 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is addressed absolutely (li48 of the raw address), so no
// offset from the section start is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1688 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called because requires_postalloc_expand() returns false above.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the destination register
// with a relocated li48.  Emits nothing when the constants section is empty.
// NOTE(review): size() below unconditionally reports 16 bytes while this path
// is conditional -- confirm the node is never created without constants.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
// li48 materializes a 48-bit address in 4 instructions (16 bytes).
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Print the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1725 //=============================================================================
#ifndef PRODUCT
// Print the prologue: optional stack bang, save RA/FP, establish FP, allocate
// the frame.  Mirrors MachPrologNode::emit below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson store-pair saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1754 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1755 Compile* C = ra_->C;
1756 MacroAssembler _masm(&cbuf);
1758 int framesize = C->frame_size_in_bytes();
1759 int bangsize = C->bang_size_in_bytes();
1761 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1763 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1765 if (C->need_stack_bang(framesize)) {
1766 __ generate_stack_overflow_check(framesize);
1767 }
1769 if (UseLoongsonISA) {
1770 __ gssq(RA, FP, SP, -wordSize*2);
1771 } else {
1772 __ sd(RA, SP, -wordSize);
1773 __ sd(FP, SP, -wordSize*2);
1774 }
1775 __ daddiu(FP, SP, -wordSize*2);
1776 __ daddiu(SP, SP, -framesize);
1777 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1778 __ nop();
1780 C->set_frame_complete(cbuf.insts_size());
1781 if (C->has_mach_constant_base_node()) {
1782 // NOTE: We set the table base offset here because users might be
1783 // emitted before MachConstantBaseNode.
1784 Compile::ConstantTable& constant_table = C->constant_table();
1785 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1786 }
1788 }
// Size of the prologue; variable (bang, Loongson pair), so compute it the
// hard way via the generic mechanism.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case relocation count for the prologue.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1800 %}
1802 //----------ENCODING BLOCK-----------------------------------------------------
1803 // This block specifies the encoding classes used by the compiler to output
1804 // byte streams. Encoding classes generate functions which are called by
1805 // Machine Instruction Nodes in order to generate the bit encoding of the
1806 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
1808 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1809 // operand to generate a function which returns its register number when
1810 // queried. CONST_INTER causes an operand to generate a function which
1811 // returns the value of the constant when queried. MEMORY_INTER causes an
1812 // operand to generate four functions which return the Base Register, the
1813 // Index Register, the Scale Value, and the Offset Value of the operand when
1814 // queried. COND_INTER causes an operand to generate six functions which
1815 // return the encoding code (ie - encoding bits for the instruction)
1816 // associated with each basic boolean condition for a conditional instruction.
1817 // Instructions specify two basic values for encoding. They use the
1818 // ins_encode keyword to specify their encoding class (which must be one of
1819 // the class names specified in the encoding block), and they use the
1820 // opcode keyword to specify, in order, their primary, secondary, and
1821 // tertiary opcode. Only the opcode sections which a particular instruction
1822 // needs for encoding need to be specified.
1823 encode %{
1824 /*
1825 Alias:
1826 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1827 118 B14: # B19 B15 <- B13 Freq: 0.899955
1828 118 add S1, S2, V0 #@addP_reg_reg
1829 11c lb S0, [S1 + #-8257524] #@loadB
1830 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1831 */
1832 //Load byte signed
1833 enc_class load_B_enc (mRegI dst, memory mem) %{
1834 MacroAssembler _masm(&cbuf);
1835 int dst = $dst$$reg;
1836 int base = $mem$$base;
1837 int index = $mem$$index;
1838 int scale = $mem$$scale;
1839 int disp = $mem$$disp;
1841 if( index != 0 ) {
1842 if( Assembler::is_simm16(disp) ) {
1843 if( UseLoongsonISA ) {
1844 if (scale == 0) {
1845 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1846 } else {
1847 __ dsll(AT, as_Register(index), scale);
1848 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1849 }
1850 } else {
1851 if (scale == 0) {
1852 __ addu(AT, as_Register(base), as_Register(index));
1853 } else {
1854 __ dsll(AT, as_Register(index), scale);
1855 __ addu(AT, as_Register(base), AT);
1856 }
1857 __ lb(as_Register(dst), AT, disp);
1858 }
1859 } else {
1860 if (scale == 0) {
1861 __ addu(AT, as_Register(base), as_Register(index));
1862 } else {
1863 __ dsll(AT, as_Register(index), scale);
1864 __ addu(AT, as_Register(base), AT);
1865 }
1866 __ move(T9, disp);
1867 if( UseLoongsonISA ) {
1868 __ gslbx(as_Register(dst), AT, T9, 0);
1869 } else {
1870 __ addu(AT, AT, T9);
1871 __ lb(as_Register(dst), AT, 0);
1872 }
1873 }
1874 } else {
1875 if( Assembler::is_simm16(disp) ) {
1876 __ lb(as_Register(dst), as_Register(base), disp);
1877 } else {
1878 __ move(T9, disp);
1879 if( UseLoongsonISA ) {
1880 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1881 } else {
1882 __ addu(AT, as_Register(base), T9);
1883 __ lb(as_Register(dst), AT, 0);
1884 }
1885 }
1886 }
1887 %}
// Load byte unsigned from [base + (index << scale) + disp].
enc_class load_UB_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    // Compute base + (index << scale) into AT, then lbu with displacement.
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lbu(as_Register(dst), AT, disp);
    } else {
      // Displacement exceeds lbu's 16-bit immediate; add it via T9 first.
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ lbu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lbu(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lbu(as_Register(dst), AT, 0);
    }
  }
%}
// Store byte from a register to [base + (index << scale) + disp].
// Loongson gssbx (8-bit displacement) is preferred when it applies.
// NOTE(review): the address arithmetic here uses addu (32-bit add) while the
// sibling load_UB_enc uses daddu -- presumably addu suffices only for
// sign-extended 32-bit addresses; confirm against the other enc_classes.
enc_class store_B_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      // No scaling: index can be used directly as the gssbx index register.
      if( Assembler::is_simm(disp, 8) ) {
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        }
      } else if( Assembler::is_simm16(disp) ) {
        __ addu(AT, as_Register(base), as_Register(index));
        __ sb(as_Register(src), AT, disp);
      } else {
        // Displacement too large for any immediate form; go through T9.
        __ addu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    } else {
      // Scaled index: shift first, then combine with base.
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm(disp, 8) ) {
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, as_Register(base), disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        }
      } else if( Assembler::is_simm16(disp) ) {
        __ addu(AT, as_Register(base), AT);
        __ sb(as_Register(src), AT, disp);
      } else {
        __ addu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  } else {
    // No index register: plain base + disp.
    if( Assembler::is_simm16(disp) ) {
      __ sb(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssbx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sb(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store Byte from immediate: stores the low 8 bits of the constant src to
// base + (index << scale) + disp. value == 0 is special-cased to store R0
// (the hardwired zero register) and skip the constant materialization.
// The Loongson path uses gssbx (indexed store byte, 8-bit immediate);
// the generic path computes the address in AT and uses sb. T9 is scratch.
1991 enc_class store_B_immI_enc (memory mem, immI8 src) %{
1992 MacroAssembler _masm(&cbuf);
1993 int base = $mem$$base;
1994 int index = $mem$$index;
1995 int scale = $mem$$scale;
1996 int disp = $mem$$disp;
1997 int value = $src$$constant;
1999 if( index != 0 ) {
// Generic MIPS path: form the full address in AT, then sb.
2000 if (!UseLoongsonISA) {
2001 if (scale == 0) {
2002 __ daddu(AT, as_Register(base), as_Register(index));
2003 } else {
2004 __ dsll(AT, as_Register(index), scale);
2005 __ daddu(AT, as_Register(base), AT);
2006 }
2007 if( Assembler::is_simm16(disp) ) {
2008 if (value == 0) {
2009 __ sb(R0, AT, disp);
2010 } else {
2011 __ move(T9, value);
2012 __ sb(T9, AT, disp);
2013 }
2014 } else {
2015 if (value == 0) {
2016 __ move(T9, disp);
2017 __ daddu(AT, AT, T9);
2018 __ sb(R0, AT, 0);
2019 } else {
2020 __ move(T9, disp);
2021 __ daddu(AT, AT, T9);
2022 __ move(T9, value);
2023 __ sb(T9, AT, 0);
2024 }
2025 }
// Loongson path: let gssbx do the final register+register add.
2026 } else {
2028 if (scale == 0) {
2029 if( Assembler::is_simm(disp, 8) ) {
2030 if (value == 0) {
2031 __ gssbx(R0, as_Register(base), as_Register(index), disp);
2032 } else {
2033 __ move(T9, value);
2034 __ gssbx(T9, as_Register(base), as_Register(index), disp);
2035 }
2036 } else if( Assembler::is_simm16(disp) ) {
2037 __ daddu(AT, as_Register(base), as_Register(index));
2038 if (value == 0) {
2039 __ sb(R0, AT, disp);
2040 } else {
2041 __ move(T9, value);
2042 __ sb(T9, AT, disp);
2043 }
2044 } else {
2045 if (value == 0) {
2046 __ daddu(AT, as_Register(base), as_Register(index));
2047 __ move(T9, disp);
2048 __ gssbx(R0, AT, T9, 0);
2049 } else {
// Both T9 and a second scratch are needed (disp and value), so
// AT holds base+disp and gssbx adds the (unscaled) index register.
2050 __ move(AT, disp);
2051 __ move(T9, value);
2052 __ daddu(AT, as_Register(base), AT);
2053 __ gssbx(T9, AT, as_Register(index), 0);
2054 }
2055 }
2057 } else {
2059 if( Assembler::is_simm(disp, 8) ) {
2060 __ dsll(AT, as_Register(index), scale);
2061 if (value == 0) {
2062 __ gssbx(R0, as_Register(base), AT, disp);
2063 } else {
2064 __ move(T9, value);
2065 __ gssbx(T9, as_Register(base), AT, disp);
2066 }
2067 } else if( Assembler::is_simm16(disp) ) {
2068 __ dsll(AT, as_Register(index), scale);
2069 __ daddu(AT, as_Register(base), AT);
2070 if (value == 0) {
2071 __ sb(R0, AT, disp);
2072 } else {
2073 __ move(T9, value);
2074 __ sb(T9, AT, disp);
2075 }
2076 } else {
2077 __ dsll(AT, as_Register(index), scale);
2078 if (value == 0) {
2079 __ daddu(AT, as_Register(base), AT);
2080 __ move(T9, disp);
2081 __ gssbx(R0, AT, T9, 0);
2082 } else {
// AT accumulates (index << scale) + disp; gssbx adds base.
2083 __ move(T9, disp);
2084 __ daddu(AT, AT, T9);
2085 __ move(T9, value);
2086 __ gssbx(T9, as_Register(base), AT, 0);
2087 }
2088 }
2089 }
2090 }
2091 } else {
// No index register: base + disp only.
2092 if( Assembler::is_simm16(disp) ) {
2093 if (value == 0) {
2094 __ sb(R0, as_Register(base), disp);
2095 } else {
2096 __ move(AT, value);
2097 __ sb(AT, as_Register(base), disp);
2098 }
2099 } else {
2100 if (value == 0) {
2101 __ move(T9, disp);
2102 if (UseLoongsonISA) {
2103 __ gssbx(R0, as_Register(base), T9, 0);
2104 } else {
2105 __ daddu(AT, as_Register(base), T9);
2106 __ sb(R0, AT, 0);
2107 }
2108 } else {
2109 __ move(T9, disp);
2110 if (UseLoongsonISA) {
2111 __ move(AT, value);
2112 __ gssbx(AT, as_Register(base), T9, 0);
2113 } else {
2114 __ daddu(AT, as_Register(base), T9);
2115 __ move(T9, value);
2116 __ sb(T9, AT, 0);
2117 }
2118 }
2119 }
2120 }
2121 %}
// Store Byte immediate, followed by a memory barrier (sync) — used for
// volatile/ordered byte stores. Same addressing scheme as store_B_immI_enc
// but without the Loongson gssbx fast paths. AT/T9 are scratch.
2124 enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
2125 MacroAssembler _masm(&cbuf);
2126 int base = $mem$$base;
2127 int index = $mem$$index;
2128 int scale = $mem$$scale;
2129 int disp = $mem$$disp;
2130 int value = $src$$constant;
2132 if( index != 0 ) {
2133 if (scale == 0) {
2134 __ daddu(AT, as_Register(base), as_Register(index));
2135 } else {
2136 __ dsll(AT, as_Register(index), scale);
2137 __ daddu(AT, as_Register(base), AT);
2138 }
2139 if( Assembler::is_simm16(disp) ) {
// Store R0 directly for a zero constant; otherwise build it in T9.
2140 if (value == 0) {
2141 __ sb(R0, AT, disp);
2142 } else {
2143 __ move(T9, value);
2144 __ sb(T9, AT, disp);
2145 }
2146 } else {
2147 if (value == 0) {
2148 __ move(T9, disp);
2149 __ daddu(AT, AT, T9);
2150 __ sb(R0, AT, 0);
2151 } else {
2152 __ move(T9, disp);
2153 __ daddu(AT, AT, T9);
2154 __ move(T9, value);
2155 __ sb(T9, AT, 0);
2156 }
2157 }
2158 } else {
2159 if( Assembler::is_simm16(disp) ) {
2160 if (value == 0) {
2161 __ sb(R0, as_Register(base), disp);
2162 } else {
2163 __ move(AT, value);
2164 __ sb(AT, as_Register(base), disp);
2165 }
2166 } else {
2167 if (value == 0) {
2168 __ move(T9, disp);
2169 __ daddu(AT, as_Register(base), T9);
2170 __ sb(R0, AT, 0);
2171 } else {
2172 __ move(T9, disp);
2173 __ daddu(AT, as_Register(base), T9);
2174 __ move(T9, value);
2175 __ sb(T9, AT, 0);
2176 }
2177 }
2178 }
// Full barrier after the store gives the ordering the "_sync" variant exists for.
2180 __ sync();
2181 %}
2183 // Load Short (16bit signed)
// Load Short: sign-extending 16-bit load (lh) from
// base + (index << scale) + disp. AT/T9 are scratch.
// NOTE(review): the large-disp paths use addu (32-bit add) where the
// byte/char loaders use daddu — confirm this is intentional for this port.
2184 enc_class load_S_enc (mRegI dst, memory mem) %{
2185 MacroAssembler _masm(&cbuf);
2186 int dst = $dst$$reg;
2187 int base = $mem$$base;
2188 int index = $mem$$index;
2189 int scale = $mem$$scale;
2190 int disp = $mem$$disp;
2192 if( index != 0 ) {
2193 if (scale == 0) {
2194 __ daddu(AT, as_Register(base), as_Register(index));
2195 } else {
2196 __ dsll(AT, as_Register(index), scale);
2197 __ daddu(AT, as_Register(base), AT);
2198 }
2199 if( Assembler::is_simm16(disp) ) {
2200 __ lh(as_Register(dst), AT, disp);
2201 } else {
2202 __ move(T9, disp);
2203 __ addu(AT, AT, T9);
2204 __ lh(as_Register(dst), AT, 0);
2205 }
2206 } else {
2207 if( Assembler::is_simm16(disp) ) {
2208 __ lh(as_Register(dst), as_Register(base), disp);
2209 } else {
2210 __ move(T9, disp);
2211 __ addu(AT, as_Register(base), T9);
2212 __ lh(as_Register(dst), AT, 0);
2213 }
2214 }
2215 %}
2217 // Load Char (16bit unsigned)
// Load Char: zero-extending 16-bit load (lhu) from
// base + (index << scale) + disp. AT/T9 are scratch.
2218 enc_class load_C_enc (mRegI dst, memory mem) %{
2219 MacroAssembler _masm(&cbuf);
2220 int dst = $dst$$reg;
2221 int base = $mem$$base;
2222 int index = $mem$$index;
2223 int scale = $mem$$scale;
2224 int disp = $mem$$disp;
2226 if( index != 0 ) {
2227 if (scale == 0) {
2228 __ daddu(AT, as_Register(base), as_Register(index));
2229 } else {
2230 __ dsll(AT, as_Register(index), scale);
2231 __ daddu(AT, as_Register(base), AT);
2232 }
2233 if( Assembler::is_simm16(disp) ) {
2234 __ lhu(as_Register(dst), AT, disp);
2235 } else {
2236 __ move(T9, disp);
// NOTE(review): addu here vs daddu in the no-index path below — verify.
2237 __ addu(AT, AT, T9);
2238 __ lhu(as_Register(dst), AT, 0);
2239 }
2240 } else {
2241 if( Assembler::is_simm16(disp) ) {
2242 __ lhu(as_Register(dst), as_Register(base), disp);
2243 } else {
2244 __ move(T9, disp);
2245 __ daddu(AT, as_Register(base), T9);
2246 __ lhu(as_Register(dst), AT, 0);
2247 }
2248 }
2249 %}
2251 // Store Char (16bit unsigned)
// Store Char: 16-bit store (sh, or Loongson indexed gsshx) of src to
// base + (index << scale) + disp. gsshx carries an 8-bit immediate,
// hence the extra is_simm(disp, 8) gate. AT/T9 are scratch.
2252 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2253 MacroAssembler _masm(&cbuf);
2254 int src = $src$$reg;
2255 int base = $mem$$base;
2256 int index = $mem$$index;
2257 int scale = $mem$$scale;
2258 int disp = $mem$$disp;
2260 if( index != 0 ) {
2261 if( Assembler::is_simm16(disp) ) {
// Loongson fast path: gsshx folds the reg+reg add into the store.
2262 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2263 if (scale == 0) {
2264 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2265 } else {
2266 __ dsll(AT, as_Register(index), scale);
2267 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2268 }
2269 } else {
2270 if (scale == 0) {
2271 __ addu(AT, as_Register(base), as_Register(index));
2272 } else {
2273 __ dsll(AT, as_Register(index), scale);
2274 __ addu(AT, as_Register(base), AT);
2275 }
2276 __ sh(as_Register(src), AT, disp);
2277 }
2278 } else {
2279 if (scale == 0) {
2280 __ addu(AT, as_Register(base), as_Register(index));
2281 } else {
2282 __ dsll(AT, as_Register(index), scale);
2283 __ addu(AT, as_Register(base), AT);
2284 }
2285 __ move(T9, disp);
2286 if( UseLoongsonISA ) {
2287 __ gsshx(as_Register(src), AT, T9, 0);
2288 } else {
2289 __ addu(AT, AT, T9);
2290 __ sh(as_Register(src), AT, 0);
2291 }
2292 }
2293 } else {
2294 if( Assembler::is_simm16(disp) ) {
2295 __ sh(as_Register(src), as_Register(base), disp);
2296 } else {
2297 __ move(T9, disp);
2298 if( UseLoongsonISA ) {
2299 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2300 } else {
2301 __ addu(AT, as_Register(base), T9);
2302 __ sh(as_Register(src), AT, 0);
2303 }
2304 }
2305 }
2306 %}
// Load Int: sign-extending 32-bit load (lw, or Loongson indexed gslwx)
// from base + (index << scale) + disp. AT/T9 are scratch.
2308 enc_class load_I_enc (mRegI dst, memory mem) %{
2309 MacroAssembler _masm(&cbuf);
2310 int dst = $dst$$reg;
2311 int base = $mem$$base;
2312 int index = $mem$$index;
2313 int scale = $mem$$scale;
2314 int disp = $mem$$disp;
2316 if( index != 0 ) {
2317 if( Assembler::is_simm16(disp) ) {
// gslwx accepts only an 8-bit immediate displacement.
2318 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2319 if (scale == 0) {
2320 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2321 } else {
2322 __ dsll(AT, as_Register(index), scale);
2323 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2324 }
2325 } else {
2326 if (scale == 0) {
2327 __ addu(AT, as_Register(base), as_Register(index));
2328 } else {
2329 __ dsll(AT, as_Register(index), scale);
2330 __ addu(AT, as_Register(base), AT);
2331 }
2332 __ lw(as_Register(dst), AT, disp);
2333 }
2334 } else {
2335 if (scale == 0) {
2336 __ addu(AT, as_Register(base), as_Register(index));
2337 } else {
2338 __ dsll(AT, as_Register(index), scale);
2339 __ addu(AT, as_Register(base), AT);
2340 }
2341 __ move(T9, disp);
2342 if( UseLoongsonISA ) {
2343 __ gslwx(as_Register(dst), AT, T9, 0);
2344 } else {
2345 __ addu(AT, AT, T9);
2346 __ lw(as_Register(dst), AT, 0);
2347 }
2348 }
2349 } else {
2350 if( Assembler::is_simm16(disp) ) {
2351 __ lw(as_Register(dst), as_Register(base), disp);
2352 } else {
2353 __ move(T9, disp);
2354 if( UseLoongsonISA ) {
2355 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2356 } else {
2357 __ addu(AT, as_Register(base), T9);
2358 __ lw(as_Register(dst), AT, 0);
2359 }
2360 }
2361 }
2362 %}
// Store Int from register: 32-bit store (sw, or Loongson indexed gsswx)
// of src to base + (index << scale) + disp. AT/T9 are scratch.
2364 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2365 MacroAssembler _masm(&cbuf);
2366 int src = $src$$reg;
2367 int base = $mem$$base;
2368 int index = $mem$$index;
2369 int scale = $mem$$scale;
2370 int disp = $mem$$disp;
2372 if( index != 0 ) {
2373 if( Assembler::is_simm16(disp) ) {
// gsswx accepts only an 8-bit immediate displacement.
2374 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2375 if (scale == 0) {
2376 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2377 } else {
2378 __ dsll(AT, as_Register(index), scale);
2379 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2380 }
2381 } else {
2382 if (scale == 0) {
2383 __ addu(AT, as_Register(base), as_Register(index));
2384 } else {
2385 __ dsll(AT, as_Register(index), scale);
2386 __ addu(AT, as_Register(base), AT);
2387 }
2388 __ sw(as_Register(src), AT, disp);
2389 }
2390 } else {
2391 if (scale == 0) {
2392 __ addu(AT, as_Register(base), as_Register(index));
2393 } else {
2394 __ dsll(AT, as_Register(index), scale);
2395 __ addu(AT, as_Register(base), AT);
2396 }
2397 __ move(T9, disp);
2398 if( UseLoongsonISA ) {
2399 __ gsswx(as_Register(src), AT, T9, 0);
2400 } else {
2401 __ addu(AT, AT, T9);
2402 __ sw(as_Register(src), AT, 0);
2403 }
2404 }
2405 } else {
2406 if( Assembler::is_simm16(disp) ) {
2407 __ sw(as_Register(src), as_Register(base), disp);
2408 } else {
2409 __ move(T9, disp);
2410 if( UseLoongsonISA ) {
2411 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2412 } else {
2413 __ addu(AT, as_Register(base), T9);
2414 __ sw(as_Register(src), AT, 0);
2415 }
2416 }
2417 }
2418 %}
// Store Int from immediate: sw of constant value (R0 when zero) to
// base + (index << scale) + disp. No Loongson fast path here. AT/T9 scratch.
2420 enc_class store_I_immI_enc (memory mem, immI src) %{
2421 MacroAssembler _masm(&cbuf);
2422 int base = $mem$$base;
2423 int index = $mem$$index;
2424 int scale = $mem$$scale;
2425 int disp = $mem$$disp;
2426 int value = $src$$constant;
2428 if( index != 0 ) {
2429 if (scale == 0) {
2430 __ daddu(AT, as_Register(base), as_Register(index));
2431 } else {
2432 __ dsll(AT, as_Register(index), scale);
2433 __ daddu(AT, as_Register(base), AT);
2434 }
2435 if( Assembler::is_simm16(disp) ) {
// Zero constant stores R0 directly, avoiding a move.
2436 if (value == 0) {
2437 __ sw(R0, AT, disp);
2438 } else {
2439 __ move(T9, value);
2440 __ sw(T9, AT, disp);
2441 }
2442 } else {
2443 if (value == 0) {
2444 __ move(T9, disp);
2445 __ addu(AT, AT, T9);
2446 __ sw(R0, AT, 0);
2447 } else {
2448 __ move(T9, disp);
2449 __ addu(AT, AT, T9);
2450 __ move(T9, value);
2451 __ sw(T9, AT, 0);
2452 }
2453 }
2454 } else {
2455 if( Assembler::is_simm16(disp) ) {
2456 if (value == 0) {
2457 __ sw(R0, as_Register(base), disp);
2458 } else {
2459 __ move(AT, value);
2460 __ sw(AT, as_Register(base), disp);
2461 }
2462 } else {
2463 if (value == 0) {
2464 __ move(T9, disp);
2465 __ addu(AT, as_Register(base), T9);
2466 __ sw(R0, AT, 0);
2467 } else {
2468 __ move(T9, disp);
2469 __ addu(AT, as_Register(base), T9);
2470 __ move(T9, value);
2471 __ sw(T9, AT, 0);
2472 }
2473 }
2474 }
2475 %}
// Load Narrow oop (compressed pointer): zero-extending 32-bit load (lwu)
// from base + (index << scale) + disp. The operand must not carry a
// relocation on the displacement — asserted below. AT/T9 are scratch.
2477 enc_class load_N_enc (mRegN dst, memory mem) %{
2478 MacroAssembler _masm(&cbuf);
2479 int dst = $dst$$reg;
2480 int base = $mem$$base;
2481 int index = $mem$$index;
2482 int scale = $mem$$scale;
2483 int disp = $mem$$disp;
2484 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2485 assert(disp_reloc == relocInfo::none, "cannot have disp");
2487 if( index != 0 ) {
2488 if (scale == 0) {
2489 __ daddu(AT, as_Register(base), as_Register(index));
2490 } else {
2491 __ dsll(AT, as_Register(index), scale);
2492 __ daddu(AT, as_Register(base), AT);
2493 }
2494 if( Assembler::is_simm16(disp) ) {
2495 __ lwu(as_Register(dst), AT, disp);
2496 } else {
2497 __ li(T9, disp);
2498 __ daddu(AT, AT, T9);
2499 __ lwu(as_Register(dst), AT, 0);
2500 }
2501 } else {
2502 if( Assembler::is_simm16(disp) ) {
2503 __ lwu(as_Register(dst), as_Register(base), disp);
2504 } else {
2505 __ li(T9, disp);
2506 __ daddu(AT, as_Register(base), T9);
2507 __ lwu(as_Register(dst), AT, 0);
2508 }
2509 }
2511 %}
// Load Pointer: 64-bit load (ld) from base + (index << scale) + disp.
// Like load_N_enc, a relocated displacement is not supported (asserted).
// AT/T9 are scratch.
2514 enc_class load_P_enc (mRegP dst, memory mem) %{
2515 MacroAssembler _masm(&cbuf);
2516 int dst = $dst$$reg;
2517 int base = $mem$$base;
2518 int index = $mem$$index;
2519 int scale = $mem$$scale;
2520 int disp = $mem$$disp;
2521 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2522 assert(disp_reloc == relocInfo::none, "cannot have disp");
2524 if( index != 0 ) {
2525 if (scale == 0) {
2526 __ daddu(AT, as_Register(base), as_Register(index));
2527 } else {
2528 __ dsll(AT, as_Register(index), scale);
2529 __ daddu(AT, as_Register(base), AT);
2530 }
2531 if( Assembler::is_simm16(disp) ) {
2532 __ ld(as_Register(dst), AT, disp);
2533 } else {
2534 __ li(T9, disp);
2535 __ daddu(AT, AT, T9);
2536 __ ld(as_Register(dst), AT, 0);
2537 }
2538 } else {
2539 if( Assembler::is_simm16(disp) ) {
2540 __ ld(as_Register(dst), as_Register(base), disp);
2541 } else {
2542 __ li(T9, disp);
2543 __ daddu(AT, as_Register(base), T9);
2544 __ ld(as_Register(dst), AT, 0);
2545 }
2546 }
2547 // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2548 %}
// Store Pointer from register: 64-bit store (sd) of src to
// base + (index << scale) + disp. AT/T9 are scratch.
2550 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2551 MacroAssembler _masm(&cbuf);
2552 int src = $src$$reg;
2553 int base = $mem$$base;
2554 int index = $mem$$index;
2555 int scale = $mem$$scale;
2556 int disp = $mem$$disp;
2558 if( index != 0 ) {
2559 if (scale == 0) {
2560 __ daddu(AT, as_Register(base), as_Register(index));
2561 } else {
2562 __ dsll(AT, as_Register(index), scale);
2563 __ daddu(AT, as_Register(base), AT);
2564 }
2565 if( Assembler::is_simm16(disp) ) {
2566 __ sd(as_Register(src), AT, disp);
2567 } else {
2568 __ move(T9, disp);
2569 __ daddu(AT, AT, T9);
2570 __ sd(as_Register(src), AT, 0);
2571 }
2572 } else {
2573 if( Assembler::is_simm16(disp) ) {
2574 __ sd(as_Register(src), as_Register(base), disp);
2575 } else {
2576 __ move(T9, disp);
2577 __ daddu(AT, as_Register(base), T9);
2578 __ sd(as_Register(src), AT, 0);
2579 }
2580 }
2581 %}
// Store Narrow oop from register: 32-bit store (sw) of the compressed
// pointer src to base + (index << scale) + disp. AT/T9 are scratch.
2583 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2584 MacroAssembler _masm(&cbuf);
2585 int src = $src$$reg;
2586 int base = $mem$$base;
2587 int index = $mem$$index;
2588 int scale = $mem$$scale;
2589 int disp = $mem$$disp;
2591 if( index != 0 ) {
2592 if (scale == 0) {
2593 __ daddu(AT, as_Register(base), as_Register(index));
2594 } else {
2595 __ dsll(AT, as_Register(index), scale);
2596 __ daddu(AT, as_Register(base), AT);
2597 }
2598 if( Assembler::is_simm16(disp) ) {
2599 __ sw(as_Register(src), AT, disp);
2600 } else {
2601 __ move(T9, disp);
// NOTE(review): addu (32-bit) on an address sum — compare the daddu
// used above; confirm this is safe for this port's address ranges.
2602 __ addu(AT, AT, T9);
2603 __ sw(as_Register(src), AT, 0);
2604 }
2605 } else {
2606 if( Assembler::is_simm16(disp) ) {
2607 __ sw(as_Register(src), as_Register(base), disp);
2608 } else {
2609 __ move(T9, disp);
2610 __ addu(AT, as_Register(base), T9);
2611 __ sw(as_Register(src), AT, 0);
2612 }
2613 }
2614 %}
// Store Pointer immediate: sd of a 64-bit constant (R0 when zero) to
// base + (index << scale) + disp. AT/T9 are scratch.
2616 enc_class store_P_immP_enc (memory mem, immP31 src) %{
2617 MacroAssembler _masm(&cbuf);
2618 int base = $mem$$base;
2619 int index = $mem$$index;
2620 int scale = $mem$$scale;
2621 int disp = $mem$$disp;
2622 long value = $src$$constant;
2624 if( index != 0 ) {
2625 if (scale == 0) {
2626 __ daddu(AT, as_Register(base), as_Register(index));
2627 } else {
2628 __ dsll(AT, as_Register(index), scale);
2629 __ daddu(AT, as_Register(base), AT);
2630 }
2631 if( Assembler::is_simm16(disp) ) {
2632 if (value == 0) {
2633 __ sd(R0, AT, disp);
2634 } else {
2635 __ move(T9, value);
2636 __ sd(T9, AT, disp);
2637 }
2638 } else {
2639 if (value == 0) {
2640 __ move(T9, disp);
2641 __ daddu(AT, AT, T9);
2642 __ sd(R0, AT, 0);
2643 } else {
2644 __ move(T9, disp);
2645 __ daddu(AT, AT, T9);
2646 __ move(T9, value);
2647 __ sd(T9, AT, 0);
2648 }
2649 }
2650 } else {
2651 if( Assembler::is_simm16(disp) ) {
2652 if (value == 0) {
2653 __ sd(R0, as_Register(base), disp);
2654 } else {
2655 __ move(AT, value);
2656 __ sd(AT, as_Register(base), disp);
2657 }
2658 } else {
2659 if (value == 0) {
2660 __ move(T9, disp);
2661 __ daddu(AT, as_Register(base), T9);
2662 __ sd(R0, AT, 0);
2663 } else {
2664 __ move(T9, disp);
2665 __ daddu(AT, as_Register(base), T9);
2666 __ move(T9, value);
2667 __ sd(T9, AT, 0);
2668 }
2669 }
2670 }
2671 %}
// Store narrow-oop NULL: always stores R0 (zero) as a 32-bit word to
// base + (index << scale) + disp. AT/T9 are scratch.
2674 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2675 MacroAssembler _masm(&cbuf);
2676 int base = $mem$$base;
2677 int index = $mem$$index;
2678 int scale = $mem$$scale;
2679 int disp = $mem$$disp;
2681 if(index!=0){
2682 if (scale == 0) {
2683 __ daddu(AT, as_Register(base), as_Register(index));
2684 } else {
2685 __ dsll(AT, as_Register(index), scale);
2686 __ daddu(AT, as_Register(base), AT);
2687 }
2689 if( Assembler::is_simm16(disp) ) {
2690 __ sw(R0, AT, disp);
2691 } else {
2692 __ move(T9, disp);
2693 __ daddu(AT, AT, T9);
2694 __ sw(R0, AT, 0);
2695 }
2696 }
2697 else {
2698 if( Assembler::is_simm16(disp) ) {
2699 __ sw(R0, as_Register(base), disp);
2700 } else {
2701 __ move(T9, disp);
2702 __ daddu(AT, as_Register(base), T9);
2703 __ sw(R0, AT, 0);
2704 }
2705 }
2706 %}
// Load Long: 64-bit load (ld) from base + (index << scale) + disp.
// Begins with a probing lw from [base + 0] so that a null base faults at
// the first instruction of the pattern (see the 2013/03/27 note below:
// the server JIT expects the exception offset at the pattern start).
2708 enc_class load_L_enc (mRegL dst, memory mem) %{
2709 MacroAssembler _masm(&cbuf);
2710 int base = $mem$$base;
2711 int index = $mem$$index;
2712 int scale = $mem$$scale;
2713 int disp = $mem$$disp;
2714 Register dst_reg = as_Register($dst$$reg);
2716 /*********************2013/03/27**************************
2717 * Jin: $base may contain a null object.
2718 * Server JIT force the exception_offset to be the pos of
2719 * the first instruction.
2720 * I insert such a 'null_check' at the beginning.
2721 *******************************************************/
// Implicit null check: faults here (pattern start) if base is null.
2723 __ lw(AT, as_Register(base), 0);
2725 /*********************2012/10/04**************************
2726 * Error case found in SortTest
2727 * 337 b java.util.Arrays::sort1 (401 bytes)
2728 * B73:
2729 * d34 lw T4.lo, [T4 + #16] #@loadL-lo
2730 * lw T4.hi, [T4 + #16]+4 #@loadL-hi
2731 *
2732 * The original instructions generated here are :
2733 * __ lw(dst_lo, as_Register(base), disp);
2734 * __ lw(dst_hi, as_Register(base), disp + 4);
2735 *******************************************************/
2737 if( index != 0 ) {
2738 if (scale == 0) {
2739 __ daddu(AT, as_Register(base), as_Register(index));
2740 } else {
2741 __ dsll(AT, as_Register(index), scale);
2742 __ daddu(AT, as_Register(base), AT);
2743 }
2744 if( Assembler::is_simm16(disp) ) {
2745 __ ld(dst_reg, AT, disp);
2746 } else {
2747 __ move(T9, disp);
2748 __ daddu(AT, AT, T9);
2749 __ ld(dst_reg, AT, 0);
2750 }
2751 } else {
// Copies base into AT before the ld (see the 2012/10/04 note: keeps
// dst and base usage disjoint when they are the same register).
2752 if( Assembler::is_simm16(disp) ) {
2753 __ move(AT, as_Register(base));
2754 __ ld(dst_reg, AT, disp);
2755 } else {
2756 __ move(T9, disp);
2757 __ daddu(AT, as_Register(base), T9);
2758 __ ld(dst_reg, AT, 0);
2759 }
2760 }
2761 %}
// Store Long from register: 64-bit store (sd) of src to
// base + (index << scale) + disp. AT/T9 are scratch.
2763 enc_class store_L_reg_enc (memory mem, mRegL src) %{
2764 MacroAssembler _masm(&cbuf);
2765 int base = $mem$$base;
2766 int index = $mem$$index;
2767 int scale = $mem$$scale;
2768 int disp = $mem$$disp;
2769 Register src_reg = as_Register($src$$reg);
2771 if( index != 0 ) {
2772 if (scale == 0) {
2773 __ daddu(AT, as_Register(base), as_Register(index));
2774 } else {
2775 __ dsll(AT, as_Register(index), scale);
2776 __ daddu(AT, as_Register(base), AT);
2777 }
2778 if( Assembler::is_simm16(disp) ) {
2779 __ sd(src_reg, AT, disp);
2780 } else {
2781 __ move(T9, disp);
2782 __ daddu(AT, AT, T9);
2783 __ sd(src_reg, AT, 0);
2784 }
2785 } else {
// Base is copied to AT before the store (mirrors load_L_enc's shape).
2786 if( Assembler::is_simm16(disp) ) {
2787 __ move(AT, as_Register(base));
2788 __ sd(src_reg, AT, disp);
2789 } else {
2790 __ move(T9, disp);
2791 __ daddu(AT, as_Register(base), T9);
2792 __ sd(src_reg, AT, 0);
2793 }
2794 }
2795 %}
// Store Long zero: sd of R0 to base + (index << scale) + disp.
// AT/T9 are scratch.
// NOTE(review): the large-disp paths use addu (32-bit add) where the
// register variant above uses daddu — confirm intended.
2797 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2798 MacroAssembler _masm(&cbuf);
2799 int base = $mem$$base;
2800 int index = $mem$$index;
2801 int scale = $mem$$scale;
2802 int disp = $mem$$disp;
2804 if( index != 0 ) {
2805 if (scale == 0) {
2806 __ daddu(AT, as_Register(base), as_Register(index));
2807 } else {
2808 __ dsll(AT, as_Register(index), scale);
2809 __ daddu(AT, as_Register(base), AT);
2810 }
2811 if( Assembler::is_simm16(disp) ) {
2812 __ sd(R0, AT, disp);
2813 } else {
2814 __ move(T9, disp);
2815 __ addu(AT, AT, T9);
2816 __ sd(R0, AT, 0);
2817 }
2818 } else {
2819 if( Assembler::is_simm16(disp) ) {
2820 __ move(AT, as_Register(base));
2821 __ sd(R0, AT, disp);
2822 } else {
2823 __ move(T9, disp);
2824 __ addu(AT, as_Register(base), T9);
2825 __ sd(R0, AT, 0);
2826 }
2827 }
2828 %}
// Load Float: 32-bit FP load (lwc1, or Loongson indexed gslwxc1) into dst
// from base + (index << scale) + disp. AT/T9 are integer scratch.
2830 enc_class load_F_enc (regF dst, memory mem) %{
2831 MacroAssembler _masm(&cbuf);
2832 int base = $mem$$base;
2833 int index = $mem$$index;
2834 int scale = $mem$$scale;
2835 int disp = $mem$$disp;
2836 FloatRegister dst = $dst$$FloatRegister;
2838 if( index != 0 ) {
2839 if( Assembler::is_simm16(disp) ) {
// gslwxc1 carries an 8-bit immediate displacement.
2840 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2841 if (scale == 0) {
2842 __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
2843 } else {
2844 __ dsll(AT, as_Register(index), scale);
2845 __ gslwxc1(dst, as_Register(base), AT, disp);
2846 }
2847 } else {
2848 if (scale == 0) {
2849 __ daddu(AT, as_Register(base), as_Register(index));
2850 } else {
2851 __ dsll(AT, as_Register(index), scale);
2852 __ daddu(AT, as_Register(base), AT);
2853 }
2854 __ lwc1(dst, AT, disp);
2855 }
2856 } else {
2857 if (scale == 0) {
2858 __ daddu(AT, as_Register(base), as_Register(index));
2859 } else {
2860 __ dsll(AT, as_Register(index), scale);
2861 __ daddu(AT, as_Register(base), AT);
2862 }
2863 __ move(T9, disp);
2864 if( UseLoongsonISA ) {
2865 __ gslwxc1(dst, AT, T9, 0);
2866 } else {
2867 __ daddu(AT, AT, T9);
2868 __ lwc1(dst, AT, 0);
2869 }
2870 }
2871 } else {
2872 if( Assembler::is_simm16(disp) ) {
2873 __ lwc1(dst, as_Register(base), disp);
2874 } else {
2875 __ move(T9, disp);
2876 if( UseLoongsonISA ) {
2877 __ gslwxc1(dst, as_Register(base), T9, 0);
2878 } else {
2879 __ daddu(AT, as_Register(base), T9);
2880 __ lwc1(dst, AT, 0);
2881 }
2882 }
2883 }
2884 %}
// Store Float: 32-bit FP store (swc1, or Loongson indexed gsswxc1) of src
// to base + (index << scale) + disp. AT/T9 are integer scratch registers.
// gsswxc1 carries an 8-bit immediate displacement, hence the is_simm(disp, 8)
// tier; otherwise the address is assembled in AT and swc1 is used.
2886 enc_class store_F_reg_enc (memory mem, regF src) %{
2887 MacroAssembler _masm(&cbuf);
2888 int base = $mem$$base;
2889 int index = $mem$$index;
2890 int scale = $mem$$scale;
2891 int disp = $mem$$disp;
2892 FloatRegister src = $src$$FloatRegister;
2894 if( index != 0 ) {
2895 if( Assembler::is_simm16(disp) ) {
2896 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2897 if (scale == 0) {
2898 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2899 } else {
2900 __ dsll(AT, as_Register(index), scale);
2901 __ gsswxc1(src, as_Register(base), AT, disp);
2902 }
2903 } else {
2904 if (scale == 0) {
2905 __ daddu(AT, as_Register(base), as_Register(index));
2906 } else {
2907 __ dsll(AT, as_Register(index), scale);
2908 __ daddu(AT, as_Register(base), AT);
2909 }
2910 __ swc1(src, AT, disp);
2911 }
2912 } else {
2913 if (scale == 0) {
2914 __ daddu(AT, as_Register(base), as_Register(index));
2915 } else {
2916 __ dsll(AT, as_Register(index), scale);
2917 __ daddu(AT, as_Register(base), AT);
2918 }
2919 __ move(T9, disp);
2920 if( UseLoongsonISA ) {
2921 __ gsswxc1(src, AT, T9, 0);
2922 } else {
2923 __ daddu(AT, AT, T9);
2924 __ swc1(src, AT, 0);
2925 }
2926 }
2927 } else {
2928 if( Assembler::is_simm16(disp) ) {
2929 __ swc1(src, as_Register(base), disp);
2930 } else {
2931 __ move(T9, disp);
2932 if( UseLoongsonISA ) {
// Fix: this is a STORE encoding, but the original emitted gslwxc1 (an
// indexed float LOAD), which would overwrite src from memory instead of
// storing it. Every other Loongson path in this enc_class uses gsswxc1.
2933 __ gsswxc1(src, as_Register(base), T9, 0);
2934 } else {
2935 __ daddu(AT, as_Register(base), T9);
2936 __ swc1(src, AT, 0);
2937 }
2938 }
2939 }
2940 %}
// Load Double: 64-bit FP load (ldc1, or Loongson indexed gsldxc1) into
// dst_reg from base + (index << scale) + disp. AT/T9 are integer scratch.
// NOTE(review): the fallback paths use addu (32-bit add) on the address —
// compare the daddu used elsewhere; confirm intended.
2942 enc_class load_D_enc (regD dst, memory mem) %{
2943 MacroAssembler _masm(&cbuf);
2944 int base = $mem$$base;
2945 int index = $mem$$index;
2946 int scale = $mem$$scale;
2947 int disp = $mem$$disp;
2948 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2950 if( index != 0 ) {
2951 if( Assembler::is_simm16(disp) ) {
// gsldxc1 carries an 8-bit immediate displacement.
2952 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2953 if (scale == 0) {
2954 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
2955 } else {
2956 __ dsll(AT, as_Register(index), scale);
2957 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
2958 }
2959 } else {
2960 if (scale == 0) {
2961 __ daddu(AT, as_Register(base), as_Register(index));
2962 } else {
2963 __ dsll(AT, as_Register(index), scale);
2964 __ daddu(AT, as_Register(base), AT);
2965 }
2966 __ ldc1(dst_reg, AT, disp);
2967 }
2968 } else {
2969 if (scale == 0) {
2970 __ daddu(AT, as_Register(base), as_Register(index));
2971 } else {
2972 __ dsll(AT, as_Register(index), scale);
2973 __ daddu(AT, as_Register(base), AT);
2974 }
2975 __ move(T9, disp);
2976 if( UseLoongsonISA ) {
2977 __ gsldxc1(dst_reg, AT, T9, 0);
2978 } else {
2979 __ addu(AT, AT, T9);
2980 __ ldc1(dst_reg, AT, 0);
2981 }
2982 }
2983 } else {
2984 if( Assembler::is_simm16(disp) ) {
2985 __ ldc1(dst_reg, as_Register(base), disp);
2986 } else {
2987 __ move(T9, disp);
2988 if( UseLoongsonISA ) {
2989 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
2990 } else {
2991 __ addu(AT, as_Register(base), T9);
2992 __ ldc1(dst_reg, AT, 0);
2993 }
2994 }
2995 }
2996 %}
// Store Double: 64-bit FP store (sdc1, or Loongson indexed gssdxc1) of
// src_reg to base + (index << scale) + disp. AT/T9 are integer scratch.
2998 enc_class store_D_reg_enc (memory mem, regD src) %{
2999 MacroAssembler _masm(&cbuf);
3000 int base = $mem$$base;
3001 int index = $mem$$index;
3002 int scale = $mem$$scale;
3003 int disp = $mem$$disp;
3004 FloatRegister src_reg = as_FloatRegister($src$$reg);
3006 if( index != 0 ) {
3007 if( Assembler::is_simm16(disp) ) {
// gssdxc1 carries an 8-bit immediate displacement.
3008 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3009 if (scale == 0) {
3010 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3011 } else {
3012 __ dsll(AT, as_Register(index), scale);
3013 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3014 }
3015 } else {
3016 if (scale == 0) {
3017 __ daddu(AT, as_Register(base), as_Register(index));
3018 } else {
3019 __ dsll(AT, as_Register(index), scale);
3020 __ daddu(AT, as_Register(base), AT);
3021 }
3022 __ sdc1(src_reg, AT, disp);
3023 }
3024 } else {
3025 if (scale == 0) {
3026 __ daddu(AT, as_Register(base), as_Register(index));
3027 } else {
3028 __ dsll(AT, as_Register(index), scale);
3029 __ daddu(AT, as_Register(base), AT);
3030 }
3031 __ move(T9, disp);
3032 if( UseLoongsonISA ) {
3033 __ gssdxc1(src_reg, AT, T9, 0);
3034 } else {
3035 __ addu(AT, AT, T9);
3036 __ sdc1(src_reg, AT, 0);
3037 }
3038 }
3039 } else {
3040 if( Assembler::is_simm16(disp) ) {
3041 __ sdc1(src_reg, as_Register(base), disp);
3042 } else {
3043 __ move(T9, disp);
3044 if( UseLoongsonISA ) {
3045 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3046 } else {
3047 __ addu(AT, as_Register(base), T9);
3048 __ sdc1(src_reg, AT, 0);
3049 }
3050 }
3051 }
3052 %}
// Call into the VM runtime: records a runtime_call relocation at the call
// site, loads the absolute target into T9 with a fixed-length li48 sequence
// (so the site is patchable), then jalr + branch-delay nop.
3054 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
3055 MacroAssembler _masm(&cbuf);
3056 // This is the instruction starting address for relocation info.
3057 __ block_comment("Java_To_Runtime");
3058 cbuf.set_insts_mark();
3059 __ relocate(relocInfo::runtime_call_type);
3061 __ li48(T9, (long)$meth$$method);
3062 __ jalr(T9);
3063 __ nop();
3064 %}
// Java static call: picks the relocation type (runtime / opt-virtual /
// static) from the call context, emits li(T9, target) + jalr + delay nop,
// and for a resolved _method also emits the java-to-interpreter stub.
// NOTE(review): uses __ li(...) here but li48 in Java_To_Runtime; if this
// site is patched later, the load sequence length must be fixed — confirm
// li emits a constant-length sequence for method addresses.
3066 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
3067 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
3068 // who we intended to call.
3069 MacroAssembler _masm(&cbuf);
3070 cbuf.set_insts_mark();
3072 if ( !_method ) {
3073 __ relocate(relocInfo::runtime_call_type);
3074 //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
3075 // runtime_call_Relocation::spec(), RELOC_IMM32 );
3076 } else if(_optimized_virtual) {
3077 __ relocate(relocInfo::opt_virtual_call_type);
3078 //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
3079 // opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
3080 } else {
3081 __ relocate(relocInfo::static_call_type);
3082 //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
3083 // static_call_Relocation::spec(), RELOC_IMM32 );
3084 }
3086 __ li(T9, $meth$$method);
3087 __ jalr(T9);
3088 __ nop();
3089 if( _method ) { // Emit stub for static call
3090 emit_java_to_interp(cbuf);
3091 }
3092 %}
3095 /*
3096 * [Ref: LIR_Assembler::ic_call() ]
3097 */
// Java dynamic (inline-cache) call: delegates entirely to
// MacroAssembler::ic_call, which emits the IC-holder load and the call.
3098 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
3099 MacroAssembler _masm(&cbuf);
3100 __ block_comment("Java_Dynamic_Call");
3101 __ ic_call((address)$meth$$method);
3102 %}
// Converts the AT result of fast_lock/fast_unlock into a flags register
// value: flags = 0 when AT == 0 (success), otherwise all-ones.
3105 enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
3106 Register flags = $cr$$Register;
3107 Label L;
3109 MacroAssembler _masm(&cbuf);
// flags = 0; keep it if AT is zero, else overwrite with 0xFFFFFFFF.
3111 __ addu(flags, R0, R0);
3112 __ beq(AT, R0, L);
3113 __ delayed()->nop();
3114 __ move(flags, 0xFFFFFFFF);
3115 __ bind(L);
3116 %}
// Partial subtype check: slow-path klass subtype test via
// check_klass_subtype_slow_path. Sets result = 0 on a subtype hit and
// result = 1 on a miss. tmp holds the scan length; T9 is extra scratch.
3118 enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
3119 Register result = $result$$Register;
3120 Register sub = $sub$$Register;
3121 Register super = $super$$Register;
3122 Register length = $tmp$$Register;
3123 Register tmp = T9;
3124 Label miss;
3126 /* 2012/9/28 Jin: result may be the same as sub
3127 * 47c B40: # B21 B41 <- B20 Freq: 0.155379
3128 * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
3129 * 4bc mov S2, NULL #@loadConP
3130 * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
3131 */
3132 MacroAssembler _masm(&cbuf);
3133 Label done;
// Falls through here on subtype success; branches to 'miss' on failure.
3134 __ check_klass_subtype_slow_path(sub, super, length, tmp,
3135 NULL, &miss,
3136 /*set_cond_codes:*/ true);
3137 /* 2013/7/22 Jin: Refer to X86_64's RDI */
3138 __ move(result, 0);
3139 __ b(done);
3140 __ nop();
3142 __ bind(miss);
3143 __ move(result, 1);
3144 __ bind(done);
3145 %}
3147 %}
3150 //---------MIPS FRAME--------------------------------------------------------------
3151 // Definition of frame structure and management information.
3152 //
3153 // S T A C K L A Y O U T Allocators stack-slot number
3154 // | (to get allocators register number
3155 // G Owned by | | v add SharedInfo::stack0)
3156 // r CALLER | |
3157 // o | +--------+ pad to even-align allocators stack-slot
3158 // w V | pad0 | numbers; owned by CALLER
3159 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3160 // h ^ | in | 5
3161 // | | args | 4 Holes in incoming args owned by SELF
3162 // | | old | | 3
3163 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3164 // v | | ret | 3 return address
3165 // Owned by +--------+
3166 // Self | pad2 | 2 pad to align old SP
3167 // | +--------+ 1
3168 // | | locks | 0
3169 // | +--------+----> SharedInfo::stack0, even aligned
3170 // | | pad1 | 11 pad to align new SP
3171 // | +--------+
3172 // | | | 10
3173 // | | spills | 9 spills
3174 // V | | 8 (pad0 slot for callee)
3175 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3176 // ^ | out | 7
3177 // | | args | 6 Holes in outgoing args owned by CALLEE
3178 // Owned by new | |
3179 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3180 // | |
3181 //
3182 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3183 // known from SELF's arguments and the Java calling convention.
3184 // Region 6-7 is determined per call site.
3185 // Note 2: If the calling convention leaves holes in the incoming argument
3186 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE.  Holes should not be necessary in the
// incoming area, as the Java calling convention is completely under
// the control of the AD file.  Doubles can be sorted and packed to
// avoid holes.  Holes in the outgoing arguments may be necessary for
3191 // varargs C calling conventions.
3192 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3193 // even aligned with pad0 as needed.
3194 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3195 // region 6-11 is even aligned; it may be padded out more so that
3196 // the region from SP to FP meets the minimum stack alignment.
3197 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3198 // alignment. Region 11, pad1, may be dynamically extended so that
3199 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006
  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
  // LP64 uses two 32-bit slots per lock word, 32-bit uses one.
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    // Delegate Java argument placement to the shared runtime.
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    // Delegate native (C ABI) argument placement to the shared runtime.
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    // Integer/pointer results in V0 (V0:V0_H for 64-bit pairs),
    // float/double results in F0 (F0:F0_H for double).
                                  /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    // Same mapping as c_return_value: V0/F0 (with high halves for pairs).
                                  /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
// Defaults below apply to every operand/instruction unless overridden
// in the individual definition.
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);      // Required cost attribute
ins_attrib ins_size(32);       // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);   // Required alignment attribute (must be a power of 2)
                               // specifies the alignment that some part of the instruction (not
                               // necessarily the start) requires. If > 1, a compute_padding()
                               // function must be provided for the instruction
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors
// 64-bit vector operand, allocated in the double FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
// (MIPS has no hardware flags; this is a pseudo register class.)
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate
// Generic 32-bit integer constant (any value; higher op_cost steers the
// matcher toward the narrower variants below when they apply).
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer maximum (2^31 - 1).
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed immediate.
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed immediate (fits the MIPS I-type immediate field).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift amount.
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
// (is_int_mask() returns -1 when the value is not a contiguous mask).
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 64-bit shift amount in the upper word range.
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range shifted by one: [-32767, 32768].
// NOTE(review): the "_sub" suffix suggests this covers constants whose
// negation fits a 16-bit add immediate (sub x, imm == add x, -imm) --
// confirm at the matching sub rules.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword-wide masking.
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
// Generic pointer constant (any value).
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-relocatable pointer constant that fits in 31 bits.
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Oop constants, or pointers too expensive to synthesize inline
// (more than 3 instructions via set64) -- loaded from the constant pool.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Non-oop pointers cheap enough (<= 3 instructions) to materialize inline.
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate
// Compressed (narrow) oop constant.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Compressed klass pointer constant.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
// Generic 64-bit integer constant (any value).
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
// (is_jlong_mask() returns -1 when the value is not a contiguous mask).
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate (fits the MIPS I-type immediate field).
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range shifted by one: [-32767, 32768]; see immI16_sub.
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
//single-precision floating-point zero
// (compared by bit pattern, so this matches +0.0f only, not -0.0f)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point zero
// (compared by bit pattern, so this matches +0.0 only, not -0.0)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register
// General integer register operand, allocatable anywhere in int_reg.
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the argument registers (no_Ax_int_reg class).
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

// The operands below pin an integer value to one specific machine
// register (Sx saved, Tx temporary, Ax argument, Vx return registers),
// for instruct rules that require a fixed register.

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}


operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow (compressed) oop register operand; shares the int_reg class.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// The operands below pin a narrow oop to one specific machine register,
// for instruct rules that require a fixed register.

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register
// General pointer register operand, allocatable anywhere in p_reg.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register excluding T8 (no_T8_p_reg class).
operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// The operands below pin a pointer to one specific machine register
// (allocated from the corresponding xx_long_reg class).  All of them
// except t8_RegP also match no_T8_mRegP, since T8 is the one register
// excluded from that class.

operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 is excluded from no_T8_mRegP, so (unlike its siblings) this
// operand intentionally does not match it.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}


operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4668 /*
4669 operand mSPRegP(mRegP reg) %{
4670 constraint(ALLOC_IN_RC(sp_reg));
4671 match(reg);
4673 format %{ "SP" %}
4674 interface(REG_INTER);
4675 %}
4677 operand mFPRegP(mRegP reg) %{
4678 constraint(ALLOC_IN_RC(fp_reg));
4679 match(reg);
4681 format %{ "FP" %}
4682 interface(REG_INTER);
4683 %}
4684 */
// Long register operands. mRegL is the generic allocatable long-register
// operand; the xxRegL variants below pin allocation to one specific physical
// register (register class named after it), while still matching RegL/mRegL
// so the matcher can use them interchangeably.
4686 operand mRegL() %{
4687 constraint(ALLOC_IN_RC(long_reg));
4688 match(RegL);
4690 format %{ %}
4691 interface(REG_INTER);
4692 %}
4694 operand v0RegL() %{
4695 constraint(ALLOC_IN_RC(v0_long_reg));
4696 match(RegL);
4697 match(mRegL);
4699 format %{ %}
4700 interface(REG_INTER);
4701 %}
4703 operand v1RegL() %{
4704 constraint(ALLOC_IN_RC(v1_long_reg));
4705 match(RegL);
4706 match(mRegL);
4708 format %{ %}
4709 interface(REG_INTER);
4710 %}
// NOTE(review): a0RegL is the only variant with a non-empty format ("A0");
// presumably an oversight vs its siblings, harmless either way.
4712 operand a0RegL() %{
4713 constraint(ALLOC_IN_RC(a0_long_reg));
4714 match(RegL);
4715 match(mRegL);
4717 format %{ "A0" %}
4718 interface(REG_INTER);
4719 %}
4721 operand a1RegL() %{
4722 constraint(ALLOC_IN_RC(a1_long_reg));
4723 match(RegL);
4724 match(mRegL);
4726 format %{ %}
4727 interface(REG_INTER);
4728 %}
4730 operand a2RegL() %{
4731 constraint(ALLOC_IN_RC(a2_long_reg));
4732 match(RegL);
4733 match(mRegL);
4735 format %{ %}
4736 interface(REG_INTER);
4737 %}
4739 operand a3RegL() %{
4740 constraint(ALLOC_IN_RC(a3_long_reg));
4741 match(RegL);
4742 match(mRegL);
4744 format %{ %}
4745 interface(REG_INTER);
4746 %}
4748 operand t0RegL() %{
4749 constraint(ALLOC_IN_RC(t0_long_reg));
4750 match(RegL);
4751 match(mRegL);
4753 format %{ %}
4754 interface(REG_INTER);
4755 %}
4757 operand t1RegL() %{
4758 constraint(ALLOC_IN_RC(t1_long_reg));
4759 match(RegL);
4760 match(mRegL);
4762 format %{ %}
4763 interface(REG_INTER);
4764 %}
4766 operand t2RegL() %{
4767 constraint(ALLOC_IN_RC(t2_long_reg));
4768 match(RegL);
4769 match(mRegL);
4771 format %{ %}
4772 interface(REG_INTER);
4773 %}
4775 operand t3RegL() %{
4776 constraint(ALLOC_IN_RC(t3_long_reg));
4777 match(RegL);
4778 match(mRegL);
4780 format %{ %}
4781 interface(REG_INTER);
4782 %}
4784 operand t8RegL() %{
4785 constraint(ALLOC_IN_RC(t8_long_reg));
4786 match(RegL);
4787 match(mRegL);
4789 format %{ %}
4790 interface(REG_INTER);
4791 %}
4793 operand a4RegL() %{
4794 constraint(ALLOC_IN_RC(a4_long_reg));
4795 match(RegL);
4796 match(mRegL);
4798 format %{ %}
4799 interface(REG_INTER);
4800 %}
4802 operand a5RegL() %{
4803 constraint(ALLOC_IN_RC(a5_long_reg));
4804 match(RegL);
4805 match(mRegL);
4807 format %{ %}
4808 interface(REG_INTER);
4809 %}
4811 operand a6RegL() %{
4812 constraint(ALLOC_IN_RC(a6_long_reg));
4813 match(RegL);
4814 match(mRegL);
4816 format %{ %}
4817 interface(REG_INTER);
4818 %}
4820 operand a7RegL() %{
4821 constraint(ALLOC_IN_RC(a7_long_reg));
4822 match(RegL);
4823 match(mRegL);
4825 format %{ %}
4826 interface(REG_INTER);
4827 %}
4829 operand s0RegL() %{
4830 constraint(ALLOC_IN_RC(s0_long_reg));
4831 match(RegL);
4832 match(mRegL);
4834 format %{ %}
4835 interface(REG_INTER);
4836 %}
4838 operand s1RegL() %{
4839 constraint(ALLOC_IN_RC(s1_long_reg));
4840 match(RegL);
4841 match(mRegL);
4843 format %{ %}
4844 interface(REG_INTER);
4845 %}
4847 operand s2RegL() %{
4848 constraint(ALLOC_IN_RC(s2_long_reg));
4849 match(RegL);
4850 match(mRegL);
4852 format %{ %}
4853 interface(REG_INTER);
4854 %}
4856 operand s3RegL() %{
4857 constraint(ALLOC_IN_RC(s3_long_reg));
4858 match(RegL);
4859 match(mRegL);
4861 format %{ %}
4862 interface(REG_INTER);
4863 %}
4865 operand s4RegL() %{
4866 constraint(ALLOC_IN_RC(s4_long_reg));
4867 match(RegL);
4868 match(mRegL);
4870 format %{ %}
4871 interface(REG_INTER);
4872 %}
// No s5/s6 long variants are declared here (only s0-s4 and s7).
4874 operand s7RegL() %{
4875 constraint(ALLOC_IN_RC(s7_long_reg));
4876 match(RegL);
4877 match(mRegL);
4879 format %{ %}
4880 interface(REG_INTER);
4881 %}
4883 // Floating register operands (single precision), allocated from flt_reg.
4884 operand regF() %{
4885 constraint(ALLOC_IN_RC(flt_reg));
4886 match(RegF);
4888 format %{ %}
4889 interface(REG_INTER);
4890 %}
4892 // Double precision floating register operands, allocated from dbl_reg.
4893 operand regD() %{
4894 constraint(ALLOC_IN_RC(dbl_reg));
4895 match(RegD);
4897 format %{ %}
4898 interface(REG_INTER);
4899 %}
4901 //----------Memory Operands----------------------------------------------------
4902 // Indirect Memory Operand: [reg], no index, no displacement.
4903 operand indirect(mRegP reg) %{
4904 constraint(ALLOC_IN_RC(p_reg));
4905 match(reg);
4907 format %{ "[$reg] @ indirect" %}
4908 interface(MEMORY_INTER) %{
4909 base($reg);
4910 index(0x0); /* NO_INDEX */
4911 scale(0x0);
4912 disp(0x0);
4913 %}
4914 %}
4916 // Indirect Memory Plus Short Offset Operand: [reg + 8-bit offset].
4917 operand indOffset8(mRegP reg, immL8 off)
4918 %{
4919 constraint(ALLOC_IN_RC(p_reg));
4920 match(AddP reg off);
4922 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4923 interface(MEMORY_INTER) %{
4924 base($reg);
4925 index(0x0); /* NO_INDEX */
4926 scale(0x0);
4927 disp($off);
4928 %}
4929 %}
4931 // Indirect Memory Times Scale Plus Index Register: [reg + lreg << scale].
4932 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
4933 %{
4934 constraint(ALLOC_IN_RC(p_reg));
4935 match(AddP reg (LShiftL lreg scale));
4937 op_cost(10);
4938 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
4939 interface(MEMORY_INTER) %{
4940 base($reg);
4941 index($lreg);
4942 scale($scale);
4943 disp(0x0);
4944 %}
4945 %}
4948 // [base + index + offset] with an 8-bit displacement.
4949 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
4950 %{
4951 constraint(ALLOC_IN_RC(p_reg));
4952 op_cost(5);
4953 match(AddP (AddP base index) off);
4955 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
4956 interface(MEMORY_INTER) %{
4957 base($base);
4958 index($index);
4959 scale(0x0);
4960 disp($off);
4961 %}
4962 %}
4964 // [base + index + offset] where the int index is sign-extended to long.
4965 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
4966 %{
4967 constraint(ALLOC_IN_RC(p_reg));
4968 op_cost(5);
4969 match(AddP (AddP base (ConvI2L index)) off);
4971 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
4972 interface(MEMORY_INTER) %{
4973 base($base);
4974 index($index);
4975 scale(0x0);
4976 disp($off);
4977 %}
4978 %}
4980 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand.
4981 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
4982 %{
4983 constraint(ALLOC_IN_RC(p_reg));
4984 match(AddP (AddP reg (LShiftL lreg scale)) off);
4986 op_cost(10);
4987 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
4988 interface(MEMORY_INTER) %{
4989 base($reg);
4990 index($lreg);
4991 scale($scale);
4992 disp($off);
4993 %}
4994 %}
4996 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
4997 %{
4998 constraint(ALLOC_IN_RC(p_reg));
4999 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5001 op_cost(10);
5002 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5003 interface(MEMORY_INTER) %{
5004 base($reg);
5005 index($ireg);
5006 scale($scale);
5007 disp($off);
5008 %}
5009 %}
5011 // [base + index<<scale + offset]
5012 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5013 %{
5014 constraint(ALLOC_IN_RC(p_reg));
5015 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5016 op_cost(10);
5017 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5019 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5020 interface(MEMORY_INTER) %{
5021 base($base);
5022 index($index);
5023 scale($scale);
5024 disp($off);
5025 %}
5026 %}
5028 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand,
5028 // with a compressed-oop base; only valid when the oop shift is zero so the
5028 // DecodeN base is usable directly as an address.
5029 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5030 %{
5031 predicate(Universe::narrow_oop_shift() == 0);
5032 constraint(ALLOC_IN_RC(p_reg));
5033 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5035 op_cost(10);
5036 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5037 interface(MEMORY_INTER) %{
5038 base($reg);
5039 index($lreg);
5040 scale($scale);
5041 disp($off);
5042 %}
5043 %}
5045 // [base + index<<scale + offset] for compressed Oops
5046 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5047 %{
5048 constraint(ALLOC_IN_RC(p_reg));
5049 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5050 predicate(Universe::narrow_oop_shift() == 0);
5051 op_cost(10);
5052 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5054 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5055 interface(MEMORY_INTER) %{
5056 base($base);
5057 index($index);
5058 scale($scale);
5059 disp($off);
5060 %}
5061 %}
5063 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5064 // Indirect Memory Plus Long Offset Operand: [reg + 32-bit offset].
5065 operand indOffset32(mRegP reg, immL32 off) %{
5066 constraint(ALLOC_IN_RC(p_reg));
5067 op_cost(20);
5068 match(AddP reg off);
5070 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5071 interface(MEMORY_INTER) %{
5072 base($reg);
5073 index(0x0); /* NO_INDEX */
5074 scale(0x0);
5075 disp($off);
5076 %}
5077 %}
5079 // Indirect Memory Plus Index Register: [addr + index].
5080 operand indIndex(mRegP addr, mRegL index) %{
5081 constraint(ALLOC_IN_RC(p_reg));
5082 match(AddP addr index);
5084 op_cost(20);
5085 format %{"[$addr + $index] @ indIndex" %}
5086 interface(MEMORY_INTER) %{
5087 base($addr);
5088 index($index);
5089 scale(0x0);
5090 disp(0x0);
5091 %}
5092 %}
// Memory operands addressed through a compressed klass pointer. All of them
// are guarded by narrow_klass_shift() == 0, so the DecodeNKlass value can be
// used directly as the base address without a shift.
5094 operand indirectNarrowKlass(mRegN reg)
5095 %{
5096 predicate(Universe::narrow_klass_shift() == 0);
5097 constraint(ALLOC_IN_RC(p_reg));
5098 op_cost(10);
5099 match(DecodeNKlass reg);
5101 format %{ "[$reg] @ indirectNarrowKlass" %}
5102 interface(MEMORY_INTER) %{
5103 base($reg);
5104 index(0x0);
5105 scale(0x0);
5106 disp(0x0);
5107 %}
5108 %}
5110 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5111 %{
5112 predicate(Universe::narrow_klass_shift() == 0);
5113 constraint(ALLOC_IN_RC(p_reg));
5114 op_cost(10);
5115 match(AddP (DecodeNKlass reg) off);
5117 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5118 interface(MEMORY_INTER) %{
5119 base($reg);
5120 index(0x0);
5121 scale(0x0);
5122 disp($off);
5123 %}
5124 %}
5126 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5127 %{
5128 predicate(Universe::narrow_klass_shift() == 0);
5129 constraint(ALLOC_IN_RC(p_reg));
5130 op_cost(10);
5131 match(AddP (DecodeNKlass reg) off);
5133 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5134 interface(MEMORY_INTER) %{
5135 base($reg);
5136 index(0x0);
5137 scale(0x0);
5138 disp($off);
5139 %}
5140 %}
5142 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5143 %{
5144 predicate(Universe::narrow_klass_shift() == 0);
5145 constraint(ALLOC_IN_RC(p_reg));
5146 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5148 op_cost(10);
5149 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5150 interface(MEMORY_INTER) %{
5151 base($reg);
5152 index($lreg);
5153 scale(0x0);
5154 disp($off);
5155 %}
5156 %}
5158 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5159 %{
5160 predicate(Universe::narrow_klass_shift() == 0);
5161 constraint(ALLOC_IN_RC(p_reg));
5162 match(AddP (DecodeNKlass reg) lreg);
5164 op_cost(10);
5165 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5166 interface(MEMORY_INTER) %{
5167 base($reg);
5168 index($lreg);
5169 scale(0x0);
5170 disp(0x0);
5171 %}
5172 %}
5174 // Indirect Memory Operand through a compressed oop; valid only when the
5174 // narrow-oop shift is zero so DecodeN is address-equivalent to the base.
5175 operand indirectNarrow(mRegN reg)
5176 %{
5177 predicate(Universe::narrow_oop_shift() == 0);
5178 constraint(ALLOC_IN_RC(p_reg));
5179 op_cost(10);
5180 match(DecodeN reg);
5182 format %{ "[$reg] @ indirectNarrow" %}
5183 interface(MEMORY_INTER) %{
5184 base($reg);
5185 index(0x0);
5186 scale(0x0);
5187 disp(0x0);
5188 %}
5189 %}
5191 // Indirect Memory Plus Short Offset Operand (compressed-oop base).
5192 operand indOffset8Narrow(mRegN reg, immL8 off)
5193 %{
5194 predicate(Universe::narrow_oop_shift() == 0);
5195 constraint(ALLOC_IN_RC(p_reg));
5196 op_cost(10);
5197 match(AddP (DecodeN reg) off);
5199 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5200 interface(MEMORY_INTER) %{
5201 base($reg);
5202 index(0x0);
5203 scale(0x0);
5204 disp($off);
5205 %}
5206 %}
5208 // Indirect Memory Plus Index Register Plus Offset Operand (compressed-oop base).
5209 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5210 %{
5211 predicate(Universe::narrow_oop_shift() == 0);
5212 constraint(ALLOC_IN_RC(p_reg));
5213 match(AddP (AddP (DecodeN reg) lreg) off);
5215 op_cost(10);
5216 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5217 interface(MEMORY_INTER) %{
5218 base($reg);
5219 index($lreg);
5220 scale(0x0);
5221 disp($off);
5222 %}
5223 %}
5225 //----------Load Long Memory Operands------------------------------------------
5226 // The load-long idiom will use its address expression again after loading
5227 // the first word of the long. If the load-long destination overlaps with
5228 // registers used in the addressing expression, the 2nd half will be loaded
5229 // from a clobbered address. Fix this by requiring that load-long use
5230 // address registers that do not overlap with the load-long target.
5232 // load-long support: high op_cost discourages use outside load-long.
5233 operand load_long_RegP() %{
5234 constraint(ALLOC_IN_RC(p_reg));
5235 match(RegP);
5236 match(mRegP);
5237 op_cost(100);
5238 format %{ %}
5239 interface(REG_INTER);
5240 %}
5242 // Indirect Memory Operand Long: [reg].
5243 operand load_long_indirect(load_long_RegP reg) %{
5244 constraint(ALLOC_IN_RC(p_reg));
5245 match(reg);
5247 format %{ "[$reg]" %}
5248 interface(MEMORY_INTER) %{
5249 base($reg);
5250 index(0x0);
5251 scale(0x0);
5252 disp(0x0);
5253 %}
5254 %}
5256 // Indirect Memory Plus Long Offset Operand: [reg + 32-bit offset].
5257 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5258 match(AddP reg off);
5260 format %{ "[$reg + $off]" %}
5261 interface(MEMORY_INTER) %{
5262 base($reg);
5263 index(0x0);
5264 scale(0x0);
5265 disp($off);
5266 %}
5267 %}
5269 //----------Conditional Branch Operands----------------------------------------
5270 // Comparison Op - This is the operation of the comparison, and is limited to
5271 // the following set of codes:
5272 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5273 //
5274 // Other attributes of the comparison, such as unsignedness, are specified
5275 // by the comparison instruction that sets a condition code flags register.
5276 // That result is represented by a flags operand whose subtype is appropriate
5277 // to the unsignedness (etc.) of the comparison.
5278 //
5279 // Later, the instruction which matches both the Comparison Op (a Bool) and
5280 // the flags (produced by the Cmp) specifies the coding of the comparison op
5281 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5283 // Comparison Code
5284 operand cmpOp() %{
5285 match(Bool);
5287 format %{ "" %}
5288 interface(COND_INTER) %{
5289 equal(0x01);
5290 not_equal(0x02);
5291 greater(0x03);
5292 greater_equal(0x04);
5293 less(0x05);
5294 less_equal(0x06);
5295 overflow(0x7);
5296 no_overflow(0x8);
5297 %}
5298 %}
5301 // Comparison Code
5302 // Comparison Code, unsigned compare. Used by FP also, with
5303 // C2 (unordered) turned into GT or LT already. The other bits
5304 // C0 and C3 are turned into Carry & Zero flags.
5304 // NOTE(review): cmpOpU encodes identically to cmpOp here; the signedness
5304 // distinction is carried by the instruction that consumes it.
5305 operand cmpOpU() %{
5306 match(Bool);
5308 format %{ "" %}
5309 interface(COND_INTER) %{
5310 equal(0x01);
5311 not_equal(0x02);
5312 greater(0x03);
5313 greater_equal(0x04);
5314 less(0x05);
5315 less_equal(0x06);
5316 overflow(0x7);
5317 no_overflow(0x8);
5318 %}
5319 %}
5321 /*
5322 // Comparison Code, unsigned compare. Used by FP also, with
5323 // C2 (unordered) turned into GT or LT already. The other bits
5324 // C0 and C3 are turned into Carry & Zero flags.
5325 operand cmpOpU() %{
5326 match(Bool);
5328 format %{ "" %}
5329 interface(COND_INTER) %{
5330 equal(0x4);
5331 not_equal(0x5);
5332 less(0x2);
5333 greater_equal(0x3);
5334 less_equal(0x6);
5335 greater(0x7);
5336 %}
5337 %}
5338 */
5339 /*
5340 // Comparison Code for FP conditional move
5341 operand cmpOp_fcmov() %{
5342 match(Bool);
5344 format %{ "" %}
5345 interface(COND_INTER) %{
5346 equal (0x01);
5347 not_equal (0x02);
5348 greater (0x03);
5349 greater_equal(0x04);
5350 less (0x05);
5351 less_equal (0x06);
5352 %}
5353 %}
5355 // Comparison Code used in long compares
5356 operand cmpOp_commute() %{
5357 match(Bool);
5359 format %{ "" %}
5360 interface(COND_INTER) %{
5361 equal(0x4);
5362 not_equal(0x5);
5363 less(0xF);
5364 greater_equal(0xE);
5365 less_equal(0xD);
5366 greater(0xC);
5367 %}
5368 %}
5369 */
5371 //----------Special Memory Operands--------------------------------------------
5372 // Stack Slot Operand - This operand is used for loading and storing temporary
5373 // values on the stack where a match requires a value to
5374 // flow through memory.
5374 // All five variants (P/I/F/D/L) address relative to SP (encoding 0x1d) with
5374 // the stack offset carried in disp; they have no match rule because the
5374 // matcher generates them internally for spills.
5375 operand stackSlotP(sRegP reg) %{
5376 constraint(ALLOC_IN_RC(stack_slots));
5377 // No match rule because this operand is only generated in matching
5378 op_cost(50);
5379 format %{ "[$reg]" %}
5380 interface(MEMORY_INTER) %{
5381 base(0x1d); // SP
5382 index(0x0); // No Index
5383 scale(0x0); // No Scale
5384 disp($reg); // Stack Offset
5385 %}
5386 %}
5388 operand stackSlotI(sRegI reg) %{
5389 constraint(ALLOC_IN_RC(stack_slots));
5390 // No match rule because this operand is only generated in matching
5391 op_cost(50);
5392 format %{ "[$reg]" %}
5393 interface(MEMORY_INTER) %{
5394 base(0x1d); // SP
5395 index(0x0); // No Index
5396 scale(0x0); // No Scale
5397 disp($reg); // Stack Offset
5398 %}
5399 %}
5401 operand stackSlotF(sRegF reg) %{
5402 constraint(ALLOC_IN_RC(stack_slots));
5403 // No match rule because this operand is only generated in matching
5404 op_cost(50);
5405 format %{ "[$reg]" %}
5406 interface(MEMORY_INTER) %{
5407 base(0x1d); // SP
5408 index(0x0); // No Index
5409 scale(0x0); // No Scale
5410 disp($reg); // Stack Offset
5411 %}
5412 %}
5414 operand stackSlotD(sRegD reg) %{
5415 constraint(ALLOC_IN_RC(stack_slots));
5416 // No match rule because this operand is only generated in matching
5417 op_cost(50);
5418 format %{ "[$reg]" %}
5419 interface(MEMORY_INTER) %{
5420 base(0x1d); // SP
5421 index(0x0); // No Index
5422 scale(0x0); // No Scale
5423 disp($reg); // Stack Offset
5424 %}
5425 %}
5427 operand stackSlotL(sRegL reg) %{
5428 constraint(ALLOC_IN_RC(stack_slots));
5429 // No match rule because this operand is only generated in matching
5430 op_cost(50);
5431 format %{ "[$reg]" %}
5432 interface(MEMORY_INTER) %{
5433 base(0x1d); // SP
5434 index(0x0); // No Index
5435 scale(0x0); // No Scale
5436 disp($reg); // Stack Offset
5437 %}
5438 %}
5441 //------------------------OPERAND CLASSES--------------------------------------
5442 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5442 // Every addressing form a load/store instruct can accept via the "memory"
5442 // operand class; new memory operands must be added here to be usable.
5443 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5446 //----------PIPELINE-----------------------------------------------------------
5447 // Rules which define the behavior of the target architectures pipeline.
5449 pipeline %{
5451 //----------ATTRIBUTES---------------------------------------------------------
5452 attributes %{
5453 fixed_size_instructions; // Fixed size instructions
5454 branch_has_delay_slot; // branches have a delay slot on gs2
5455 max_instructions_per_bundle = 1; // 1 instruction per bundle
5456 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5457 bundle_unit_size=4;
5458 instruction_unit_size = 4; // An instruction is 4 bytes long
5459 instruction_fetch_unit_size = 16; // The processor fetches one line
5460 instruction_fetch_units = 1; // of 16 bytes
5462 // List of nop instructions
5463 nops( MachNop );
5464 %}
5466 //----------RESOURCES----------------------------------------------------------
5467 // Resources are the functional units available to the machine:
5467 // 4 decoders, 2 integer ALUs, 2 FPUs, one memory port and one branch unit.
5469 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5471 //----------PIPELINE DESCRIPTION-----------------------------------------------
5472 // Pipeline Description specifies the stages in the machine's pipeline
5474 // IF: fetch
5475 // ID: decode
5476 // RD: read
5477 // CA: calculate
5478 // WB: write back
5479 // CM: commit
5481 pipe_desc(IF, ID, RD, CA, WB, CM);
5484 //----------PIPELINE CLASSES---------------------------------------------------
5485 // Pipeline Classes describe the stages in which input and output are
5486 // referenced by the hardware pipeline.
5488 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5489 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5490 single_instruction;
5491 src1 : RD(read);
5492 src2 : RD(read);
5493 dst : WB(write)+1;
5494 DECODE : ID;
5495 ALU : CA;
5496 %}
5498 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5499 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5500 src1 : RD(read);
5501 src2 : RD(read);
5502 dst : WB(write)+5;
5503 DECODE : ID;
5504 ALU2 : CA;
5505 %}
5507 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5508 src1 : RD(read);
5509 src2 : RD(read);
5510 dst : WB(write)+10;
5511 DECODE : ID;
5512 ALU2 : CA;
5513 %}
5515 //No.19 Integer div operation : dst <-- reg1 div reg2
5516 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5517 src1 : RD(read);
5518 src2 : RD(read);
5519 dst : WB(write)+10;
5520 DECODE : ID;
5521 ALU2 : CA;
5522 %}
5524 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5525 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5526 instruction_count(2);
5527 src1 : RD(read);
5528 src2 : RD(read);
5529 dst : WB(write)+10;
5530 DECODE : ID;
5531 ALU2 : CA;
5532 %}
5534 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5535 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5536 instruction_count(2);
5537 src1 : RD(read);
5538 src2 : RD(read);
5539 dst : WB(write);
5540 DECODE : ID;
5541 ALU : CA;
5542 %}
5544 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5545 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5546 instruction_count(2);
5547 src : RD(read);
5548 dst : WB(write);
5549 DECODE : ID;
5550 ALU : CA;
5551 %}
5553 //no.16 load Long from memory :
5554 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5555 instruction_count(2);
5556 mem : RD(read);
5557 dst : WB(write)+5;
5558 DECODE : ID;
5559 MEM : RD;
5560 %}
5562 //No.17 Store Long to Memory :
5563 pipe_class ialu_storeL(mRegL src, memory mem) %{
5564 instruction_count(2);
5565 mem : RD(read);
5566 src : RD(read);
5567 DECODE : ID;
5568 MEM : RD;
5569 %}
5571 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5572 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5573 single_instruction;
5574 src : RD(read);
5575 dst : WB(write);
5576 DECODE : ID;
5577 ALU : CA;
5578 %}
5580 //No.3 Integer move operation : dst <-- reg
5581 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5582 src : RD(read);
5583 dst : WB(write);
5584 DECODE : ID;
5585 ALU : CA;
5586 %}
5588 //No.4 No instructions : do nothing
5589 pipe_class empty( ) %{
5590 instruction_count(0);
5591 %}
5593 //No.5 Unconditional branch :
5594 pipe_class pipe_jump( label labl ) %{
5595 multiple_bundles;
5596 DECODE : ID;
5597 BR : RD;
5598 %}
5600 //No.6 ALU Conditional branch :
5601 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5602 multiple_bundles;
5603 src1 : RD(read);
5604 src2 : RD(read);
5605 DECODE : ID;
5606 BR : RD;
5607 %}
5609 //no.7 load integer from memory :
5610 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5611 mem : RD(read);
5612 dst : WB(write)+3;
5613 DECODE : ID;
5614 MEM : RD;
5615 %}
5617 //No.8 Store Integer to Memory :
5618 pipe_class ialu_storeI(mRegI src, memory mem) %{
5619 mem : RD(read);
5620 src : RD(read);
5621 DECODE : ID;
5622 MEM : RD;
5623 %}
5626 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5627 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5628 src1 : RD(read);
5629 src2 : RD(read);
5630 dst : WB(write);
5631 DECODE : ID;
5632 FPU : CA;
5633 %}
5635 //No.22 Floating div operation : dst <-- reg1 div reg2
5636 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5637 src1 : RD(read);
5638 src2 : RD(read);
5639 dst : WB(write);
5640 DECODE : ID;
5641 FPU2 : CA;
5642 %}
5644 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5645 src : RD(read);
5646 dst : WB(write);
5647 DECODE : ID;
5648 FPU1 : CA;
5649 %}
5651 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5652 src : RD(read);
5653 dst : WB(write);
5654 DECODE : ID;
5655 FPU1 : CA;
5656 %}
5658 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5659 src : RD(read);
5660 dst : WB(write);
5661 DECODE : ID;
5662 MEM : RD;
5663 %}
5665 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5666 src : RD(read);
5667 dst : WB(write);
5668 DECODE : ID;
5669 MEM : RD(5);
5670 %}
5672 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5673 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5674 multiple_bundles;
5675 src1 : RD(read);
5676 src2 : RD(read);
5677 dst : WB(write);
5678 DECODE : ID;
5679 FPU2 : CA;
5680 %}
5682 //No.11 Load Floating from Memory :
5683 pipe_class fpu_loadF(regF dst, memory mem) %{
5684 instruction_count(1);
5685 mem : RD(read);
5686 dst : WB(write)+3;
5687 DECODE : ID;
5688 MEM : RD;
5689 %}
5691 //No.12 Store Floating to Memory :
5692 pipe_class fpu_storeF(regF src, memory mem) %{
5693 instruction_count(1);
5694 mem : RD(read);
5695 src : RD(read);
5696 DECODE : ID;
5697 MEM : RD;
5698 %}
5700 //No.13 FPU Conditional branch :
5701 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5702 multiple_bundles;
5703 src1 : RD(read);
5704 src2 : RD(read);
5705 DECODE : ID;
5706 BR : RD;
5707 %}
5709 //No.14 Floating FPU reg operation : dst <-- op reg
5710 pipe_class fpu1_regF(regF dst, regF src) %{
5711 src : RD(read);
5712 dst : WB(write);
5713 DECODE : ID;
5714 FPU : CA;
5715 %}
5717 pipe_class long_memory_op() %{
5718 instruction_count(10); multiple_bundles; force_serialization;
5719 fixed_latency(30);
5720 %}
5722 pipe_class simple_call() %{
5723 instruction_count(10); multiple_bundles; force_serialization;
5724 fixed_latency(200);
5725 BR : RD;
5726 %}
5728 pipe_class call() %{
5729 instruction_count(10); multiple_bundles; force_serialization;
5730 fixed_latency(200);
5731 %}
5733 //FIXME:
5734 //No.9 Pipe slow : for multi-instructions
5735 pipe_class pipe_slow( ) %{
5736 instruction_count(20);
5737 force_serialization;
5738 multiple_bundles;
5739 fixed_latency(50);
5740 %}
5742 %}
5746 //----------INSTRUCTIONS-------------------------------------------------------
5747 //
5748 // match -- States which machine-independent subtree may be replaced
5749 // by this instruction.
5750 // ins_cost -- The estimated cost of this instruction is used by instruction
5751 // selection to identify a minimum cost tree of machine
5752 // instructions that matches a tree of machine-independent
5753 // instructions.
5754 // format -- A string providing the disassembly for this instruction.
5755 // The value of an instruction's operand may be inserted
5756 // by referring to it with a '$' prefix.
5757 // opcode -- Three instruction opcodes may be provided. These are referred
5758 // to within an encode class as $primary, $secondary, and $tertiary
5759 // respectively. The primary opcode is commonly used to
5760 // indicate the type of machine instruction, while secondary
5761 // and tertiary are often used for prefix options or addressing
5762 // modes.
5763 // ins_encode -- A list of encode classes with parameters. The encode class
5764 // name must have been defined in an 'enc_class' specification
5765 // in the encode section of the architecture description.
5768 // Load Integer
5769 instruct loadI(mRegI dst, memory mem) %{
5770 match(Set dst (LoadI mem));
5772 ins_cost(125);
5773 format %{ "lw $dst, $mem #@loadI" %}
5774 ins_encode (load_I_enc(dst, mem));
5775 ins_pipe( ialu_loadI );
5776 %}
5777 // Load Integer already sign-extended to long (lw sign-extends on MIPS64,
5777 // so the ConvI2L is folded into the same encoding as loadI).
5778 instruct loadI_convI2L(mRegL dst, memory mem) %{
5779 match(Set dst (ConvI2L (LoadI mem)));
5781 ins_cost(125);
5782 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5783 ins_encode (load_I_enc(dst, mem));
5784 ins_pipe( ialu_loadI );
5785 %}
5787 // Load Integer (32 bit signed) to Byte (8 bit signed)
5787 // Folds the (x << 24) >> 24 narrowing idiom into a single lb.
5788 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5789 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5791 ins_cost(125);
5792 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5793 ins_encode(load_B_enc(dst, mem));
5794 ins_pipe(ialu_loadI);
5795 %}
5797 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5797 // Folds (x & 0xFF) into a single lbu.
5798 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5799 match(Set dst (AndI (LoadI mem) mask));
5801 ins_cost(125);
5802 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5803 ins_encode(load_UB_enc(dst, mem));
5804 ins_pipe(ialu_loadI);
5805 %}
5807 // Load Integer (32 bit signed) to Short (16 bit signed)
5807 // Folds the (x << 16) >> 16 narrowing idiom into a single lh.
5808 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5809 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5811 ins_cost(125);
5812 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5813 ins_encode(load_S_enc(dst, mem));
5814 ins_pipe(ialu_loadI);
5815 %}
5817 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5817 // Folds (x & 0xFFFF) into a single lhu.
5818 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5819 match(Set dst (AndI (LoadI mem) mask));
5821 ins_cost(125);
5822 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5823 ins_encode(load_C_enc(dst, mem));
5824 ins_pipe(ialu_loadI);
5825 %}
// ---- 64-bit loads -------------------------------------------------------
// Both the aligned and the "unaligned" form are emitted as a plain 'ld';
// the commented-out predicate shows atomic access is not distinguished
// here. NOTE(review): the unaligned form relies on ld working for the
// unaligned case (see FIXME below re: ldl/ldr) — confirm on the target.
5827 // Load Long.
5828 instruct loadL(mRegL dst, memory mem) %{
5829 // predicate(!((LoadLNode*)n)->require_atomic_access());
5830 match(Set dst (LoadL mem));
5832 ins_cost(250);
5833 format %{ "ld $dst, $mem #@loadL" %}
5834 ins_encode(load_L_enc(dst, mem));
5835 ins_pipe( ialu_loadL );
5836 %}
5838 // Load Long - UNaligned
5839 instruct loadL_unaligned(mRegL dst, memory mem) %{
5840 match(Set dst (LoadL_unaligned mem));
5842 // FIXME: Jin: Need more effective ldl/ldr
5843 ins_cost(450);
5844 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5845 ins_encode(load_L_enc(dst, mem));
5846 ins_pipe( ialu_loadL );
5847 %}
// ---- 64-bit stores ------------------------------------------------------
// Register source, plus the common zero special case which stores R0
// directly (cheaper: no constant materialization, hence lower ins_cost).
5849 // Store Long
5850 instruct storeL_reg(memory mem, mRegL src) %{
5851 match(Set mem (StoreL mem src));
5853 ins_cost(200);
5854 format %{ "sd $mem, $src #@storeL_reg\n" %}
5855 ins_encode(store_L_reg_enc(mem, src));
5856 ins_pipe( ialu_storeL );
5857 %}
5860 instruct storeL_immL0(memory mem, immL0 zero) %{
5861 match(Set mem (StoreL mem zero));
5863 ins_cost(180);
5864 format %{ "sd $mem, zero #@storeL_immL0" %}
5865 ins_encode(store_L_immL0_enc(mem, zero));
5866 ins_pipe( ialu_storeL );
5867 %}
// ---- Pointer-family loads -----------------------------------------------
// Compressed oop/klass values are 32 bits and are loaded zero-extended
// with 'lwu'; full pointers (and Klass*) use 64-bit 'ld'.
5869 // Load Compressed Pointer
5870 instruct loadN(mRegN dst, memory mem)
5871 %{
5872 match(Set dst (LoadN mem));
5874 ins_cost(125); // XXX
5875 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5876 ins_encode (load_N_enc(dst, mem));
5877 ins_pipe( ialu_loadI ); // XXX
5878 %}
5880 // Load Pointer
5881 instruct loadP(mRegP dst, memory mem) %{
5882 match(Set dst (LoadP mem));
5884 ins_cost(125);
5885 format %{ "ld $dst, $mem #@loadP" %}
5886 ins_encode (load_P_enc(dst, mem));
5887 ins_pipe( ialu_loadI );
5888 %}
5890 // Load Klass Pointer
5891 instruct loadKlass(mRegP dst, memory mem) %{
5892 match(Set dst (LoadKlass mem));
5894 ins_cost(125);
5895 format %{ "MOV $dst,$mem @ loadKlass" %}
5896 ins_encode (load_P_enc(dst, mem));
5897 ins_pipe( ialu_loadI );
5898 %}
5900 // Load narrow Klass Pointer
5901 instruct loadNKlass(mRegN dst, memory mem)
5902 %{
5903 match(Set dst (LoadNKlass mem));
5905 ins_cost(125); // XXX
5906 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
5907 ins_encode (load_N_enc(dst, mem));
5908 ins_pipe( ialu_loadI ); // XXX
5909 %}
// ---- Constant materialization (int/long) --------------------------------
// Delegates to the macro assembler: move() picks the cheapest sequence
// for a 32-bit value, set64() for an arbitrary 64-bit value.
5911 // Load Constant
5912 instruct loadConI(mRegI dst, immI src) %{
5913 match(Set dst src);
5915 ins_cost(150);
5916 format %{ "mov $dst, $src #@loadConI" %}
5917 ins_encode %{
5918 Register dst = $dst$$Register;
5919 int value = $src$$constant;
5920 __ move(dst, value);
5921 %}
5922 ins_pipe( ialu_regI_regI );
5923 %}
5926 instruct loadConL_set64(mRegL dst, immL src) %{
5927 match(Set dst src);
5928 ins_cost(120);
5929 format %{ "li $dst, $src @ loadConL_set64" %}
5930 ins_encode %{
5931 __ set64($dst$$Register, $src$$constant);
5932 %}
5933 ins_pipe(ialu_regL_regL);
5934 %}
5936 /*
5937 // Load long value from constant table (predicated by immL_expensive).
5938 instruct loadConL_load(mRegL dst, immL_expensive src) %{
5939 match(Set dst src);
5940 ins_cost(150);
5941 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
5942 ins_encode %{
5943 int con_offset = $constantoffset($src);
5945 if (Assembler::is_simm16(con_offset)) {
5946 __ ld($dst$$Register, $constanttablebase, con_offset);
5947 } else {
5948 __ set64(AT, con_offset);
5949 if (UseLoongsonISA) {
5950 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
5951 } else {
5952 __ daddu(AT, $constanttablebase, AT);
5953 __ ld($dst$$Register, AT, 0);
5954 }
5955 }
5956 %}
5957 ins_pipe(ialu_loadI);
5958 %}
5959 */
// Long constant that fits a signed 16-bit immediate: a single
// 'daddiu dst, R0, imm' suffices (cheaper than set64, hence cost 105).
5961 instruct loadConL16(mRegL dst, immL16 src) %{
5962 match(Set dst src);
5963 ins_cost(105);
5964 format %{ "mov $dst, $src #@loadConL16" %}
5965 ins_encode %{
5966 Register dst_reg = as_Register($dst$$reg);
5967 int value = $src$$constant;
5968 __ daddiu(dst_reg, R0, value);
5969 %}
5970 ins_pipe( ialu_regL_regL );
5971 %}
// Long zero: 'daddu dst, R0, R0' clears the register, cheapest form.
5974 instruct loadConL0(mRegL dst, immL0 src) %{
5975 match(Set dst src);
5976 ins_cost(100);
5977 format %{ "mov $dst, zero #@loadConL0" %}
5978 ins_encode %{
5979 Register dst_reg = as_Register($dst$$reg);
5980 __ daddu(dst_reg, R0, R0);
5981 %}
5982 ins_pipe( ialu_regL_regL );
5983 %}
// Array length load (LoadRange) — same encoding as a plain int load.
5985 // Load Range
5986 instruct loadRange(mRegI dst, memory mem) %{
5987 match(Set dst (LoadRange mem));
5989 ins_cost(125);
5990 format %{ "MOV $dst,$mem @ loadRange" %}
5991 ins_encode(load_I_enc(dst, mem));
5992 ins_pipe( ialu_loadI );
5993 %}
// Store a full 64-bit pointer with 'sd'.
5996 instruct storeP(memory mem, mRegP src ) %{
5997 match(Set mem (StoreP mem src));
5999 ins_cost(125);
6000 format %{ "sd $src, $mem #@storeP" %}
6001 ins_encode(store_P_reg_enc(mem, src));
6002 ins_pipe( ialu_storeI );
6003 %}
6005 /*
6006 [Ref: loadConP]
6008 Error:
6009 0x2d4b6d40: lui t9, 0x4f <--- handle
6010 0x2d4b6d44: addiu t9, t9, 0xffff808c
6011 0x2d4b6d48: sw t9, 0x4(s2)
6013 OK:
6014 0x2cc5ed40: lui t9, 0x336a <--- klass
6015 0x2cc5ed44: addiu t9, t9, 0x5a10
6016 0x2cc5ed48: sw t9, 0x4(s2)
6017 */
// immP31 restricts this to pointer constants representable in 31 bits
// (e.g. NULL, mark words) — values that never need a card-mark barrier.
6018 // Store Pointer Immediate; null pointers or constant oops that do not
6019 // need card-mark barriers.
6021 // Store NULL Pointer, mark word, or other simple pointer constant.
6022 instruct storeImmP(memory mem, immP31 src) %{
6023 match(Set mem (StoreP mem src));
6025 ins_cost(150);
6026 format %{ "mov $mem, $src #@storeImmP" %}
6027 ins_encode(store_P_immP_enc(mem, src));
6028 ins_pipe( ialu_storeI );
6029 %}
6031 // Store Byte Immediate
6032 instruct storeImmB(memory mem, immI8 src) %{
6033 match(Set mem (StoreB mem src));
6035 ins_cost(150);
6036 format %{ "movb $mem, $src #@storeImmB" %}
6037 ins_encode(store_B_immI_enc(mem, src));
6038 ins_pipe( ialu_storeI );
6039 %}
// ---- Compressed-pointer stores ------------------------------------------
6041 // Store Compressed Pointer
6042 instruct storeN(memory mem, mRegN src)
6043 %{
6044 match(Set mem (StoreN mem src));
6046 ins_cost(125); // XXX
6047 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6048 ins_encode(store_N_reg_enc(mem, src));
6049 ins_pipe( ialu_storeI );
6050 %}
6052 instruct storeNKlass(memory mem, mRegN src)
6053 %{
6054 match(Set mem (StoreNKlass mem src));
6056 ins_cost(125); // XXX
6057 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6058 ins_encode(store_N_reg_enc(mem, src));
6059 ins_pipe( ialu_storeI );
6060 %}
// Compressed NULL can only be stored as a literal zero word when neither
// the narrow-oop nor the narrow-klass encoding uses a non-NULL base —
// otherwise zero would decode to the base, not to NULL.
6062 instruct storeImmN0(memory mem, immN0 zero)
6063 %{
6064 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6065 match(Set mem (StoreN mem zero));
6067 ins_cost(125); // XXX
6068 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6069 ins_encode(storeImmN0_enc(mem, zero));
6070 ins_pipe( ialu_storeI );
6071 %}
// Byte stores; the second rule folds away a ConvL2I feeding the store,
// since 'sb' only consumes the low 8 bits anyway.
6073 // Store Byte
6074 instruct storeB(memory mem, mRegI src) %{
6075 match(Set mem (StoreB mem src));
6077 ins_cost(125);
6078 format %{ "sb $src, $mem #@storeB" %}
6079 ins_encode(store_B_reg_enc(mem, src));
6080 ins_pipe( ialu_storeI );
6081 %}
6083 instruct storeB_convL2I(memory mem, mRegL src) %{
6084 match(Set mem (StoreB mem (ConvL2I src)));
6086 ins_cost(125);
6087 format %{ "sb $src, $mem #@storeB_convL2I" %}
6088 ins_encode(store_B_reg_enc(mem, src));
6089 ins_pipe( ialu_storeI );
6090 %}
// ---- Byte loads ---------------------------------------------------------
// Signed (lb) and unsigned (lbu) forms, each with a variant folding a
// following ConvI2L (the load's extension already covers 64 bits).
6092 // Load Byte (8bit signed)
6093 instruct loadB(mRegI dst, memory mem) %{
6094 match(Set dst (LoadB mem));
6096 ins_cost(125);
6097 format %{ "lb $dst, $mem #@loadB" %}
6098 ins_encode(load_B_enc(dst, mem));
6099 ins_pipe( ialu_loadI );
6100 %}
6102 instruct loadB_convI2L(mRegL dst, memory mem) %{
6103 match(Set dst (ConvI2L (LoadB mem)));
6105 ins_cost(125);
6106 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6107 ins_encode(load_B_enc(dst, mem));
6108 ins_pipe( ialu_loadI );
6109 %}
6111 // Load Byte (8bit UNsigned)
6112 instruct loadUB(mRegI dst, memory mem) %{
6113 match(Set dst (LoadUB mem));
6115 ins_cost(125);
6116 format %{ "lbu $dst, $mem #@loadUB" %}
6117 ins_encode(load_UB_enc(dst, mem));
6118 ins_pipe( ialu_loadI );
6119 %}
6121 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6122 match(Set dst (ConvI2L (LoadUB mem)));
6124 ins_cost(125);
6125 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6126 ins_encode(load_UB_enc(dst, mem));
6127 ins_pipe( ialu_loadI );
6128 %}
// ---- Short loads --------------------------------------------------------
// 'lh' plus two folded variants: short load narrowed to byte (lb), and
// short load followed by ConvI2L (lh already sign-extends to 64 bits).
6130 // Load Short (16bit signed)
6131 instruct loadS(mRegI dst, memory mem) %{
6132 match(Set dst (LoadS mem));
6134 ins_cost(125);
6135 format %{ "lh $dst, $mem #@loadS" %}
6136 ins_encode(load_S_enc(dst, mem));
6137 ins_pipe( ialu_loadI );
6138 %}
6140 // Load Short (16 bit signed) to Byte (8 bit signed)
6141 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6142 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6144 ins_cost(125);
6145 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6146 ins_encode(load_B_enc(dst, mem));
6147 ins_pipe(ialu_loadI);
6148 %}
6150 instruct loadS_convI2L(mRegL dst, memory mem) %{
6151 match(Set dst (ConvI2L (LoadS mem)));
6153 ins_cost(125);
6154 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6155 ins_encode(load_S_enc(dst, mem));
6156 ins_pipe( ialu_loadI );
6157 %}
// ---- Int stores ---------------------------------------------------------
// Immediate and register forms; the ConvL2I variant folds the narrowing
// conversion since 'sw' only writes the low 32 bits.
6159 // Store Integer Immediate
6160 instruct storeImmI(memory mem, immI src) %{
6161 match(Set mem (StoreI mem src));
6163 ins_cost(150);
6164 format %{ "mov $mem, $src #@storeImmI" %}
6165 ins_encode(store_I_immI_enc(mem, src));
6166 ins_pipe( ialu_storeI );
6167 %}
6169 // Store Integer
6170 instruct storeI(memory mem, mRegI src) %{
6171 match(Set mem (StoreI mem src));
6173 ins_cost(125);
6174 format %{ "sw $mem, $src #@storeI" %}
6175 ins_encode(store_I_reg_enc(mem, src));
6176 ins_pipe( ialu_storeI );
6177 %}
6179 instruct storeI_convL2I(memory mem, mRegL src) %{
6180 match(Set mem (StoreI mem (ConvL2I src)));
6182 ins_cost(125);
6183 format %{ "sw $mem, $src #@storeI_convL2I" %}
6184 ins_encode(store_I_reg_enc(mem, src));
6185 ins_pipe( ialu_storeI );
6186 %}
// Single-precision float load into an FPU register.
6188 // Load Float
6189 instruct loadF(regF dst, memory mem) %{
6190 match(Set dst (LoadF mem));
6192 ins_cost(150);
6193 format %{ "loadF $dst, $mem #@loadF" %}
6194 ins_encode(load_F_enc(dst, mem));
6195 ins_pipe( ialu_loadI );
6196 %}
// Materialize an arbitrary pointer constant.
// Relocatable constants (metadata or oop) are emitted with li48 — a
// fixed-length sequence the relocation machinery can patch in place;
// non-relocatable values use the cheapest set64 sequence.
// NOTE(review): the two reloc-type tests are sequential ifs, not
// else-if; a constant has at most one reloc type so at most one fires,
// but if the type is some third relocatable kind nothing is emitted —
// confirm that metadata/oop are the only types reaching here.
6198 instruct loadConP_general(mRegP dst, immP src) %{
6199 match(Set dst src);
6201 ins_cost(120);
6202 format %{ "li $dst, $src #@loadConP_general" %}
6204 ins_encode %{
6205 Register dst = $dst$$Register;
6206 long* value = (long*)$src$$constant;
6207 bool is_need_reloc = $src->constant_reloc() != relocInfo::none;
6209 /* During GC, klassOop may be moved to new position in the heap.
6210 * It must be relocated.
6211 * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
6212 */
6213 if (is_need_reloc) {
6214 if($src->constant_reloc() == relocInfo::metadata_type){
6215 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6216 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6218 __ relocate(rspec);
6219 __ li48(dst, (long)value);
6220 }
6222 if($src->constant_reloc() == relocInfo::oop_type){
6223 int oop_index = __ oop_recorder()->find_index((jobject)value);
6224 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6226 __ relocate(rspec);
6227 __ li48(dst, (long)value);
6228 }
6229 } else {
6230 __ set64(dst, (long)value);
6231 }
6232 %}
6234 ins_pipe( ialu_regI_regI );
6235 %}
6237 /*
6238 instruct loadConP_load(mRegP dst, immP_load src) %{
6239 match(Set dst src);
6241 ins_cost(100);
6242 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6244 ins_encode %{
6246 int con_offset = $constantoffset($src);
6248 if (Assembler::is_simm16(con_offset)) {
6249 __ ld($dst$$Register, $constanttablebase, con_offset);
6250 } else {
6251 __ set64(AT, con_offset);
6252 if (UseLoongsonISA) {
6253 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6254 } else {
6255 __ daddu(AT, $constanttablebase, AT);
6256 __ ld($dst$$Register, AT, 0);
6257 }
6258 }
6259 %}
6261 ins_pipe(ialu_loadI);
6262 %}
6263 */
// Non-oop pointer constant that is cheap to materialize inline (set64).
6265 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6266 match(Set dst src);
6268 ins_cost(80);
6269 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6271 ins_encode %{
6272 __ set64($dst$$Register, $src$$constant);
6273 %}
6275 ins_pipe(ialu_regI_regI);
6276 %}
// Safepoint polling-page address — a fixed runtime address, no reloc.
6279 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6280 match(Set dst src);
6282 ins_cost(50);
6283 format %{ "li $dst, $src #@loadConP_poll" %}
6285 ins_encode %{
6286 Register dst = $dst$$Register;
6287 intptr_t value = (intptr_t)$src$$constant;
6289 __ set64(dst, (jlong)value);
6290 %}
6292 ins_pipe( ialu_regI_regI );
6293 %}
// NULL pointer / compressed-NULL constants: just clear the register.
6295 instruct loadConP0(mRegP dst, immP0 src)
6296 %{
6297 match(Set dst src);
6299 ins_cost(50);
6300 format %{ "mov $dst, R0\t# ptr" %}
6301 ins_encode %{
6302 Register dst_reg = $dst$$Register;
6303 __ daddu(dst_reg, R0, R0);
6304 %}
6305 ins_pipe( ialu_regI_regI );
6306 %}
6308 instruct loadConN0(mRegN dst, immN0 src) %{
6309 match(Set dst src);
6310 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6311 ins_encode %{
6312 __ move($dst$$Register, R0);
6313 %}
6314 ins_pipe( ialu_regI_regI );
6315 %}
// Materialize a non-NULL compressed-oop constant.
// NOTE(review): the immediate emitted by li48/set64 is the *oop index*,
// presumably a placeholder that the narrow_oop_operand relocation
// rewrites to the real narrow oop at fixup time — confirm against the
// relocInfo/oop_Relocation handling for this port.
6317 instruct loadConN(mRegN dst, immN src) %{
6318 match(Set dst src);
6320 ins_cost(125);
6321 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6322 ins_encode %{
6323 address con = (address)$src$$constant;
6324 if (con == NULL) {
6325 ShouldNotReachHere();
6326 } else {
6327 assert (UseCompressedOops, "should only be used for compressed headers");
6328 assert (Universe::heap() != NULL, "java heap should be initialized");
6329 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
6331 Register dst = $dst$$Register;
6332 long* value = (long*)$src$$constant;
6333 int oop_index = __ oop_recorder()->find_index((jobject)value);
6334 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6335 if(rspec.type()!=relocInfo::none){
6336 __ relocate(rspec, Assembler::narrow_oop_operand);
6337 __ li48(dst, oop_index);
6338 } else {
6339 __ set64(dst, oop_index);
6340 }
6341 }
6342 %}
6343 ins_pipe( ialu_regI_regI ); // XXX
6344 %}
// Materialize a compressed-klass constant (encode_klass value).
// NOTE(review): the relocation is emitted with
// Assembler::narrow_oop_operand even though this is a metadata (klass)
// relocation — looks like the operand kind is shared between narrow oop
// and narrow klass on this port; verify against the relocation code.
6346 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6347 match(Set dst src);
6349 ins_cost(125);
6350 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6351 ins_encode %{
6352 address con = (address)$src$$constant;
6353 if (con == NULL) {
6354 ShouldNotReachHere();
6355 } else {
6356 Register dst = $dst$$Register;
6357 long* value = (long*)$src$$constant;
6359 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6360 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6361 long narrowp = (long)Klass::encode_klass((Klass*)value);
6363 if(rspec.type()!=relocInfo::none){
6364 __ relocate(rspec, Assembler::narrow_oop_operand);
6365 __ li48(dst, narrowp);
6366 } else {
6367 __ set64(dst, narrowp);
6368 }
6369 }
6370 %}
6371 ins_pipe( ialu_regI_regI ); // XXX
6372 %}
// Indirect tail call: method oop is passed in S3, RA is pushed so
// generate_forward_exception() can find the return address, then jr
// jumps to the target (nop fills the branch delay slot).
6374 //FIXME
6375 // Tail Call; Jump from runtime stub to Java code.
6376 // Also known as an 'interprocedural jump'.
6377 // Target of jump will eventually return to caller.
6378 // TailJump below removes the return address.
6379 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6380 match(TailCall jump_target method_oop );
6381 ins_cost(300);
6382 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6384 ins_encode %{
6385 Register target = $jump_target$$Register;
6386 Register oop = $method_oop$$Register;
6388 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6389 __ push(RA);
6391 __ move(S3, oop);
6392 __ jr(target);
6393 __ nop();
6394 %}
6396 ins_pipe( pipe_jump );
6397 %}
6399 // Create exception oop: created by stack-crawling runtime code.
6400 // Created exception is now available to this handler, and is setup
6401 // just prior to jumping to this handler. No code emitted.
6402 instruct CreateException( a0_RegP ex_oop )
6403 %{
6404 match(Set ex_oop (CreateEx));
6406 // use the following format syntax
6407 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6408 ins_encode %{
6409 /* Jin: X86 leaves this function empty */
6410 __ block_comment("CreateException is empty in X86/MIPS");
6411 %}
6412 ins_pipe( empty );
6413 // ins_pipe( pipe_jump );
6414 %}
6417 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6419 - Common try/catch:
6420 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6421 |- V0, V1 are created
6422 |- T9 <= SharedRuntime::exception_handler_for_return_address
6423 `- jr T9
6424 `- the caller's exception_handler
6425 `- jr OptoRuntime::exception_blob
6426 `- here
6427 - Rethrow(e.g. 'unwind'):
6428 * The callee:
6429 |- an exception is triggered during execution
6430 `- exits the callee method through RethrowException node
6431 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6432 `- The callee jumps to OptoRuntime::rethrow_stub()
6433 * In OptoRuntime::rethrow_stub:
6434 |- The VM calls _rethrow_Java to determine the return address in the caller method
6435 `- exits the stub with tailjmpInd
6436 |- pops exception_oop(V0) and exception_pc(V1)
6437 `- jumps to the return address(usually an exception_handler)
6438 * The caller:
6439 `- continues processing the exception_blob with V0/V1
6440 */
6442 /*
6443 Disassembling OptoRuntime::rethrow_stub()
6445 ; locals
6446 0x2d3bf320: addiu sp, sp, 0xfffffff8
6447 0x2d3bf324: sw ra, 0x4(sp)
6448 0x2d3bf328: sw fp, 0x0(sp)
6449 0x2d3bf32c: addu fp, sp, zero
6450 0x2d3bf330: addiu sp, sp, 0xfffffff0
6451 0x2d3bf334: sw ra, 0x8(sp)
6452 0x2d3bf338: sw t0, 0x4(sp)
6453 0x2d3bf33c: sw sp, 0x0(sp)
6455 ; get_thread(S2)
6456 0x2d3bf340: addu s2, sp, zero
6457 0x2d3bf344: srl s2, s2, 12
6458 0x2d3bf348: sll s2, s2, 2
6459 0x2d3bf34c: lui at, 0x2c85
6460 0x2d3bf350: addu at, at, s2
6461 0x2d3bf354: lw s2, 0xffffcc80(at)
6463 0x2d3bf358: lw s0, 0x0(sp)
6464 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6465 0x2d3bf360: sw s2, 0xc(sp)
6467 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6468 0x2d3bf364: lw a0, 0x4(sp)
6469 0x2d3bf368: lw a1, 0xc(sp)
6470 0x2d3bf36c: lw a2, 0x8(sp)
6471 ;; Java_To_Runtime
6472 0x2d3bf370: lui t9, 0x2c34
6473 0x2d3bf374: addiu t9, t9, 0xffff8a48
6474 0x2d3bf378: jalr t9
6475 0x2d3bf37c: nop
6477 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6479 0x2d3bf384: lw s0, 0xc(sp)
6480 0x2d3bf388: sw zero, 0x118(s0)
6481 0x2d3bf38c: sw zero, 0x11c(s0)
6482 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6483 0x2d3bf394: addu s2, s0, zero
6484 0x2d3bf398: sw zero, 0x144(s2)
6485 0x2d3bf39c: lw s0, 0x4(s2)
6486 0x2d3bf3a0: addiu s4, zero, 0x0
6487 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6488 0x2d3bf3a8: nop
6489 0x2d3bf3ac: addiu sp, sp, 0x10
6490 0x2d3bf3b0: addiu sp, sp, 0x8
6491 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6492 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6493 0x2d3bf3bc: lui at, 0x2b48
6494 0x2d3bf3c0: lw at, 0x100(at)
6496 ; tailjmpInd: Restores exception_oop & exception_pc
6497 0x2d3bf3c4: addu v1, ra, zero
6498 0x2d3bf3c8: addu v0, s1, zero
6499 0x2d3bf3cc: jr s3
6500 0x2d3bf3d0: nop
6501 ; Exception:
6502 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6503 0x2d3bf3d8: addiu s1, s1, 0x40
6504 0x2d3bf3dc: addiu s2, zero, 0x0
6505 0x2d3bf3e0: addiu sp, sp, 0x10
6506 0x2d3bf3e4: addiu sp, sp, 0x8
6507 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6508 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6509 0x2d3bf3f0: lui at, 0x2b48
6510 0x2d3bf3f4: lw at, 0x100(at)
6511 ; TailCalljmpInd
6512 __ push(RA); ; to be used in generate_forward_exception()
6513 0x2d3bf3f8: addu t7, s2, zero
6514 0x2d3bf3fc: jr s1
6515 0x2d3bf400: nop
6516 */
// Jumps (not calls) into OptoRuntime::rethrow_stub, which locates the
// exception handler in the parent method. The mark + runtime_call
// relocation make the stub address patchable.
6517 // Rethrow exception:
6518 // The exception oop will come in the first argument position.
6519 // Then JUMP (not call) to the rethrow stub code.
6520 instruct RethrowException()
6521 %{
6522 match(Rethrow);
6524 // use the following format syntax
6525 format %{ "JMP rethrow_stub #@RethrowException" %}
6526 ins_encode %{
6527 __ block_comment("@ RethrowException");
6529 cbuf.set_insts_mark();
6530 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6532 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6533 __ li(T9, OptoRuntime::rethrow_stub());
6534 __ jr(T9);
6535 __ nop();
6536 %}
6537 ins_pipe( pipe_jump );
6538 %}
// Branch on a pointer compared against NULL (eq/ne only; the unsigned
// relational arms are kept below, disabled, for reference).
// Fix: the old code bound 'Label &L = *($labl$$label);' — dereferencing
// a possibly-NULL pointer — and then tested 'if (&L)', which is the
// address of a bound reference and therefore always true, making the
// else arms dead. Test the raw pointer before dereferencing instead.
6540 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6541 match(If cmp (CmpP op1 zero));
6542 effect(USE labl);
6544 ins_cost(180);
6545 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6547 ins_encode %{
6548 Register op1 = $op1$$Register;
6549 Register op2 = R0;
6550 Label* L = $labl$$label; // may be NULL when no target is supplied
6551 int flag = $cmp$$cmpcode;
6553 switch(flag)
6554 {
6555 case 0x01: //equal
6556 if (L != NULL)
6557 __ beq(op1, op2, *L);
6558 else
6559 __ beq(op1, op2, (int)0);
6560 break;
6561 case 0x02: //not_equal
6562 if (L != NULL)
6563 __ bne(op1, op2, *L);
6564 else
6565 __ bne(op1, op2, (int)0);
6566 break;
6567 /*
6568 case 0x03: //above
6569 __ sltu(AT, op2, op1);
6570 if(&L)
6571 __ bne(R0, AT, L);
6572 else
6573 __ bne(R0, AT, (int)0);
6574 break;
6575 case 0x04: //above_equal
6576 __ sltu(AT, op1, op2);
6577 if(&L)
6578 __ beq(AT, R0, L);
6579 else
6580 __ beq(AT, R0, (int)0);
6581 break;
6582 case 0x05: //below
6583 __ sltu(AT, op1, op2);
6584 if(&L)
6585 __ bne(R0, AT, L);
6586 else
6587 __ bne(R0, AT, (int)0);
6588 break;
6589 case 0x06: //below_equal
6590 __ sltu(AT, op2, op1);
6591 if(&L)
6592 __ beq(AT, R0, L);
6593 else
6594 __ beq(AT, R0, (int)0);
6595 break;
6596 */
6597 default:
6598 Unimplemented();
6599 }
6600 __ nop(); // branch delay slot
6601 %}
6603 ins_pc_relative(1);
6604 ins_pipe( pipe_alu_branch );
6605 %}
// Pointer-vs-pointer conditional branch (unsigned compares via sltu+AT).
// NOTE(review): 'Label &L = *($labl$$label);' dereferences a possibly-
// NULL pointer, and 'if (&L)' is the address of a bound reference —
// always true — so every else arm below is dead. Should test the raw
// $labl$$label pointer before binding the reference.
6608 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6609 match(If cmp (CmpP op1 op2));
6610 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6611 effect(USE labl);
6613 ins_cost(200);
6614 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6616 ins_encode %{
6617 Register op1 = $op1$$Register;
6618 Register op2 = $op2$$Register;
6619 Label &L = *($labl$$label);
6620 int flag = $cmp$$cmpcode;
6622 switch(flag)
6623 {
6624 case 0x01: //equal
6625 if (&L)
6626 __ beq(op1, op2, L);
6627 else
6628 __ beq(op1, op2, (int)0);
6629 break;
6630 case 0x02: //not_equal
6631 if (&L)
6632 __ bne(op1, op2, L);
6633 else
6634 __ bne(op1, op2, (int)0);
6635 break;
6636 case 0x03: //above
6637 __ sltu(AT, op2, op1);
6638 if(&L)
6639 __ bne(R0, AT, L);
6640 else
6641 __ bne(R0, AT, (int)0);
6642 break;
6643 case 0x04: //above_equal
6644 __ sltu(AT, op1, op2);
6645 if(&L)
6646 __ beq(AT, R0, L);
6647 else
6648 __ beq(AT, R0, (int)0);
6649 break;
6650 case 0x05: //below
6651 __ sltu(AT, op1, op2);
6652 if(&L)
6653 __ bne(R0, AT, L);
6654 else
6655 __ bne(R0, AT, (int)0);
6656 break;
6657 case 0x06: //below_equal
6658 __ sltu(AT, op2, op1);
6659 if(&L)
6660 __ beq(AT, R0, L);
6661 else
6662 __ beq(AT, R0, (int)0);
6663 break;
6664 default:
6665 Unimplemented();
6666 }
6667 __ nop();
6668 %}
6670 ins_pc_relative(1);
6671 ins_pipe( pipe_alu_branch );
6672 %}
// Branch on a compressed pointer compared against NULL (eq/ne only).
// Fix: the old code bound 'Label &L = *($labl$$label);' — dereferencing
// a possibly-NULL pointer — then tested 'if (&L)', which is always
// true. Test the raw pointer before dereferencing instead.
6674 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6675 match(If cmp (CmpN op1 null));
6676 effect(USE labl);
6678 ins_cost(180);
6679 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6680 "BP$cmp $labl @ cmpN_null_branch" %}
6681 ins_encode %{
6682 Register op1 = $op1$$Register;
6683 Register op2 = R0;
6684 Label* L = $labl$$label; // may be NULL when no target is supplied
6685 int flag = $cmp$$cmpcode;
6687 switch(flag)
6688 {
6689 case 0x01: //equal
6690 if (L != NULL)
6691 __ beq(op1, op2, *L);
6692 else
6693 __ beq(op1, op2, (int)0);
6694 break;
6695 case 0x02: //not_equal
6696 if (L != NULL)
6697 __ bne(op1, op2, *L);
6698 else
6699 __ bne(op1, op2, (int)0);
6700 break;
6701 default:
6702 Unimplemented();
6703 }
6704 __ nop(); // branch delay slot
6705 %}
6706 //TODO: pipe_branchP or create pipe_branchN LEE
6707 ins_pc_relative(1);
6708 ins_pipe( pipe_alu_branch );
6709 %}
// Compressed-pointer vs compressed-pointer conditional branch.
// NOTE(review): same defect as the other branch rules — '&L' is the
// address of a bound reference and always true; the else arms are dead
// and the reference is bound through a possibly-NULL pointer.
6711 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6712 match(If cmp (CmpN op1 op2));
6713 effect(USE labl);
6715 ins_cost(180);
6716 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6717 "BP$cmp $labl" %}
6718 ins_encode %{
6719 Register op1_reg = $op1$$Register;
6720 Register op2_reg = $op2$$Register;
6721 Label &L = *($labl$$label);
6722 int flag = $cmp$$cmpcode;
6724 switch(flag)
6725 {
6726 case 0x01: //equal
6727 if (&L)
6728 __ beq(op1_reg, op2_reg, L);
6729 else
6730 __ beq(op1_reg, op2_reg, (int)0);
6731 break;
6732 case 0x02: //not_equal
6733 if (&L)
6734 __ bne(op1_reg, op2_reg, L);
6735 else
6736 __ bne(op1_reg, op2_reg, (int)0);
6737 break;
6738 case 0x03: //above
6739 __ sltu(AT, op2_reg, op1_reg);
6740 if(&L)
6741 __ bne(R0, AT, L);
6742 else
6743 __ bne(R0, AT, (int)0);
6744 break;
6745 case 0x04: //above_equal
6746 __ sltu(AT, op1_reg, op2_reg);
6747 if(&L)
6748 __ beq(AT, R0, L);
6749 else
6750 __ beq(AT, R0, (int)0);
6751 break;
6752 case 0x05: //below
6753 __ sltu(AT, op1_reg, op2_reg);
6754 if(&L)
6755 __ bne(R0, AT, L);
6756 else
6757 __ bne(R0, AT, (int)0);
6758 break;
6759 case 0x06: //below_equal
6760 __ sltu(AT, op2_reg, op1_reg);
6761 if(&L)
6762 __ beq(AT, R0, L);
6763 else
6764 __ beq(AT, R0, (int)0);
6765 break;
6766 default:
6767 Unimplemented();
6768 }
6769 __ nop();
6770 %}
6771 ins_pc_relative(1);
6772 ins_pipe( pipe_alu_branch );
6773 %}
// Unsigned int compare-and-branch, register vs register.
// NOTE(review): 'if (&L)' is always true (address of a bound reference);
// else arms are dead. The reference is bound through a possibly-NULL
// pointer — should test $labl$$label before dereferencing.
6775 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6776 match( If cmp (CmpU src1 src2) );
6777 effect(USE labl);
6778 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6780 ins_encode %{
6781 Register op1 = $src1$$Register;
6782 Register op2 = $src2$$Register;
6783 Label &L = *($labl$$label);
6784 int flag = $cmp$$cmpcode;
6786 switch(flag)
6787 {
6788 case 0x01: //equal
6789 if (&L)
6790 __ beq(op1, op2, L);
6791 else
6792 __ beq(op1, op2, (int)0);
6793 break;
6794 case 0x02: //not_equal
6795 if (&L)
6796 __ bne(op1, op2, L);
6797 else
6798 __ bne(op1, op2, (int)0);
6799 break;
6800 case 0x03: //above
6801 __ sltu(AT, op2, op1);
6802 if(&L)
6803 __ bne(AT, R0, L);
6804 else
6805 __ bne(AT, R0, (int)0);
6806 break;
6807 case 0x04: //above_equal
6808 __ sltu(AT, op1, op2);
6809 if(&L)
6810 __ beq(AT, R0, L);
6811 else
6812 __ beq(AT, R0, (int)0);
6813 break;
6814 case 0x05: //below
6815 __ sltu(AT, op1, op2);
6816 if(&L)
6817 __ bne(AT, R0, L);
6818 else
6819 __ bne(AT, R0, (int)0);
6820 break;
6821 case 0x06: //below_equal
6822 __ sltu(AT, op2, op1);
6823 if(&L)
6824 __ beq(AT, R0, L);
6825 else
6826 __ beq(AT, R0, (int)0);
6827 break;
6828 default:
6829 Unimplemented();
6830 }
6831 __ nop();
6832 %}
6834 ins_pc_relative(1);
6835 ins_pipe( pipe_alu_branch );
6836 %}
// Unsigned int compare-and-branch against an arbitrary immediate;
// the immediate is first materialized into AT with move().
// NOTE(review): 'if (&L)' is always true — see branchConIU_reg_reg.
6839 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6840 match( If cmp (CmpU src1 src2) );
6841 effect(USE labl);
6842 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6844 ins_encode %{
6845 Register op1 = $src1$$Register;
6846 int val = $src2$$constant;
6847 Label &L = *($labl$$label);
6848 int flag = $cmp$$cmpcode;
6850 __ move(AT, val);
6851 switch(flag)
6852 {
6853 case 0x01: //equal
6854 if (&L)
6855 __ beq(op1, AT, L);
6856 else
6857 __ beq(op1, AT, (int)0);
6858 break;
6859 case 0x02: //not_equal
6860 if (&L)
6861 __ bne(op1, AT, L);
6862 else
6863 __ bne(op1, AT, (int)0);
6864 break;
6865 case 0x03: //above
6866 __ sltu(AT, AT, op1);
6867 if(&L)
6868 __ bne(R0, AT, L);
6869 else
6870 __ bne(R0, AT, (int)0);
6871 break;
6872 case 0x04: //above_equal
6873 __ sltu(AT, op1, AT);
6874 if(&L)
6875 __ beq(AT, R0, L);
6876 else
6877 __ beq(AT, R0, (int)0);
6878 break;
6879 case 0x05: //below
6880 __ sltu(AT, op1, AT);
6881 if(&L)
6882 __ bne(R0, AT, L);
6883 else
6884 __ bne(R0, AT, (int)0);
6885 break;
6886 case 0x06: //below_equal
6887 __ sltu(AT, AT, op1);
6888 if(&L)
6889 __ beq(AT, R0, L);
6890 else
6891 __ beq(AT, R0, (int)0);
6892 break;
6893 default:
6894 Unimplemented();
6895 }
6896 __ nop();
6897 %}
6899 ins_pc_relative(1);
6900 ins_pipe( pipe_alu_branch );
6901 %}
// Signed int compare-and-branch, register vs register (slt + AT for the
// relational forms).
// NOTE(review): 'if (&L)' is always true — see branchConIU_reg_reg.
6903 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6904 match( If cmp (CmpI src1 src2) );
6905 effect(USE labl);
6906 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6908 ins_encode %{
6909 Register op1 = $src1$$Register;
6910 Register op2 = $src2$$Register;
6911 Label &L = *($labl$$label);
6912 int flag = $cmp$$cmpcode;
6914 switch(flag)
6915 {
6916 case 0x01: //equal
6917 if (&L)
6918 __ beq(op1, op2, L);
6919 else
6920 __ beq(op1, op2, (int)0);
6921 break;
6922 case 0x02: //not_equal
6923 if (&L)
6924 __ bne(op1, op2, L);
6925 else
6926 __ bne(op1, op2, (int)0);
6927 break;
6928 case 0x03: //above
6929 __ slt(AT, op2, op1);
6930 if(&L)
6931 __ bne(R0, AT, L);
6932 else
6933 __ bne(R0, AT, (int)0);
6934 break;
6935 case 0x04: //above_equal
6936 __ slt(AT, op1, op2);
6937 if(&L)
6938 __ beq(AT, R0, L);
6939 else
6940 __ beq(AT, R0, (int)0);
6941 break;
6942 case 0x05: //below
6943 __ slt(AT, op1, op2);
6944 if(&L)
6945 __ bne(R0, AT, L);
6946 else
6947 __ bne(R0, AT, (int)0);
6948 break;
6949 case 0x06: //below_equal
6950 __ slt(AT, op2, op1);
6951 if(&L)
6952 __ beq(AT, R0, L);
6953 else
6954 __ beq(AT, R0, (int)0);
6955 break;
6956 default:
6957 Unimplemented();
6958 }
6959 __ nop();
6960 %}
6962 ins_pc_relative(1);
6963 ins_pipe( pipe_alu_branch );
6964 %}
// Signed int compare against zero: uses the single-operand MIPS
// branches (bgtz/bgez/bltz/blez), no AT scratch needed.
// NOTE(review): 'if (&L)' is always true — see branchConIU_reg_reg.
6966 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
6967 match( If cmp (CmpI src1 src2) );
6968 effect(USE labl);
6969 ins_cost(170);
6970 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
6972 ins_encode %{
6973 Register op1 = $src1$$Register;
6974 // int val = $src2$$constant;
6975 Label &L = *($labl$$label);
6976 int flag = $cmp$$cmpcode;
6978 //__ move(AT, val);
6979 switch(flag)
6980 {
6981 case 0x01: //equal
6982 if (&L)
6983 __ beq(op1, R0, L);
6984 else
6985 __ beq(op1, R0, (int)0);
6986 break;
6987 case 0x02: //not_equal
6988 if (&L)
6989 __ bne(op1, R0, L);
6990 else
6991 __ bne(op1, R0, (int)0);
6992 break;
6993 case 0x03: //greater
6994 if(&L)
6995 __ bgtz(op1, L);
6996 else
6997 __ bgtz(op1, (int)0);
6998 break;
6999 case 0x04: //greater_equal
7000 if(&L)
7001 __ bgez(op1, L);
7002 else
7003 __ bgez(op1, (int)0);
7004 break;
7005 case 0x05: //less
7006 if(&L)
7007 __ bltz(op1, L);
7008 else
7009 __ bltz(op1, (int)0);
7010 break;
7011 case 0x06: //less_equal
7012 if(&L)
7013 __ blez(op1, L);
7014 else
7015 __ blez(op1, (int)0);
7016 break;
7017 default:
7018 Unimplemented();
7019 }
7020 __ nop();
7021 %}
7023 ins_pc_relative(1);
7024 ins_pipe( pipe_alu_branch );
7025 %}
// Signed int compare-and-branch against an arbitrary immediate,
// materialized into AT first (note AT is then reused as the slt result).
// NOTE(review): 'if (&L)' is always true — see branchConIU_reg_reg.
7028 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7029 match( If cmp (CmpI src1 src2) );
7030 effect(USE labl);
7031 ins_cost(200);
7032 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7034 ins_encode %{
7035 Register op1 = $src1$$Register;
7036 int val = $src2$$constant;
7037 Label &L = *($labl$$label);
7038 int flag = $cmp$$cmpcode;
7040 __ move(AT, val);
7041 switch(flag)
7042 {
7043 case 0x01: //equal
7044 if (&L)
7045 __ beq(op1, AT, L);
7046 else
7047 __ beq(op1, AT, (int)0);
7048 break;
7049 case 0x02: //not_equal
7050 if (&L)
7051 __ bne(op1, AT, L);
7052 else
7053 __ bne(op1, AT, (int)0);
7054 break;
7055 case 0x03: //greater
7056 __ slt(AT, AT, op1);
7057 if(&L)
7058 __ bne(R0, AT, L);
7059 else
7060 __ bne(R0, AT, (int)0);
7061 break;
7062 case 0x04: //greater_equal
7063 __ slt(AT, op1, AT);
7064 if(&L)
7065 __ beq(AT, R0, L);
7066 else
7067 __ beq(AT, R0, (int)0);
7068 break;
7069 case 0x05: //less
7070 __ slt(AT, op1, AT);
7071 if(&L)
7072 __ bne(R0, AT, L);
7073 else
7074 __ bne(R0, AT, (int)0);
7075 break;
7076 case 0x06: //less_equal
7077 __ slt(AT, AT, op1);
7078 if(&L)
7079 __ beq(AT, R0, L);
7080 else
7081 __ beq(AT, R0, (int)0);
7082 break;
7083 default:
7084 Unimplemented();
7085 }
7086 __ nop();
7087 %}
7089 ins_pc_relative(1);
7090 ins_pipe( pipe_alu_branch );
7091 %}
// Unsigned int compare against zero. Arms are specialized: 'above 0'
// is 'ne', 'above_equal 0' is always taken, 'below 0' is unsatisfiable.
// NOTE(review): 'if (&L)' is always true — see branchConIU_reg_reg.
// NOTE(review): the 'below' arm bails out with a bare 'return;' —
// emitting nothing and skipping the delay-slot nop. It is unreachable
// for a well-formed unsigned compare, but is inconsistent with the
// always-taken 'above_equal' arm, which does emit a branch.
7093 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7094 match( If cmp (CmpU src1 zero) );
7095 effect(USE labl);
7096 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7098 ins_encode %{
7099 Register op1 = $src1$$Register;
7100 Label &L = *($labl$$label);
7101 int flag = $cmp$$cmpcode;
7103 switch(flag)
7104 {
7105 case 0x01: //equal
7106 if (&L)
7107 __ beq(op1, R0, L);
7108 else
7109 __ beq(op1, R0, (int)0);
7110 break;
7111 case 0x02: //not_equal
7112 if (&L)
7113 __ bne(op1, R0, L);
7114 else
7115 __ bne(op1, R0, (int)0);
7116 break;
7117 case 0x03: //above
7118 if(&L)
7119 __ bne(R0, op1, L);
7120 else
7121 __ bne(R0, op1, (int)0);
7122 break;
7123 case 0x04: //above_equal
7124 if(&L)
7125 __ beq(R0, R0, L);
7126 else
7127 __ beq(R0, R0, (int)0);
7128 break;
7129 case 0x05: //below
7130 return;
7131 break;
7132 case 0x06: //below_equal
7133 if(&L)
7134 __ beq(op1, R0, L);
7135 else
7136 __ beq(op1, R0, (int)0);
7137 break;
7138 default:
7139 Unimplemented();
7140 }
7141 __ nop();
7142 %}
7144 ins_pc_relative(1);
7145 ins_pipe( pipe_alu_branch );
7146 %}
// Unsigned 32-bit compare-and-branch: register vs. 16-bit immediate.
// above_equal/below can use sltiu directly (imm fits the I-format field);
// above/below_equal need the reversed operand order (imm < op1), which
// sltiu cannot express, so those cases materialize the imm into AT first.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);   // AT := (imm <u op1)
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val); // AT := (op1 <u imm); branch when false
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val); // AT := (op1 <u imm)
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);   // AT := (imm <u op1); branch when false
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare-and-branch, register vs. register.
// Unlike the integer variants above, every case fills its own delay slot
// with delayed()->nop() instead of sharing a trailing nop.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);   // AT := (src2 < src1)
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);   // AT := (src1 < src2); branch when false
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);   // AT := (src1 < src2)
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);   // AT := (src2 < src1); branch when false

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare-and-branch against a small immediate, implemented
// as AT := src1 - imm (daddiu with the negated constant), then a branch on
// AT's sign/zero.  The immL16_sub operand presumably guarantees -val fits
// the 16-bit daddiu field and that the subtraction cannot overflow —
// TODO confirm against the operand definition earlier in this file.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);   // AT := src1 - imm
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                       // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 32-bit analogue of branchConL_reg_immL16_sub: AT := src1 - imm
// via addiu32, then branch on AT's sign/zero.  immI16_sub presumably
// rules out overflow of the subtraction — TODO confirm against the
// operand definition earlier in this file.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);  // AT := src1 - imm (32-bit)
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                       // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare against zero: uses the single-operand branches
// (bgtz/bgez/blez) where available; "less" goes through slt+bne.
// Delay slot is filled once at the end with delayed()->nop().
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);   // AT := (src1 < 0); no bltz shortcut used here
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();            // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Single-precision FP compare-and-branch.  The relational cases use the
// unordered compares (c_ule_s / c_ult_s) with an inverted branch (bc1f),
// so a NaN operand makes greater/greater_equal fall through — the IEEE
// "unordered compares false" behavior the IR expects.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);   // !(src1 <= src2 || unordered)
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);   // !(src1 < src2 || unordered)
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                           // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Double-precision FP compare-and-branch; same NaN/unordered scheme as
// branchConF_reg_reg (unordered compares + inverted bc1f branches).
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs.  Hence c_eq_d + bc1f here.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);   // !(src1 <= src2 || unordered)
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);   // !(src1 < src2 || unordered)
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                           // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime; encoding is shared via the
// Java_To_Runtime enc_class defined elsewhere in this file.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors

// Acquire barrier: emits nothing — acquire semantics are provided by the
// preceding load/CAS on this target, so the node is a zero-size no-op.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: full hardware barrier (MIPS sync).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with FastLock: the CAS in the prior FastLock
// already provides acquire semantics, so this encodes to nothing.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: zero-size no-op on this target (release semantics
// come from the following store/CAS).
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: full hardware barrier (MIPS sync).
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with FastUnlock: the unlock sequence already
// releases, so this encodes to nothing.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Volatile barrier: full sync, elided entirely on uniprocessors.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier proven redundant by the matcher (a store-load barrier
// already follows): zero-size no-op.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// StoreStore barrier: encodes to nothing on this target.
// NOTE(review): assumes stores are not reordered with later stores here —
// confirm against the port's memory model assumptions.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long as a pointer; plain register move, elided
// when source and destination registers coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a long; plain register move, elided
// when source and destination registers coincide.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// Raw bit move float -> int register (mfc1), no conversion.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Raw bit move int -> float register (mtc1), no conversion.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Raw 64-bit bit move double -> long register (dmfc1), no conversion.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Raw 64-bit bit move long -> double register (dmtc1), no conversion.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// dst := src iff (tmp1 <cop> tmp2), signed 32-bit compare.
// AT holds the comparison result; movz/movn select on AT zero/non-zero.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff equal (32-bit subtract)
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst := src iff (tmp1 <cop> tmp2), unsigned 64-bit pointer compare
// (subu here is the port's full-width subtract; cf. subu32 above).
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst := src iff (tmp1 <cop> tmp2), unsigned compare of compressed oops
// (32-bit subtract for equality, sltu for the relational cases).
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Pointer conditional move keyed on an unsigned compressed-oop compare.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Compressed-oop conditional move keyed on an unsigned pointer compare.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Pointer conditional move keyed on a double compare; selects via the FP
// condition bit (movt/movf).  Relational cases invert an ordered/unordered
// compare so NaN makes the move not happen for greater/greater_equal.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);   // NOTE(review): branch variants use c_ule_d here — confirm ordered vs unordered is intended
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Compressed-oop conditional move keyed on an unsigned narrow-oop compare.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// dst := src iff (tmp1 <cop> tmp2), unsigned 32-bit compare.
// Registers hold sign-extended ints, so the full-width subu/sltu compare
// presumably agrees with 32-bit unsigned ordering — TODO confirm the
// port's sign-extension invariant for mRegI values.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Int conditional move keyed on a signed 64-bit (long) compare.
// NOTE(review): assumes the port's subu/slt operate on the full 64-bit
// register width (cf. subu32 used for 32-bit ints) — confirm in the
// macro assembler.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Pointer conditional move keyed on a signed 64-bit (long) compare;
// same encoding scheme as cmovI_cmpL_reg_reg.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Int conditional move keyed on a double compare (FP condition bit +
// movt/movf); see cmovP_cmpD_reg_reg for the selection scheme.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg.  The change in
        // branchConD_reg_reg fixed a bug (c_ueq_d cannot distinguish NaN
        // from equal); the same change is made here.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Pointer conditional move keyed on an unsigned pointer compare.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Pointer conditional move keyed on a SIGNED int compare (cmpOp + slt).
// The original case labels said above/below; corrected to the signed
// names since slt is a signed comparison.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (was mislabeled "above"; slt is signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (was mislabeled "above_equal")
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (was mislabeled "below")
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (was mislabeled "below_equal")
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Compressed-oop conditional move keyed on a SIGNED int compare.
// The original case labels said above/below; corrected to the signed
// names since slt is a signed comparison.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (was mislabeled "above"; slt is signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (was mislabeled "above_equal")
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (was mislabeled "below")
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (was mislabeled "below_equal")
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Branchless conditional move of a long, selected by a 32-bit signed
// integer comparison: dst = src iff (tmp1 cop tmp2). The condition is
// materialized into AT, then movz/movn performs the select.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);      // AT = (op1 > op2), signed
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);      // move when NOT (op1 < op2)
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);      // move when NOT (op1 > op2)
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Branchless conditional move of a long, selected by a 64-bit signed
// long comparison: dst = src iff (tmp1 cop tmp2). Equality uses a full
// 64-bit subu; ordering uses the 64-bit signed slt.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2 (wrap is harmless for equality)
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);    // move when NOT (opr1 < opr2)
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);    // move when NOT (opr1 > opr2)
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Branchless conditional move of a long, selected by an *unsigned*
// comparison of two compressed (narrow) oops: dst = src iff
// (tmp1 cop tmp2). cmpOpU + sltu give the correct unsigned ordering.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);     // AT = (op1 > op2), unsigned
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);     // move when NOT below
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);     // move when NOT above
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Branchless conditional move of a long, selected by a double compare.
// c.cond.d sets the FPU condition flag; movt copies when the flag is set,
// movf when it is clear. The ordered/unordered variant of each compare
// (ole/olt vs ult/ule) controls which way NaN operands fall.
// NOTE(review): this mirrors the condition selection in the branchConD
// patterns — verify NaN behavior matches the mask C2 chose for $cop.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);                 // move iff op1 == op2 (ordered)
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);                 // move iff NOT equal (incl. unordered)
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);      // flag = ordered && op1 <= op2
        __ movf(dst, src);                 // move iff !(op1 <= op2)
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);      // flag = ordered && op1 < op2
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);      // flag = unordered || op1 < op2
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);      // flag = unordered || op1 <= op2
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a double compare. There is no
// integer condition register to use with movz/movn here, so the select is
// done with a conditional FP branch around a mov_d: branch to L (skip the
// move) when the condition does NOT hold. Each branch is followed by an
// explicit nop to fill the delay slot.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);                   // skip move unless equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: see branchConD_reg_reg; the fix applied there
        // (branch on bc1t for the not-equal case) is mirrored here.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);                   // skip move when equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2); // flag = ordered && op1 <= op2
        __ bc1t(L);                   // skip move unless op1 > op2
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2); // flag = unordered || op1 < op2
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float selected by a 32-bit signed integer compare.
// movz/movn cannot target FP registers on this path, so the select is a
// conditional branch around mov_s: jump to L (skip the move) when the
// condition does NOT hold. Branch delay slots are filled with nop.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);          // skip move unless op1 == op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);         // AT = (op1 > op2), signed
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);         // skip when op1 < op2
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);         // skip when op1 > op2
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a 32-bit signed integer
// compare. Same branch-around-move scheme as cmovF_cmpI_reg_reg, with
// mov_d instead of mov_s.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);          // skip move unless equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);         // AT = (op1 > op2), signed
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);         // skip when op1 < op2
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);         // skip when op1 > op2
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a pointer compare.
// Branch-around-move scheme (see cmovD_cmpI_reg_reg).
// NOTE(review): the ordering cases use signed slt while pointer compares
// are normally unsigned; in practice C2 should only generate eq/ne for
// CmpP — confirm the ordered cases are unreachable or switch to sltu.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);          // skip move unless equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9061 //FIXME
// Branchless conditional move of an int selected by a float compare.
// c.cond.s sets the FPU condition flag; movt/movf copy when the flag is
// set/clear. Ordered vs unordered compare variants control NaN behavior
// (see cmovL_cmpD_reg_reg for the same scheme on doubles).
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);                 // move iff equal (ordered)
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);                 // move iff NOT equal (incl. unordered)
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);      // flag = ordered && op1 <= op2
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);      // flag = ordered && op1 < op2
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);      // flag = unordered || op1 < op2
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);      // flag = unordered || op1 <= op2
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float selected by a float compare. FP-to-FP
// select is done by branching around mov_s on the FPU condition flag
// (bc1f/bc1t), with a nop in each branch delay slot.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);                        // skip move unless equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);                        // skip move when equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);      // flag = ordered && op1 <= op2
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);      // flag = unordered || op1 < op2
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9177 // Manifest a CmpL result in an integer register. Very painful.
9178 // This is the test to avoid.
9179 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9180 match(Set dst (CmpL3 src1 src2));
9181 ins_cost(1000);
9182 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9183 ins_encode %{
9184 Register opr1 = as_Register($src1$$reg);
9185 Register opr2 = as_Register($src2$$reg);
9186 Register dst = as_Register($dst$$reg);
9188 Label Done;
9190 __ subu(AT, opr1, opr2);
9191 __ bltz(AT, Done);
9192 __ delayed()->daddiu(dst, R0, -1);
9194 __ move(dst, 1);
9195 __ movz(dst, R0, AT);
9197 __ bind(Done);
9198 %}
9199 ins_pipe( pipe_slow );
9200 %}
9202 //
// less_result = -1
9204 // greater_result = 1
9205 // equal_result = 0
9206 // nan_result = -1
9207 //
// Manifest a CmpF result in an integer register:
//   dst = -1 if src1 < src2 or unordered (NaN), 0 if equal, +1 if greater
// (matching the comment block above: nan_result == -1).
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for (src1 < src2) *and* for unordered, which yields
    // the required -1 for NaN. The delay-slot daddiu sets dst = -1 on
    // both paths; it is overwritten below when the branch falls through.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Not less/NaN: dst = 0 if equal, else +1.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result in an integer register:
//   dst = -1 if src1 < src2 or unordered (NaN), 0 if equal, +1 if greater.
// Double-precision twin of cmpF3_reg_reg; see the notes there.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for less-than and for unordered (NaN -> -1).
    // Delay slot sets dst = -1; overwritten on the fall-through path.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Not less/NaN: dst = 0 if equal, else +1.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero-fill a region of memory word by word.
// Per the 2012/9/21 note (and matching x86): $cnt is the number of
// *doublewords* (8-byte heap words) to clear, not bytes — each loop
// iteration stores one 8-byte zero and decrements the count by one.
// $base points at the first word. Clobbers AT and T9.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);    // nothing to do for a zero count
    __ nop();
    __ move(AT, base);       // AT = running store address

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Intrinsic for String.compareTo: lexicographically compare two UTF-16
// char sequences. Result is the difference of the first mismatching
// chars, or the length difference when one string is a prefix of the
// other. All four input registers are killed. Clobbers AT.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // result = cnt1 - cnt2: the answer when no char differs in the
    // common prefix.
    __ subu(result, cnt1, cnt2);

    // cnt1 = min(cnt1, cnt2): number of chars actually compared.
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register.
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);
    __ delayed()->lhu(AT, str1, 0);;      // delay slot: load char from str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);    // delay slot: advance str1 (always)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // delay slot: count down; Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);            // difference of mismatching chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9328 // intrinsic optimization
// Intrinsic for String.equals: result = 1 if the two char sequences of
// length $cnt are identical (or if str1 and str2 alias), else 0.
// NOTE(review): StrEquals is only generated after the lengths are known
// equal, so a single count suffices — confirm against the matcher.
// Inputs str1/str2/cnt are killed; temp and AT are clobbered.
// On MIPS the instruction after each branch sits in the delay slot and
// executes on BOTH paths — the result register is deliberately re-set on
// every iteration to exploit this.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done);       // same char[]? delay slot sets result = 1
    __ daddiu(result, R0, 1);

    __ bind(Loop);                  // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1);       // delay slot: count exhausted -> equal

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);  // delay slot: tentatively "not equal";
                                         // re-set to 1 at the top if loop continues
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);    // delay slot: count down; Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9369 //----------Arithmetic Instructions-------------------------------------------
9370 //----------Addition Instructions---------------------------------------------
9371 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9372 match(Set dst (AddI src1 src2));
9374 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9375 ins_encode %{
9376 Register dst = $dst$$Register;
9377 Register src1 = $src1$$Register;
9378 Register src2 = $src2$$Register;
9379 __ addu32(dst, src1, src2);
9380 %}
9381 ins_pipe( ialu_regI_regI );
9382 %}
9384 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9385 match(Set dst (AddI src1 src2));
9387 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9388 ins_encode %{
9389 Register dst = $dst$$Register;
9390 Register src1 = $src1$$Register;
9391 int imm = $src2$$constant;
9393 if(Assembler::is_simm16(imm)) {
9394 __ addiu32(dst, src1, imm);
9395 } else {
9396 __ move(AT, imm);
9397 __ addu32(dst, src1, AT);
9398 }
9399 %}
9400 ins_pipe( ialu_regI_regI );
9401 %}
9403 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9404 match(Set dst (AddP src1 src2));
9406 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9408 ins_encode %{
9409 Register dst = $dst$$Register;
9410 Register src1 = $src1$$Register;
9411 Register src2 = $src2$$Register;
9412 __ daddu(dst, src1, src2);
9413 %}
9415 ins_pipe( ialu_regI_regI );
9416 %}
9418 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9419 match(Set dst (AddP src1 (ConvI2L src2)));
9421 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9423 ins_encode %{
9424 Register dst = $dst$$Register;
9425 Register src1 = $src1$$Register;
9426 Register src2 = $src2$$Register;
9427 __ daddu(dst, src1, src2);
9428 %}
9430 ins_pipe( ialu_regI_regI );
9431 %}
9433 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9434 match(Set dst (AddP src1 src2));
9436 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9437 ins_encode %{
9438 Register src1 = $src1$$Register;
9439 long src2 = $src2$$constant;
9440 Register dst = $dst$$Register;
9442 if(Assembler::is_simm16(src2)) {
9443 __ daddiu(dst, src1, src2);
9444 } else {
9445 __ set64(AT, src2);
9446 __ daddu(dst, src1, AT);
9447 }
9448 %}
9449 ins_pipe( ialu_regI_imm16 );
9450 %}
9452 // Add Long Register with Register
9453 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9454 match(Set dst (AddL src1 src2));
9455 ins_cost(200);
9456 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9458 ins_encode %{
9459 Register dst_reg = as_Register($dst$$reg);
9460 Register src1_reg = as_Register($src1$$reg);
9461 Register src2_reg = as_Register($src2$$reg);
9463 __ daddu(dst_reg, src1_reg, src2_reg);
9464 %}
9466 ins_pipe( ialu_regL_regL );
9467 %}
9469 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9470 %{
9471 match(Set dst (AddL src1 src2));
9473 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9474 ins_encode %{
9475 Register dst_reg = as_Register($dst$$reg);
9476 Register src1_reg = as_Register($src1$$reg);
9477 int src2_imm = $src2$$constant;
9479 __ daddiu(dst_reg, src1_reg, src2_imm);
9480 %}
9482 ins_pipe( ialu_regL_regL );
9483 %}
// Long add of a widened int and a 16-bit immediate:
// dst = (long)src1 + imm. The ConvI2L is free because MIPS64 keeps int
// values sign-extended in their 64-bit registers, so daddiu on the int
// register already operates on the widened value.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
9501 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9502 match(Set dst (AddL (ConvI2L src1) src2));
9503 ins_cost(200);
9504 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9506 ins_encode %{
9507 Register dst_reg = as_Register($dst$$reg);
9508 Register src1_reg = as_Register($src1$$reg);
9509 Register src2_reg = as_Register($src2$$reg);
9511 __ daddu(dst_reg, src1_reg, src2_reg);
9512 %}
9514 ins_pipe( ialu_regL_regL );
9515 %}
9517 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9518 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9519 ins_cost(200);
9520 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9522 ins_encode %{
9523 Register dst_reg = as_Register($dst$$reg);
9524 Register src1_reg = as_Register($src1$$reg);
9525 Register src2_reg = as_Register($src2$$reg);
9527 __ daddu(dst_reg, src1_reg, src2_reg);
9528 %}
9530 ins_pipe( ialu_regL_regL );
9531 %}
9533 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9534 match(Set dst (AddL src1 (ConvI2L src2)));
9535 ins_cost(200);
9536 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9538 ins_encode %{
9539 Register dst_reg = as_Register($dst$$reg);
9540 Register src1_reg = as_Register($src1$$reg);
9541 Register src2_reg = as_Register($src2$$reg);
9543 __ daddu(dst_reg, src1_reg, src2_reg);
9544 %}
9546 ins_pipe( ialu_regL_regL );
9547 %}
9549 //----------Subtraction Instructions-------------------------------------------
9550 // Integer Subtraction Instructions
9551 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9552 match(Set dst (SubI src1 src2));
9553 ins_cost(100);
9555 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9556 ins_encode %{
9557 Register dst = $dst$$Register;
9558 Register src1 = $src1$$Register;
9559 Register src2 = $src2$$Register;
9560 __ subu32(dst, src1, src2);
9561 %}
9562 ins_pipe( ialu_regI_regI );
9563 %}
9565 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9566 match(Set dst (SubI src1 src2));
9567 ins_cost(80);
9569 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9570 ins_encode %{
9571 Register dst = $dst$$Register;
9572 Register src1 = $src1$$Register;
9573 __ addiu32(dst, src1, -1 * $src2$$constant);
9574 %}
9575 ins_pipe( ialu_regI_regI );
9576 %}
9578 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9579 match(Set dst (SubI zero src));
9580 ins_cost(80);
9582 format %{ "neg $dst, $src #@negI_Reg" %}
9583 ins_encode %{
9584 Register dst = $dst$$Register;
9585 Register src = $src$$Register;
9586 __ subu32(dst, R0, src);
9587 %}
9588 ins_pipe( ialu_regI_regI );
9589 %}
9591 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9592 match(Set dst (SubL zero src));
9593 ins_cost(80);
9595 format %{ "neg $dst, $src #@negL_Reg" %}
9596 ins_encode %{
9597 Register dst = $dst$$Register;
9598 Register src = $src$$Register;
9599 __ subu(dst, R0, src);
9600 %}
9601 ins_pipe( ialu_regI_regI );
9602 %}
9604 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9605 match(Set dst (SubL src1 src2));
9606 ins_cost(80);
9608 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9609 ins_encode %{
9610 Register dst = $dst$$Register;
9611 Register src1 = $src1$$Register;
9612 __ daddiu(dst, src1, -1 * $src2$$constant);
9613 %}
9614 ins_pipe( ialu_regI_regI );
9615 %}
9617 // Subtract Long Register with Register.
9618 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9619 match(Set dst (SubL src1 src2));
9620 ins_cost(100);
9621 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9622 ins_encode %{
9623 Register dst = as_Register($dst$$reg);
9624 Register src1 = as_Register($src1$$reg);
9625 Register src2 = as_Register($src2$$reg);
9627 __ subu(dst, src1, src2);
9628 %}
9629 ins_pipe( ialu_regL_regL );
9630 %}
9632 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9633 match(Set dst (SubL src1 (ConvI2L src2)));
9634 ins_cost(100);
9635 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9636 ins_encode %{
9637 Register dst = as_Register($dst$$reg);
9638 Register src1 = as_Register($src1$$reg);
9639 Register src2 = as_Register($src2$$reg);
9641 __ subu(dst, src1, src2);
9642 %}
9643 ins_pipe( ialu_regL_regL );
9644 %}
9646 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9647 match(Set dst (SubL (ConvI2L src1) src2));
9648 ins_cost(200);
9649 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9650 ins_encode %{
9651 Register dst = as_Register($dst$$reg);
9652 Register src1 = as_Register($src1$$reg);
9653 Register src2 = as_Register($src2$$reg);
9655 __ subu(dst, src1, src2);
9656 %}
9657 ins_pipe( ialu_regL_regL );
9658 %}
9660 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9661 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9662 ins_cost(200);
9663 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9664 ins_encode %{
9665 Register dst = as_Register($dst$$reg);
9666 Register src1 = as_Register($src1$$reg);
9667 Register src2 = as_Register($src2$$reg);
9669 __ subu(dst, src1, src2);
9670 %}
9671 ins_pipe( ialu_regL_regL );
9672 %}
9674 // Integer MOD with Register
// Integer remainder: dst = src1 % src2, computed as div + mfhi (the
// 32-bit divide leaves the remainder in HI).
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The Loongson gsmod path is deliberately disabled (kept for
    // reference): measurements from 2016.08.10 showed gsmod slower than
    // div+mfhi, so the condition was changed from UseLoongsonISA to 0.
    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);    // remainder lives in HI after div
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long remainder: dst = src1 % src2. Uses the Loongson gsdmod extension
// when available, otherwise ddiv + mfhi (remainder in HI).
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);    // remainder lives in HI after ddiv
    }
  %}
  ins_pipe( pipe_slow );
%}
9719 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9720 match(Set dst (MulI src1 src2));
9722 ins_cost(300);
9723 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9724 ins_encode %{
9725 Register src1 = $src1$$Register;
9726 Register src2 = $src2$$Register;
9727 Register dst = $dst$$Register;
9729 __ mul(dst, src1, src2);
9730 %}
9731 ins_pipe( ialu_mult );
9732 %}
// Fused multiply-add: dst = src1 * src2 + src3, using the HI/LO
// accumulator. mtlo seeds LO with src3 (HI is left stale), madd
// accumulates the product into HI:LO, and mflo reads the result. Only
// the low 32 bits matter for an int result, and those are unaffected by
// the stale HI contents.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    __ mtlo(src3);           // LO = src3
    __ madd(src1, src2);     // HI:LO += src1 * src2
    __ mflo(dst);            // dst = low 32 bits of the sum
  %}
  ins_pipe( ialu_mult );
%}
// Integer divide: dst = src1 / src2 (quotient in LO).
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
         We must trap an exception manually. */
    // teq traps when src2 == R0 (divide by zero); trap code 0x7 is the
    // divide-by-zero code, which the runtime turns into the Java
    // ArithmeticException.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // nops keep a safe distance between div and the mflo that reads
      // LO (classic MIPS HI/LO hazard spacing).
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
9779 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9780 match(Set dst (DivF src1 src2));
9782 ins_cost(300);
9783 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
9784 ins_encode %{
9785 FloatRegister src1 = $src1$$FloatRegister;
9786 FloatRegister src2 = $src2$$FloatRegister;
9787 FloatRegister dst = $dst$$FloatRegister;
9789 /* Here do we need to trap an exception manually ? */
9790 __ div_s(dst, src1, src2);
9791 %}
9792 ins_pipe( pipe_slow );
9793 %}
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));
  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    // Double-precision divide: fd <- fs / ft. Unlike the integer divide
    // rules, no explicit divide-by-zero trap is emitted here.
    FloatRegister fd = as_FloatRegister($dst$$reg);
    FloatRegister fs = as_FloatRegister($src1$$reg);
    FloatRegister ft = as_FloatRegister($src2$$reg);
    __ div_d(fd, fs, ft);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply: low 64 bits of the 128-bit product.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Loongson 3-operand multiply writes dst directly.
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long = long * (int widened by ConvI2L). No widening code is emitted —
// assumes int values are kept sign-extended in 64-bit registers, so the
// ConvI2L is free; TODO confirm that invariant for all producers of src2.
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long division.
// NOTE(review): unlike divI_Reg_Reg above, no teq divide-by-zero trap is
// emitted here — verify that the zero check is guaranteed elsewhere.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    // Single-precision add: fd <- fs + ft.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src1$$FloatRegister;
    FloatRegister ft = $src2$$FloatRegister;
    __ add_s(fd, fs, ft);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    // Single-precision subtract: fd <- fs - ft.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src1$$FloatRegister;
    FloatRegister ft = $src2$$FloatRegister;
    __ sub_s(fd, fs, ft);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    // Double-precision add: fd <- fs + ft.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src1$$FloatRegister;
    FloatRegister ft = $src2$$FloatRegister;
    __ add_d(fd, fs, ft);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    // Double-precision subtract: fd <- fs - ft.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src1$$FloatRegister;
    FloatRegister ft = $src2$$FloatRegister;
    __ sub_d(fd, fs, ft);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    // Single-precision negate: fd <- -fs.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src$$FloatRegister;
    __ neg_s(fd, fs);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    // Double-precision negate: fd <- -fs.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src$$FloatRegister;
    __ neg_d(fd, fs);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    // Single-precision multiply: fd <- fs * ft.
    FloatRegister fd = as_FloatRegister($dst$$reg);
    FloatRegister fs = as_FloatRegister($src1$$reg);
    FloatRegister ft = as_FloatRegister($src2$$reg);
    __ mul_s(fd, fs, ft);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Float multiply-add: dst = src1 * src2 + src3 in one madd.s.
// NOTE(review): on some implementations madd.s uses fused (single-
// rounding) arithmetic, which differs from Java's separately rounded
// mul-then-add — presumably why the rule is disabled via cost; confirm.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Multiply two double-precision floating point numbers.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    // Double-precision multiply: fd <- fs * ft.
    FloatRegister fd = as_FloatRegister($dst$$reg);
    FloatRegister fs = as_FloatRegister($src1$$reg);
    FloatRegister ft = as_FloatRegister($src2$$reg);
    __ mul_d(fd, fs, ft);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double multiply-add: dst = src1 * src2 + src3 in one madd.d.
// NOTE(review): on some implementations madd.d uses fused (single-
// rounding) arithmetic, which differs from Java's separately rounded
// mul-then-add — presumably why the rule is disabled via cost; confirm.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    // Single-precision absolute value: fd <- |fs|.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src$$FloatRegister;
    __ abs_s(fd, fs);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Intrinsics for math_native:
// AbsD SqrtD CosD SinD TanD LogD Log10D

instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    // Double-precision absolute value: fd <- |fs|.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src$$FloatRegister;
    __ abs_d(fd, fs);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    // Double-precision square root: fd <- sqrt(fs).
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src$$FloatRegister;
    __ sqrt_d(fd, fs);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    // Matches the float-sqrt idiom (float)Math.sqrt((double)f); a direct
    // single-precision sqrt yields the same correctly rounded result.
    FloatRegister fd = $dst$$FloatRegister;
    FloatRegister fs = $src$$FloatRegister;
    __ sqrt_s(fd, fs);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions-------------

// And Instructions
// And Register with Immediate

// Int AND with an arbitrary 32-bit immediate: materialize the constant
// into the scratch register AT first. The cheaper 16-bit form below is
// preferred by the matcher when the constant fits.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Int AND with an immediate in [0, 65535]: andi zero-extends its 16-bit
// immediate, so one instruction suffices.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Int AND with a mask of the form (1 << size) - 1: extract the low
// 'size' bits with ext. (is_int_mask presumably returns the mask width
// in bits — confirm in the MIPS assembler.)
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long AND with a (1 << size) - 1 mask: dext is the 64-bit counterpart
// of ext.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Int XOR with an immediate in [0, 65535]: xori zero-extends its 16-bit
// immediate.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int NOT: src1 ^ -1. gsorn(d, a, b) computes a | ~b, so
// gsorn(dst, R0, src) yields ~src in a single Loongson instruction.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// NOT of a long truncated to int. NOTE(review): gsorn produces the full
// 64-bit complement of the long source and the result is not explicitly
// re-sign-extended to 32 bits — confirm consumers tolerate a
// non-sign-extended int register here.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long XOR with an immediate in [0, 65535]: single xori (zero-extended
// 16-bit immediate).
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10181 /*
10182 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10183 match(Set dst (XorL src1 M1));
10184 predicate(UseLoongsonISA);
10185 ins_cost(60);
10187 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10188 ins_encode %{
10189 Register dst = $dst$$Register;
10190 Register src = $src1$$Register;
10192 __ gsorn(dst, R0, src);
10193 %}
10194 ins_pipe( ialu_regI_regI );
10195 %}
10196 */
// (0xFF & LoadB) is exactly an unsigned byte load.
// Fixed: the format string previously said "lhu" (load halfword) even
// though the encoding emits an unsigned byte load (load_UB_enc).
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// (LoadB & 0xFF) is exactly an unsigned byte load (mirror of
// lbu_and_lmask with the mask on the right).
// Fixed: the format string previously said "lhu" (load halfword) even
// though the encoding emits an unsigned byte load (load_UB_enc).
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    // Bitwise AND of two int registers.
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Loongson fused and-not / or-not rules: fold (XorI x -1), i.e. a
// bitwise NOT, into the AND/OR using one gsandn/gsorn instruction.

// dst = src1 & ~src2
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst = src1 | ~src2
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst = ~src1 & src2 — note the operands are swapped into gsandn.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst = ~src1 | src2 — note the operands are swapped into gsorn.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    // 64-bit bitwise AND.
    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with an int operand widened by ConvI2L. No widening code is
// emitted — assumes int values stay sign-extended in 64-bit registers
// (TODO confirm that invariant).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long AND with an immediate in [0, 65535]: andi zero-extends its 16-bit
// immediate.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same AND but consumed as an int (ConvL2I of the result). No separate
// truncation is needed: the mask keeps only the low 16 bits.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10346 /*
10347 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10348 match(Set dst (AndL src1 (XorL src2 M1)));
10349 predicate(UseLoongsonISA);
10351 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10352 ins_encode %{
10353 Register dst = $dst$$Register;
10354 Register src1 = $src1$$Register;
10355 Register src2 = $src2$$Register;
10357 __ gsandn(dst, src1, src2);
10358 %}
10359 ins_pipe( ialu_regI_regI );
10360 %}
10361 */
10363 /*
10364 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10365 match(Set dst (OrL src1 (XorL src2 M1)));
10366 predicate(UseLoongsonISA);
10368 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10369 ins_encode %{
10370 Register dst = $dst$$Register;
10371 Register src1 = $src1$$Register;
10372 Register src2 = $src2$$Register;
10374 __ gsorn(dst, src1, src2);
10375 %}
10376 ins_pipe( ialu_regI_regI );
10377 %}
10378 */
10380 /*
10381 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10382 match(Set dst (AndL (XorL src1 M1) src2));
10383 predicate(UseLoongsonISA);
10385 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10386 ins_encode %{
10387 Register dst = $dst$$Register;
10388 Register src1 = $src1$$Register;
10389 Register src2 = $src2$$Register;
10391 __ gsandn(dst, src2, src1);
10392 %}
10393 ins_pipe( ialu_regI_regI );
10394 %}
10395 */
10397 /*
10398 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10399 match(Set dst (OrL (XorL src1 M1) src2));
10400 predicate(UseLoongsonISA);
10402 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10403 ins_encode %{
10404 Register dst = $dst$$Register;
10405 Register src1 = $src1$$Register;
10406 Register src2 = $src2$$Register;
10408 __ gsorn(dst, src2, src1);
10409 %}
10410 ins_pipe( ialu_regI_regI );
10411 %}
10412 */
// AND-in-place with a negative constant whose complement is a contiguous
// run of one bits: implemented by clearing that bit run with
// dins(dst, R0, pos, size), which zeroes 'size' bits starting at 'pos'.

// dst &= -8 (...11000): clear bits 0..2
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -5 (...11011): clear bit 2
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -7 (...11001): clear bits 1..2
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -4 (...11100): clear bits 0..1
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -121 (...10000111): clear bits 3..6
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    // 64-bit bitwise OR.
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long OR whose first operand is a pointer reinterpreted as a long
// (CastP2X) — a pure bit-pattern reuse, so a plain orr suffices.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    // 64-bit bitwise XOR.
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    // sll encodes a 5-bit shift amount and re-sign-extends the 32-bit
    // result on MIPS64. Assumes 0 <= shamt <= 31 — TODO confirm the
    // matcher never produces a larger constant here.
    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Int shift of a long truncated by ConvL2I: the truncation is free,
// since sll operates on (and sign-extends) the low 32 bits anyway.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src << 16) & 0xFFFF0000: the mask is redundant after the 16-bit left
// shift, so a single sll suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ((int)(src & 7) << 16) >> 16: a value in 0..7 is unchanged by the
// short sign-extension round trip, so a single andi suffices.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// (short)(src1 | src2) via <<16 >>16, with the sign-extension elided.
// Valid when src1 is itself a sign-extended short (src2 <= 0x7FFF keeps
// bit 15 intact) — NOTE(review): confirm the matched contexts guarantee
// that precondition.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Shift Left by 16, then Arithmetic Shift Right by 16 — sign-extend the
// low halfword. This idiom is produced by the compiler for the i2s
// bytecode; seh does it in one instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Shift Left by 24, then Arithmetic Shift Right by 24 — sign-extend the
// low byte. Produced for the i2b bytecode; seb does it in one
// instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// NOTE(review): this matches the same ideal subtree as salL2I_Reg_imm
// above; one of the two rules appears to be redundant.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left by a register amount
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    // sllv uses only the low 5 bits of the shift register, which is
    // exactly the (shift & 31) semantics of an int shift.
    __ sllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dsll encodes amounts 0..31; dsll32 covers 32..63 by encoding
    // (amount - 32). Only the low 6 bits of the constant are used,
    // matching the (shift & 63) semantics of a long shift.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long shift of an int widened by ConvI2L; no widening code is emitted —
// assumes ints stay sign-extended in registers (TODO confirm).
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long by a register amount
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    // dsllv uses only the low 6 bits of the shift register, matching
    // the (shift & 63) semantics of a long shift.
    __ dsllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// NOTE(review): this matches the same ideal subtree as salL_RegI2L_imm
// above; one of the two rules appears to be redundant.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dsll covers 0..31, dsll32 covers 32..63 (see salL_Reg_imm).
    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long (arithmetic), constant amount.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // The amount is masked to 0..63 up front ((shift & 63) semantics).
    int shamt = ($shift$$constant & 0x3f);
    // dsra covers 0..31, dsra32 covers 32..63 (encodes amount - 32).
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >> shamt) for 32 <= shamt <= 63: dsra32 leaves a properly
// sign-extended 32-bit value, so no separate truncation is needed.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically, register amount
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    // dsrav uses only the low 6 bits of the shift register
    // ((shift & 63) semantics).
    __ dsrav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically, register amount
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    // dsrlv uses only the low 6 bits of the shift register
    // ((shift & 63) semantics).
    __ dsrlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by a constant in 0..31: single dsrl.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shamt) & 0x7FFFFFFF == extract 31 bits starting at bit
// shamt, which is exactly dext(dst, src, shamt, 31).
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Same as slrL_Reg_immI_0_31 but the source is a pointer's bit pattern
// (CastP2X).
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by a constant in 32..63: dsrl32 encodes
// (amount - 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shamt) for shamt > 32: the result fits in 31 bits, so
// zero-extension and sign-extension coincide and dsrl32 alone yields a
// valid int. shamt == 32 is excluded by the predicate because bit 31 of
// the result could then be set, requiring re-sign-extension.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Pointer-bit-pattern variant of slrL_Reg_immI_32_63.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
10893 // Xor Instructions
10894 // Xor Register with Register
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    // sll with shamt 0 sign-extends the low 32 bits, canonicalizing the
    // 64-bit register to a proper int value.
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
10911 // Or Instructions
10912 // Or Register with Register
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    // No re-sign-extension needed: or of two sign-extended ints stays
    // sign-extended.
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Matches (src >>> rshift) | ((src & 1) << lshift) where rshift + lshift == 32:
// the low bit ends up in the sign position, i.e. a rotate-right by 1 followed
// by a logical shift right of the remaining (rshift - 1) bits.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    // Skip the srl entirely when rshift == 1 (shift amount would be 0).
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or a long register with a pointer reinterpreted as an integer value.
// NOTE(review): the node is OrI but the operands are declared as 64-bit
// (mRegL/CastP2X) — presumably relied upon by pointer-hashing shapes; confirm
// against the matcher rules before changing.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
10961 // Logical Shift Right by 8-bit immediate
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    // srl operates on the low 32 bits and sign-extends the result.
    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask, where mask is a contiguous low-bit mask: collapses
// to a single MIPS bit-field extract (ext).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos  = $shift$$constant;
    // is_int_mask returns the width (number of one-bits) of the mask.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of an int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 32, implemented as a rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of a long with rotate-right amount 0..31; shifts sum to 64.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of a long with rotate-right amount 32..63; drotr32 encodes the
// amount as (sa - 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of an int: (dst >>> rshift) | (dst << lshift), shifts sum to 32.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of a long with amount 0..31; shifts sum to 64.
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of a long with amount 32..63; drotr32 encodes (sa - 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
11088 // Logical Shift Right
// Logical Shift Right, variable shift amount in a register.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    // srlv uses only the low 5 bits of the shift register, matching Java
    // int-shift semantics.
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic (sign-propagating) shift right by immediate.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic shift right, variable shift amount in a register.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    // srav uses only the low 5 bits of the shift register.
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
11131 //----------Convert Int to Boolean---------------------------------------------
// Convert int to boolean: dst = (src != 0) ? 1 : 0.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      // dst = 1, then movz overwrites with 0 when src == 0.
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: save src in AT first, since writing dst destroys it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Sign-extend int to long.  sll with shamt 0 sign-extends the low 32 bits.
// When dst == src nothing is emitted — assumes mRegI values are already kept
// sign-extended in their 64-bit registers.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Narrow long to int: keep low 32 bits, sign-extended (sll with shamt 0).
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// ConvI2L(ConvL2I(x)): sign-extend the low 32 bits of a long in one sll.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Convert long to double: move the 64-bit value into the FPU, then cvt.d.l.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Convert double to long, fast path.  trunc.l.d saturates out-of-range and
// NaN inputs to max_long; the post-check rewrites those cases to Java
// semantics (NaN -> 0, overflow keeps min/max).
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    // Build max_long as (unsigned)-1 >> 1 — cheaper than set64.
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Result != max_long means no saturation occurred; done.
    // Delay slot loads 0 into F30 for the comparison below.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    // src < 0.0 (unordered-or-less)?  False (and not NaN) means the true
    // result is max_long; keep it.  Delay slot preloads T9 = -1.
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // dst = -1 - max_long == min_long (negative overflow case).
    __ subu(dst, T9, AT);
    // NaN converts to 0 per Java semantics.
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// Convert double to long, slow path: on an FCSR invalid-operation flag
// (bit 16 after trunc), fall back to SharedRuntime::d2l for exact Java
// semantics.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    // NaN -> 0 without touching the FPU trap state.
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    // Read FCSR (control register 31) and test the invalid-operation flag.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Conversion raised invalid-op: let the runtime compute the result.
    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to int, fast path: truncate, then force NaN results to 0
// (movt fires when the preceding c.un.s set the condition bit).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to int, slow path: on an FCSR invalid-operation flag fall
// back to SharedRuntime::f2i, preserving V0 around the leaf call.
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    // NaN -> 0 directly, skipping the truncation entirely.
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    // FCSR bit 16 = invalid-operation: set when trunc could not represent
    // the input; in that case take the runtime path.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to long, fast path: truncate, then force NaN results to 0.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to long, slow path: fall back to SharedRuntime::f2l when the
// FCSR invalid-operation flag fires after truncation.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    // NaN -> 0 directly.
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    // Test FCSR invalid-operation flag (bit 16).
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert long to float: move the 64-bit value into the FPU, then cvt.s.l.
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);
    // (Removed an unused 'Label L;' — no branch is emitted here.)

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Convert int to float: move into the FPU, then cvt.s.w.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register      src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, i.e. broadcast the sign bit.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0.  slt yields 0/1; negating gives 0/-1.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Convert pointer to boolean: dst = (src != NULL) ? 1 : 0.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      // dst = 1, then movz overwrites with 0 when src == 0.
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash src in AT before clobbering it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Convert int to double: move into the FPU, then cvt.d.w.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register      src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Widen float to double (exact, single instruction).
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Narrow double to float (rounds per the current FPU rounding mode).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
11506 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d saturates to max_int on overflow/NaN; the post-check
// fixes those cases up (NaN -> 0, negative overflow -> min_int).
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Result != max_int means no saturation; done.  Delay slot loads 0 into
    // F30 for the comparison below.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    // src < 0.0 (unordered-or-less)?  False and not NaN means the true
    // result is max_int; keep it.  Delay slot preloads T9 = -1.
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // dst = -1 - max_int == min_int (negative overflow case).
    __ subu32(dst, T9, AT);
    // NaN converts to 0 per Java semantics.
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Convert double to int, slow path: on an FCSR invalid-operation flag fall
// back to SharedRuntime::d2i.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    // FCSR bit 16 = invalid-operation; set when the input was not exactly
    // representable as an int (NaN / overflow).
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
11568 // Convert oop pointer into compressed form
// Convert oop pointer into compressed form
// (null-allowed variant; the not-null case is matched by the rule below).
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    // encode_heap_oop works in place, so copy first when registers differ.
    if (src != dst) {
      __ move(dst, src);
    }
    __ encode_heap_oop(dst);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop known to be non-null: skips the null check in the encoder.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop that may be null (and is not a constant).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // decode_heap_oop works in place, so copy first when registers differ.
    if (s != d) {
      __ move(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop known non-null (or constant): the decoder can skip
// the null check and has a two-register form for distinct src/dst.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow klass pointer (always non-null).
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // Use the two-register decoder when possible; the in-place form otherwise.
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
11651 //FIXME
//FIXME
// Load the current JavaThread pointer.  With OPT_THREAD the thread lives in
// the dedicated TREG register; otherwise call the helper to fetch it.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a type-system-only node: no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a type-system-only node: zero size, no code emitted.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a type-system-only node: no code is emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
11696 // Return Instruction
11697 // Remove the return address & jump to it.
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    // Jump through RA; the nop fills the branch delay slot.
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
11710 /*
11711 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11712 instruct jumpXtnd(mRegL switch_val) %{
11713 match(Jump switch_val);
11715 ins_cost(350);
11717 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11718 "jr T9\n\t"
11719 "nop" %}
11720 ins_encode %{
11721 Register table_base = $constanttablebase;
11722 int con_offset = $constantoffset;
11723 Register switch_reg = $switch_val$$Register;
11725 if (UseLoongsonISA) {
11726 if (Assembler::is_simm(con_offset, 8)) {
11727 __ gsldx(T9, table_base, switch_reg, con_offset);
11728 } else if (Assembler::is_simm16(con_offset)) {
11729 __ daddu(T9, table_base, switch_reg);
11730 __ ld(T9, T9, con_offset);
11731 } else {
11732 __ move(T9, con_offset);
11733 __ daddu(AT, table_base, switch_reg);
11734 __ gsldx(T9, AT, T9, 0);
11735 }
11736 } else {
11737 if (Assembler::is_simm16(con_offset)) {
11738 __ daddu(T9, table_base, switch_reg);
11739 __ ld(T9, T9, con_offset);
11740 } else {
11741 __ move(T9, con_offset);
11742 __ daddu(AT, table_base, switch_reg);
11743 __ daddu(AT, T9, AT);
11744 __ ld(T9, AT, 0);
11745 }
11746 }
11748 __ jr(T9);
11749 __ nop();
11751 %}
11752 ins_pipe(pipe_jump);
11753 %}
11754 */
11756 // Jump Direct - Label defines a relative address from JMP
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label pointer means the target is not yet bound; emit a branch
    // with displacement 0 to be patched later.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
11779 // Tail Jump; remove the return address; jump to target.
11780 // TailCall above leaves the return address around.
11781 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11782 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11783 // "restore" before this instruction (in Epilogue), we need to materialize it
11784 // in %i0.
11785 //FIXME
//FIXME
// Tail jump used by the rethrow stub: publish the exception oop in V0 and the
// exception pc (the caller's RA) in V1, then jump to the target.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *  [stubGenerator_mips.cpp] generate_forward_exception()
     *  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop  = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
11810 // ============================================================================
11811 // Procedure Call/Return Instructions
11812 // Call Java Static Instruction
11813 // Note: If this code changes, the corresponding ret_addr_offset() and
11814 // compute_padding() functions will have to be adjusted.
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  // 16-byte alignment keeps the call site patchable as a unit.
  ins_alignment(16);
%}
11827 // Call Java Dynamic Instruction
11828 // Note: If this code changes, the corresponding ret_addr_offset() and
11829 // compute_padding() functions will have to be adjusted.
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Call runtime leaf routine that uses no floating-point arguments.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
11855 // Prefetch instructions.
// Prefetch for read: compute the effective address base + (index << scale)
// + disp into AT and issue a load-hint pref.
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale); just the base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Fold the displacement into the address accumulated in AT.  The previous
    // code restarted from the base register here (dropping the index term)
    // and added a small disp twice; add disp exactly once to AT instead.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: compute the effective address base + (index << scale)
// + disp into AT and issue a store-hint pref.
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale); just the base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Fold the displacement into the address accumulated in AT.  The previous
    // code restarted from the base register here (dropping the index term)
    // and added a small disp twice; add disp exactly once to AT instead.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
11922 // Prefetch instructions for allocation.
// Prefetch instructions for allocation.
// Implemented as a dummy load-byte into R0: touching the line pulls it into
// the cache.  The branches cover all combinations of {index present or not} x
// {disp fits simm16 or not} x {Loongson gslbx available or not}.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    Register dst = R0;  // value discarded; only the cache effect matters

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx folds base + index + disp into one instruction.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement too large for lb's 16-bit field: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
11987 // Call runtime without safepoint
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12000 // Load Char (16bit unsigned)
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load unsigned char and widen to long: the zero-extending load already
// yields a valid long, so the ConvI2L is folded into the same encoding.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12019 // Store Char (16bit unsigned)
// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src,$mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0 directly from the zero register (no constant-table
// access needed).
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits lwc1's 16-bit immediate: single load.
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Large offset: materialize it, then use indexed load (Loongson) or an
      // explicit add.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Materialize double constant +0.0 by moving a 64-bit integer zero
// into the FPU register (all-zero bit pattern is double +0.0).
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the constant table.
// Clobbers AT when the table offset does not fit in a signed 16-bit immediate.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits in ldc1's 16-bit displacement.
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Large offset: build it in AT, then use an indexed load
      // (single gsldxc1 on Loongson, add + ldc1 otherwise).
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12104 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float +0.0 to memory via an integer sw of R0 (the float +0.0 bit
// pattern is all zeros), avoiding an FPU register round-trip.
// Clobbers AT and T9 for address computation.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      // Effective address = base + (index << scale) [+ disp].
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        // NOTE(review): 32-bit addu on 64-bit pointers — presumably safe only
        // because addresses stay in the sign-extended 32-bit range; other
        // rules here use daddu. TODO confirm.
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }

    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12153 // Load Double
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12163 // Load Double - UNaligned
// Load Double - UNaligned. Currently reuses the aligned encoder.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double +0.0 to memory. Materializes 0.0 in scratch FPU register F30
// then stores it. Clobbers F30, AT and T9.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // Build +0.0 in F30: move int 0 into the register, then convert
    // word->double. NOTE(review): dmtc1(R0, F30) would produce the same
    // all-zero pattern in one instruction — presumably equivalent; confirm.
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      // Effective address = base + (index << scale) [+ disp].
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        // NOTE(review): 32-bit addu on 64-bit pointers — see storeF_imm0.
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sdc1(F30, AT, 0);
      }

    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sdc1(F30, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load an int from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store an int to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a long to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a pointer to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a float to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
12328 // Use the same format since predicate() can not be used here.
// Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a double to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path object lock. Delegates to MacroAssembler::fast_lock; box is
// consumed (USE_KILL), tmp and scr are scratch temporaries.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path object unlock. Delegates to MacroAssembler::fast_unlock;
// box is consumed (USE_KILL), tmp is a scratch temporary.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12381 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate: byte store with ordering, used for GC
// card table marking.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12392 // Die now
// Die now: matches the Halt node. Emits a guaranteed VM stop with a
// diagnostic message instead of a real illtrap instruction.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Fixed typo in the diagnostic message ("ShoudNotReachHere").
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea) for a narrow-oop base + 8-bit offset, valid
// only when the narrow-oop shift is zero (address == base + disp).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale) + disp.
// Clobbers AT as scratch.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale), no displacement.
// Clobbers AT as scratch when scale != 0.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
12475 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on an int register compare. Unsigned-looking
// case names (above/below) are encoded with signed slt here, matching the
// cmpOp operand's cmpcode mapping. Clobbers AT; the trailing nop fills the
// branch delay slot.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // NOTE(review): "if (&L)" tests whether a label was supplied; the
    // address-of-a-reference is the original pointer $labl$$label.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch comparing a register against a 16-bit immediate:
// computes AT = src1 - src2 once, then branches on AT's sign/zero.
// Clobbers AT; the trailing nop fills the branch delay slot.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the addiu immediate).
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12602 /*
12603 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12604 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12605 match(CountedLoopEnd cop cmp);
12606 effect(USE labl);
12608 ins_cost(300);
12609 format %{ "J$cop,u $labl\t# Loop end" %}
12610 size(6);
12611 opcode(0x0F, 0x80);
12612 ins_encode( Jcc( cop, labl) );
12613 ins_pipe( pipe_jump );
12614 ins_pc_relative(1);
12615 %}
12617 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12618 match(CountedLoopEnd cop cmp);
12619 effect(USE labl);
12621 ins_cost(200);
12622 format %{ "J$cop,u $labl\t# Loop end" %}
12623 opcode(0x0F, 0x80);
12624 ins_encode( Jcc( cop, labl) );
12625 ins_pipe( pipe_jump );
12626 ins_pc_relative(1);
12627 %}
12628 */
12630 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT-based flags produced by conditional stores. Note the
// branch sense is inverted relative to the case name: AT holds the flag
// value, so "equal" branches when AT != 0 (success flag set).
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12665 // ============================================================================
12666 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
12667 // array for an instance of the superklass. Set a hidden internal cache on a
12668 // hit (cache is checked with exposed code in gen_subtype_check()). Return
12669 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// The 2nd slow-half of a subtype check: scans the subklass's secondary
// superclass array for the superklass (see comment block above). T8 is
// reserved as the scratch temp, hence the no_T8 register classes.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
12681 // Conditional-store of an int value.
12682 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS: hand-rolled ll/sc retry loop; success/failure flag left in AT
// (0xFF on success via the delay slot, 0 on failure) for jmpCon_flags.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

//    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // ll/sc use a simple base+disp address; indexed addressing unsupported.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // failure flag: AT = 0

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);            // sc failed -> retry
      __ delayed()->addiu(AT, R0, 0xFF); // success flag: AT = 0xFF
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
12722 // Conditional-store of a long value.
12723 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// MIPS: delegates to MacroAssembler::cmpxchg (lld/scd loop); result flag in AT.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr((Register)$mem$$base, $mem$$disp);

    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // cmpxchg uses a simple base+disp address; indexed addressing unsupported.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed diagnostic: this rule is storeLConditional, not storeIConditional
      // (the message had been copy-pasted from the int variant).
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwapI: int CAS at [mem_ptr] via MacroAssembler::cmpxchg32
// (ll/sc loop); the success flag it leaves in AT becomes the result.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  // Format updated to describe the code actually emitted (cmpxchg32 + move),
  // matching the style of compareAndSwapP/compareAndSwapN below; the old text
  // described a branch sequence that is no longer generated.
  format %{ "CMPXCHG32 $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV  $res, AT @ compareAndSwapI" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (Removed an unused local `Label L;` left over from an earlier encoding.)

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12773 //FIXME:
//FIXME:
// CompareAndSwapP: 64-bit pointer CAS at [mem_ptr] via
// MacroAssembler::cmpxchg (lld/scd loop); success flag in AT -> res.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV  $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (Removed an unused local `Label L;` left over from an earlier encoding.)

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwapN: 32-bit narrow-oop CAS at [mem_ptr]. oldval is
// sign-extended first because cmpxchg32's ll/sc sign-extends the loaded
// value; success flag in AT -> res.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV  $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // (Removed an unused local `Label L;` left over from an earlier encoding.)

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12817 //----------Max and Min--------------------------------------------------------
12818 // Min Instructions
12819 ////
12820 // *** Min and Max using the conditional move are slower than the
12821 // *** branch version on a Pentium III.
12822 // // Conditional move for min
12823 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12824 // effect( USE_DEF op2, USE op1, USE cr );
12825 // format %{ "CMOVlt $op2,$op1\t! min" %}
12826 // opcode(0x4C,0x0F);
12827 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12828 // ins_pipe( pipe_cmov_reg );
12829 //%}
12830 //
12831 //// Min Register with Register (P6 version)
12832 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12833 // predicate(VM_Version::supports_cmov() );
12834 // match(Set op2 (MinI op1 op2));
12835 // ins_cost(200);
12836 // expand %{
12837 // eFlagsReg cr;
12838 // compI_eReg(cr,op1,op2);
12839 // cmovI_reg_lt(op2,op1,cr);
12840 // %}
12841 //%}
12843 // Min Register with Register (generic version)
// Min Register with Register (generic version)
// dst = min(dst, src) via slt + conditional move; clobbers AT.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, src, dst);     // AT = (src < dst)
    __ movn(dst, src, AT);    // if AT != 0, dst = src

  %}

  ins_pipe( pipe_slow );
%}
12862 // Max Register with Register
12863 // *** Min and Max using the conditional move are slower than the
12864 // *** branch version on a Pentium III.
12865 // // Conditional move for max
12866 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12867 // effect( USE_DEF op2, USE op1, USE cr );
12868 // format %{ "CMOVgt $op2,$op1\t! max" %}
12869 // opcode(0x4F,0x0F);
12870 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12871 // ins_pipe( pipe_cmov_reg );
12872 //%}
12873 //
12874 // // Max Register with Register (P6 version)
12875 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12876 // predicate(VM_Version::supports_cmov() );
12877 // match(Set op2 (MaxI op1 op2));
12878 // ins_cost(200);
12879 // expand %{
12880 // eFlagsReg cr;
12881 // compI_eReg(cr,op1,op2);
12882 // cmovI_reg_gt(op2,op1,cr);
12883 // %}
12884 //%}
12886 // Max Register with Register (generic version)
// Max Register with Register (generic version)
// dst = max(dst, src) via slt + conditional move; clobbers AT.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, dst, src);     // AT = (dst < src)
    __ movn(dst, src, AT);    // if AT != 0, dst = src

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negatives to zero; clobbers AT.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst   = $dst$$Register;

    __ slt(AT, dst, R0);      // AT = (dst < 0)
    __ movn(dst, R0, AT);     // if negative, dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// AndL with 0xFFFFFFFF mask == zero-extend the low 32 bits; implemented
// with a single dext (extract bits [0,32) into dst).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Pack two ints into one long: dst = (src2 << 32) | zext(src1).
// Uses dins/dinsu bit-insert; the three cases avoid clobbering an input
// that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift the high half up, then insert src1 into the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
12959 // Zero-extend convert int to long
// Zero-extend convert int to long
// (ConvI2L src) & 0xFFFFFFFF == zero extension; single dext.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// L2I followed by I2L and a 0xFFFFFFFF mask collapses to a zero-extension
// of the low 32 bits; single dext.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
12988 // Match loading integer and casting it to unsigned int in long register.
12989 // LoadI + ConvI2L + AndL 0xffffffff.
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff -> single zero-extending lwu.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13007 // ============================================================================
13008 // Safepoint Instruction
// Safepoint Instruction: reads the polling page; a protected page causes a
// fault that the VM turns into a safepoint. Clobbers AT.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Relocation so the VM can identify this load as a poll site.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13027 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on MIPS (FPU already produces 32-bit precision).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op on MIPS (FPU already produces 64-bit precision).
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13047 //---------- Zeros Count Instructions ------------------------------------------
13048 // CountLeadingZerosINode CountTrailingZerosINode
// CountLeadingZerosINode CountTrailingZerosINode
// 32-bit count-leading-zeros via the native clz instruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz    $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit count-leading-zeros via the native dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz    $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 32-bit count-trailing-zeros via the Loongson (gs) ctz instruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz    $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit count-trailing-zeros via the Loongson (gs) dctz instruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed mnemonic typo in the debug format ("dcto" -> "dctz", matching
  // the instruction actually emitted below).
  format %{ "dctz    $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13094 // ====================VECTOR INSTRUCTIONS=====================================
13096 // Load vectors (8 bytes long)
// Load vectors (8 bytes long) — reuses the 64-bit double load encoder.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load    $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
13106 // Store vectors (8 bytes long)
// Store vectors (8 bytes long) — reuses the 64-bit double store encoder.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store   $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD. Builds the pattern
// in AT (Loongson replv_ob), then moves it to the FPU vector register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes of a vecD. Clobbers AT.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "repl_ob     AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: a single dmtc1 of R0 zeroes all 8 lanes.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1    R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: nor(AT, R0, R0) yields all-ones, moved into the
// vector register. Clobbers AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1    -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes of a vecD (Loongson replv_qh).
// Clobbers AT.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "replv_qh    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes of a vecD. Small constants
// use repl_qh directly; larger ones are materialized then broadcast.
// Clobbers AT.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "replv_qh    AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short-zero: one dmtc1 of R0 zeroes all 4 halfword lanes.
13193 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13194 predicate(n->as_Vector()->length() == 4);
13195 match(Set dst (ReplicateS zero));
13196 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13197 ins_encode %{
13198 __ dmtc1(R0, $dst$$FloatRegister);
13199 %}
13200 ins_pipe( pipe_mtc1 );
13201 %}
// Replicate short -1: nor(AT, R0, R0) produces all-ones, which equals -1 in
// every halfword lane; dmtc1 moves the 64-bit pattern into $dst.
13203 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13204 predicate(n->as_Vector()->length() == 4);
13205 match(Set dst (ReplicateS M1));
13206 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13207 ins_encode %{
13208 __ nor(AT, R0, R0);
13209 __ dmtc1(AT, $dst$$FloatRegister);
13210 %}
13211 ins_pipe( pipe_mtc1 );
13212 %}
13214 // Replicate integer (4 byte) scalar to be vector
// dins places $src's low 32 bits into AT[31:0]; dinsu copies the same 32
// bits into AT[63:32], yielding the 2-lane broadcast; dmtc1 moves it to $dst.
13215 instruct Repl2I(vecD dst, mRegI src) %{
13216 predicate(n->as_Vector()->length() == 2);
13217 match(Set dst (ReplicateI src));
13218 format %{ "dins AT, $src, 0, 32\n\t"
13219 "dinsu AT, $src, 32, 32\n\t"
13220 "dmtc1 AT, $dst\t! replicate2I" %}
13221 ins_encode %{
13222 __ dins(AT, $src$$Register, 0, 32);
13223 __ dinsu(AT, $src$$Register, 32, 32);
13224 __ dmtc1(AT, $dst$$FloatRegister);
13225 %}
13226 ins_pipe( pipe_mtc1 );
13227 %}
13229 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// li32 materializes the 32-bit constant in AT and replv_pw broadcasts it to
// both 32-bit lanes before dmtc1 moves the result into $dst.
// NOTE(review): operand `tmp` (A7) is declared and KILLed but never
// referenced in the encoding — presumably li32 or an earlier revision
// clobbered A7; confirm whether the KILL effect is still required.
13230 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13231 predicate(n->as_Vector()->length() == 2);
13232 match(Set dst (ReplicateI con));
13233 effect(KILL tmp);
13234 format %{ "li32 AT, [$con], 32\n\t"
13235 "replv_pw AT, AT\n\t"
13236 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13237 ins_encode %{
13238 int val = $con$$constant;
13239 __ li32(AT, val);
13240 __ replv_pw(AT, AT);
13241 __ dmtc1(AT, $dst$$FloatRegister);
13242 %}
13243 ins_pipe( pipe_mtc1 );
13244 %}
13246 // Replicate integer (4 byte) scalar zero to be vector
// Both 32-bit lanes are zero, so a single dmtc1 of R0 suffices.
13247 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13248 predicate(n->as_Vector()->length() == 2);
13249 match(Set dst (ReplicateI zero));
13250 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13251 ins_encode %{
13252 __ dmtc1(R0, $dst$$FloatRegister);
13253 %}
13254 ins_pipe( pipe_mtc1 );
13255 %}
13257 // Replicate integer (4 byte) scalar -1 to be vector
// nor(AT, R0, R0) yields all-ones (= -1 in each 32-bit lane); clobbers AT.
13258 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13259 predicate(n->as_Vector()->length() == 2);
13260 match(Set dst (ReplicateI M1));
13261 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13262 ins_encode %{
13263 __ nor(AT, R0, R0);
13264 __ dmtc1(AT, $dst$$FloatRegister);
13265 %}
13266 ins_pipe( pipe_mtc1 );
13267 %}
13269 // Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs the same single $src into both halves of a paired-single
// register, i.e. a 2-lane float broadcast in one instruction.
13270 instruct Repl2F(vecD dst, regF src) %{
13271 predicate(n->as_Vector()->length() == 2);
13272 match(Set dst (ReplicateF src));
13273 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13274 ins_encode %{
13275 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13276 %}
13277 ins_pipe( pipe_slow );
13278 %}
13280 // Replicate float (4 byte) scalar zero to be vector
// +0.0f has an all-zero bit pattern, so dmtc1 of R0 fills both float lanes.
13281 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13282 predicate(n->as_Vector()->length() == 2);
13283 match(Set dst (ReplicateF zero));
13284 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13285 ins_encode %{
13286 __ dmtc1(R0, $dst$$FloatRegister);
13287 %}
13288 ins_pipe( pipe_mtc1 );
13289 %}
13292 // ====================VECTOR ARITHMETIC=======================================
13294 // --------------------------------- ADD --------------------------------------
13296 // Floats vector add
// Packed 2-float add, two-operand form (dst += src) via paired-single add.ps.
// NOTE(review): this variant uses ins_pipe(pipe_slow) while the 3-operand
// vadd2F3 below uses fpu_regF_regF — confirm whether the difference is
// intentional or a leftover.
13297 instruct vadd2F(vecD dst, vecD src) %{
13298 predicate(n->as_Vector()->length() == 2);
13299 match(Set dst (AddVF dst src));
13300 format %{ "add.ps $dst,$src\t! add packed2F" %}
13301 ins_encode %{
13302 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13303 %}
13304 ins_pipe( pipe_slow );
13305 %}
// Packed 2-float add, three-operand form (dst = src1 + src2) via add.ps.
13307 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13308 predicate(n->as_Vector()->length() == 2);
13309 match(Set dst (AddVF src1 src2));
13310 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13311 ins_encode %{
13312 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13313 %}
13314 ins_pipe( fpu_regF_regF );
13315 %}
13317 // --------------------------------- SUB --------------------------------------
13319 // Floats vector sub
// Packed 2-float subtract, two-operand form (dst -= src) via sub.ps.
13320 instruct vsub2F(vecD dst, vecD src) %{
13321 predicate(n->as_Vector()->length() == 2);
13322 match(Set dst (SubVF dst src));
13323 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13324 ins_encode %{
13325 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13326 %}
13327 ins_pipe( fpu_regF_regF );
13328 %}
13330 // --------------------------------- MUL --------------------------------------
13332 // Floats vector mul
// Packed 2-float multiply, two-operand form (dst *= src) via mul.ps.
13333 instruct vmul2F(vecD dst, vecD src) %{
13334 predicate(n->as_Vector()->length() == 2);
13335 match(Set dst (MulVF dst src));
13336 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13337 ins_encode %{
13338 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13339 %}
13340 ins_pipe( fpu_regF_regF );
13341 %}
// Packed 2-float multiply, three-operand form (dst = src1 * src2) via mul.ps.
13343 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13344 predicate(n->as_Vector()->length() == 2);
13345 match(Set dst (MulVF src1 src2));
13346 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13347 ins_encode %{
13348 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13349 %}
13350 ins_pipe( fpu_regF_regF );
13351 %}
13353 // --------------------------------- DIV --------------------------------------
13354 // MIPS do not have div.ps
13357 //----------PEEPHOLE RULES-----------------------------------------------------
13358 // These must follow all instruction definitions as they use the names
13359 // defined in the instructions definitions.
13360 //
13361 // peepmatch ( root_instr_name [preceding_instruction]* );
13362 //
13363 // peepconstraint %{
13364 // (instruction_number.operand_name relational_op instruction_number.operand_name
13365 // [, ...] );
13366 // // instruction numbers are zero-based using left to right order in peepmatch
13367 //
13368 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13369 // // provide an instruction_number.operand_name for each operand that appears
13370 // // in the replacement instruction's match rule
13371 //
13372 // ---------VM FLAGS---------------------------------------------------------
13373 //
13374 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13375 //
13376 // Each peephole rule is given an identifying number starting with zero and
13377 // increasing by one in the order seen by the parser. An individual peephole
13378 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13379 // on the command-line.
13380 //
13381 // ---------CURRENT LIMITATIONS----------------------------------------------
13382 //
13383 // Only match adjacent instructions in same basic block
13384 // Only equality constraints
13385 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13386 // Only one replacement instruction
13387 //
13388 // ---------EXAMPLE----------------------------------------------------------
13389 //
13390 // // pertinent parts of existing instructions in architecture description
13391 // instruct movI(eRegI dst, eRegI src) %{
13392 // match(Set dst (CopyI src));
13393 // %}
13394 //
13395 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13396 // match(Set dst (AddI dst src));
13397 // effect(KILL cr);
13398 // %}
13399 //
13400 // // Change (inc mov) to lea
13401 // peephole %{
13402 // // increment preceded by register-register move
13403 // peepmatch ( incI_eReg movI );
13404 // // require that the destination register of the increment
13405 // // match the destination register of the move
13406 // peepconstraint ( 0.dst == 1.dst );
13407 // // construct a replacement instruction that sets
13408 // // the destination to ( move's source register + one )
13409 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13410 // %}
13411 //
13412 // Implementation no longer uses movX instructions since
13413 // machine-independent system no longer uses CopyX nodes.
13414 //
13415 // peephole %{
13416 // peepmatch ( incI_eReg movI );
13417 // peepconstraint ( 0.dst == 1.dst );
13418 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13419 // %}
13420 //
13421 // peephole %{
13422 // peepmatch ( decI_eReg movI );
13423 // peepconstraint ( 0.dst == 1.dst );
13424 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13425 // %}
13426 //
13427 // peephole %{
13428 // peepmatch ( addI_eReg_imm movI );
13429 // peepconstraint ( 0.dst == 1.dst );
13430 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13431 // %}
13432 //
13433 // peephole %{
13434 // peepmatch ( addP_eReg_imm movP );
13435 // peepconstraint ( 0.dst == 1.dst );
13436 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13437 // %}
13439 // // Change load of spilled value to only a spill
13440 // instruct storeI(memory mem, eRegI src) %{
13441 // match(Set mem (StoreI mem src));
13442 // %}
13443 //
13444 // instruct loadI(eRegI dst, memory mem) %{
13445 // match(Set dst (LoadI mem));
13446 // %}
13447 //
13448 //peephole %{
13449 // peepmatch ( loadI storeI );
13450 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13451 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13452 //%}
13454 //----------SMARTSPILL RULES---------------------------------------------------
13455 // These must follow all instruction definitions as they use the names
13456 // defined in the instructions definitions.