Thu, 23 Feb 2017 05:09:55 -0500
[C2] Add instruct sarL2I_Reg_immI_32_63 in mips_64.ad
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
// General Registers
// Integer Registers
// Each 64-bit GPR is described as a pair (Rn, Rn_H) so the allocator can
// model the two 32-bit halves of long/pointer values held in one register.
reg_def R0   ( NS,  NS, Op_RegI,  0, VMRegImpl::Bad()); // hardwired zero, never allocatable
reg_def AT   ( NS,  NS, Op_RegI,  1, AT->as_VMReg());   // assembler temporary
reg_def AT_H ( NS,  NS, Op_RegI,  1, AT->as_VMReg()->next());
reg_def V0   (SOC, SOC, Op_RegI,  2, V0->as_VMReg());
reg_def V0_H (SOC, SOC, Op_RegI,  2, V0->as_VMReg()->next());
reg_def V1   (SOC, SOC, Op_RegI,  3, V1->as_VMReg());
reg_def V1_H (SOC, SOC, Op_RegI,  3, V1->as_VMReg()->next());
reg_def A0   (SOC, SOC, Op_RegI,  4, A0->as_VMReg());
reg_def A0_H (SOC, SOC, Op_RegI,  4, A0->as_VMReg()->next());
reg_def A1   (SOC, SOC, Op_RegI,  5, A1->as_VMReg());
reg_def A1_H (SOC, SOC, Op_RegI,  5, A1->as_VMReg()->next());
reg_def A2   (SOC, SOC, Op_RegI,  6, A2->as_VMReg());
reg_def A2_H (SOC, SOC, Op_RegI,  6, A2->as_VMReg()->next());
reg_def A3   (SOC, SOC, Op_RegI,  7, A3->as_VMReg());
reg_def A3_H (SOC, SOC, Op_RegI,  7, A3->as_VMReg()->next());
reg_def A4   (SOC, SOC, Op_RegI,  8, A4->as_VMReg());
reg_def A4_H (SOC, SOC, Op_RegI,  8, A4->as_VMReg()->next());
reg_def A5   (SOC, SOC, Op_RegI,  9, A5->as_VMReg());
reg_def A5_H (SOC, SOC, Op_RegI,  9, A5->as_VMReg()->next());
reg_def A6   (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
reg_def A7   (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
reg_def T0   (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
reg_def T1   (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
reg_def T2   (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
reg_def T3   (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
// S0..S7 are callee-saved (SOE = Save-On-Entry).
reg_def S0   (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
reg_def S1   (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
reg_def S2   (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
reg_def S3   (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
reg_def S4   (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
reg_def S5   (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
reg_def S6   (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
reg_def S7   (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
reg_def T8   (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
reg_def T9   (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());

// Special Registers
// NS (No-Save): never allocated by C2, so nothing needs saving.
reg_def K0   ( NS,  NS, Op_RegI, 26, K0->as_VMReg());
reg_def K1   ( NS,  NS, Op_RegI, 27, K1->as_VMReg());
reg_def GP   ( NS,  NS, Op_RegI, 28, GP->as_VMReg());
reg_def GP_H ( NS,  NS, Op_RegI, 28, GP->as_VMReg()->next());
reg_def SP   ( NS,  NS, Op_RegI, 29, SP->as_VMReg());  // stack pointer
reg_def SP_H ( NS,  NS, Op_RegI, 29, SP->as_VMReg()->next());
reg_def FP   ( NS,  NS, Op_RegI, 30, FP->as_VMReg());  // frame pointer
reg_def FP_H ( NS,  NS, Op_RegI, 30, FP->as_VMReg()->next());
reg_def RA   ( NS,  NS, Op_RegI, 31, RA->as_VMReg());  // return address
reg_def RA_H ( NS,  NS, Op_RegI, 31, RA->as_VMReg()->next());
// Floating registers.
// Like the GPRs, each FPR is described as a (Fn, Fn_H) pair of 32-bit
// halves so doubles can be tracked as register pairs.
reg_def F0    ( SOC, SOC, Op_RegF,  0, F0->as_VMReg());
reg_def F0_H  ( SOC, SOC, Op_RegF,  0, F0->as_VMReg()->next());
reg_def F1    ( SOC, SOC, Op_RegF,  1, F1->as_VMReg());
reg_def F1_H  ( SOC, SOC, Op_RegF,  1, F1->as_VMReg()->next());
reg_def F2    ( SOC, SOC, Op_RegF,  2, F2->as_VMReg());
reg_def F2_H  ( SOC, SOC, Op_RegF,  2, F2->as_VMReg()->next());
reg_def F3    ( SOC, SOC, Op_RegF,  3, F3->as_VMReg());
reg_def F3_H  ( SOC, SOC, Op_RegF,  3, F3->as_VMReg()->next());
reg_def F4    ( SOC, SOC, Op_RegF,  4, F4->as_VMReg());
reg_def F4_H  ( SOC, SOC, Op_RegF,  4, F4->as_VMReg()->next());
reg_def F5    ( SOC, SOC, Op_RegF,  5, F5->as_VMReg());
reg_def F5_H  ( SOC, SOC, Op_RegF,  5, F5->as_VMReg()->next());
reg_def F6    ( SOC, SOC, Op_RegF,  6, F6->as_VMReg());
reg_def F6_H  ( SOC, SOC, Op_RegF,  6, F6->as_VMReg()->next());
reg_def F7    ( SOC, SOC, Op_RegF,  7, F7->as_VMReg());
reg_def F7_H  ( SOC, SOC, Op_RegF,  7, F7->as_VMReg()->next());
reg_def F8    ( SOC, SOC, Op_RegF,  8, F8->as_VMReg());
reg_def F8_H  ( SOC, SOC, Op_RegF,  8, F8->as_VMReg()->next());
reg_def F9    ( SOC, SOC, Op_RegF,  9, F9->as_VMReg());
reg_def F9_H  ( SOC, SOC, Op_RegF,  9, F9->as_VMReg()->next());
reg_def F10   ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
reg_def F11   ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
reg_def F12   ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
reg_def F13   ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
reg_def F14   ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
reg_def F15   ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
reg_def F16   ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
reg_def F17   ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
reg_def F18   ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
reg_def F19   ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
reg_def F20   ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
reg_def F21   ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
reg_def F22   ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
reg_def F23   ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
reg_def F24   ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
reg_def F25   ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
reg_def F26   ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
reg_def F27   ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
reg_def F28   ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
reg_def F29   ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
reg_def F30   ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
reg_def F31   ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// NOTE(review): the flag is modeled on encoding 1 via as_Register(1);
// confirm this matches how flags are synthesized in the MIPS back end.
reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
// S6 is used for get_thread(S6)
// S5 is used for heapbase of compressed oop
// Allocation order for the integer register chunk.  Callee-saved
// registers come first; SP/FP/RA/GP sit at the end since they are
// never allocated (NS).
// FIX: a comma was missing after "GP, GP_H", which silently fused the
// GP and RA entries into an adlc syntax error.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H,
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
);
// Allocation order for the floating-point chunk.
// NOTE(review): argument registers F12..F19 are listed late (in reverse),
// presumably to de-prioritize them for allocation — confirm intent.
alloc_class chunk1( F0, F0_H,
                    F1, F1_H,
                    F2, F2_H,
                    F3, F3_H,
                    F4, F4_H,
                    F5, F5_H,
                    F6, F6_H,
                    F7, F7_H,
                    F8, F8_H,
                    F9, F9_H,
                    F10, F10_H,
                    F11, F11_H,
                    F20, F20_H,
                    F21, F21_H,
                    F22, F22_H,
                    F23, F23_H,
                    F24, F24_H,
                    F25, F25_H,
                    F26, F26_H,
                    F27, F27_H,
                    F28, F28_H,
                    F19, F19_H,
                    F18, F18_H,
                    F17, F17_H,
                    F16, F16_H,
                    F15, F15_H,
                    F14, F14_H,
                    F13, F13_H,
                    F12, F12_H,
                    F29, F29_H,
                    F30, F30_H,
                    F31, F31_H);

// The condition-flag pseudo-register lives in its own chunk.
alloc_class chunk2(MIPS_FLAG);
// Single-register classes used to pin an operand to one specific register,
// plus small groups (saved/temporary/argument registers).

reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 ); // callee-saved set
reg_class s0_reg( S0 );
reg_class s1_reg( S1 );
reg_class s2_reg( S2 );
reg_class s3_reg( S3 );
reg_class s4_reg( S4 );
reg_class s5_reg( S5 );
reg_class s6_reg( S6 );
reg_class s7_reg( S7 );

reg_class t_reg( T0, T1, T2, T3, T8, T9 ); // caller-saved temporaries
reg_class t0_reg( T0 );
reg_class t1_reg( T1 );
reg_class t2_reg( T2 );
reg_class t3_reg( T3 );
reg_class t8_reg( T8 );
reg_class t9_reg( T9 );

reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 ); // argument registers
reg_class a0_reg( A0 );
reg_class a1_reg( A1 );
reg_class a2_reg( A2 );
reg_class a3_reg( A3 );
reg_class a4_reg( A4 );
reg_class a5_reg( A5 );
reg_class a6_reg( A6 );
reg_class a7_reg( A7 );

reg_class v0_reg( V0 );
reg_class v1_reg( V1 );

reg_class sp_reg( SP, SP_H ); // stack pointer
reg_class fp_reg( FP, FP_H ); // frame pointer

reg_class mips_flags(MIPS_FLAG);

// 64-bit (paired) views of the same registers, for long/pointer operands.
reg_class v0_long_reg( V0, V0_H );
reg_class v1_long_reg( V1, V1_H );
reg_class a0_long_reg( A0, A0_H );
reg_class a1_long_reg( A1, A1_H );
reg_class a2_long_reg( A2, A2_H );
reg_class a3_long_reg( A3, A3_H );
reg_class a4_long_reg( A4, A4_H );
reg_class a5_long_reg( A5, A5_H );
reg_class a6_long_reg( A6, A6_H );
reg_class a7_long_reg( A7, A7_H );
reg_class t0_long_reg( T0, T0_H );
reg_class t1_long_reg( T1, T1_H );
reg_class t2_long_reg( T2, T2_H );
reg_class t3_long_reg( T3, T3_H );
reg_class t8_long_reg( T8, T8_H );
reg_class t9_long_reg( T9, T9_H );
reg_class s0_long_reg( S0, S0_H );
reg_class s1_long_reg( S1, S1_H );
reg_class s2_long_reg( S2, S2_H );
reg_class s3_long_reg( S3, S3_H );
reg_class s4_long_reg( S4, S4_H );
reg_class s5_long_reg( S5, S5_H );
reg_class s6_long_reg( S6, S6_H );
reg_class s7_long_reg( S7, S7_H );

// General-purpose int class.  Excludes S5/S6 (thread/heapbase), T9
// (scratch for calls) and the NS registers.
reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );

// int_reg minus the A* argument registers.
reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
// Pointer operand class (register pairs, V0/V1 excluded).
reg_class p_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T8, T8_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
);

// p_reg minus T8 (for instructs that need T8 as a scratch register).
reg_class no_T8_p_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
);

// Long operand class; currently the same register set as p_reg.
reg_class long_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T8, T8_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
);
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
// 2016/12/1 aoqi: F31 is no longer used as a temporary register in D2I
// Single-precision float class.  F30 is excluded (scratch for D2I, see
// note above); F31 is allocatable again.
// FIX: a comma was missing between F17 and F18, which made the list an
// adlc syntax error.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
// Double-precision class (register pairs).  F30 is excluded here as well,
// matching flt_reg.
reg_class dbl_reg( F0, F0_H,
                   F1, F1_H,
                   F2, F2_H,
                   F3, F3_H,
                   F4, F4_H,
                   F5, F5_H,
                   F6, F6_H,
                   F7, F7_H,
                   F8, F8_H,
                   F9, F9_H,
                   F10, F10_H,
                   F11, F11_H,
                   F12, F12_H,
                   F13, F13_H,
                   F14, F14_H,
                   F15, F15_H,
                   F16, F16_H,
                   F17, F17_H,
                   F18, F18_H,
                   F19, F19_H,
                   F20, F20_H,
                   F21, F21_H,
                   F22, F22_H,
                   F23, F23_H,
                   F24, F24_H,
                   F25, F25_H,
                   F26, F26_H,
                   F27, F27_H,
                   F28, F28_H,
                   F29, F29_H,
                   F31, F31_H);

// First/second floating-point argument registers.
reg_class flt_arg0( F12 );
reg_class dbl_arg0( F12, F12_H );
reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
definitions %{
  // Baseline cost of an ordinary instruction, and a prohibitive cost used
  // to steer the matcher away from a rule.
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  // we use jr instruction to construct call, so more expensive
  // by yjl 2/28/2006
  int_def CALL_COST         (    500, DEFAULT_COST * 5);
/*
  int_def EQUAL             (   1,  1  );
  int_def NOT_EQUAL         (   2,  2  );
  int_def GREATER           (   3,  3  );
  int_def GREATER_EQUAL     (   4,  4  );
  int_def LESS              (   5,  5  );
  int_def LESS_EQUAL        (   6,  6  );
*/
%}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Helper used by Compile::shorten_branches; this platform emits no call
// trampolines, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Sizes and emitters for the exception and deopt handler stubs that are
// appended to every nmethod.  The emitters are defined in the source block
// below.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call be deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    // Rounded to 16 so the stub can be patched atomically on a
    // 16-byte-aligned boundary.
    return round_to(size, 16);
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call be deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emit exception handler code.
// Emits, into the stub section, a jump to the opto exception blob:
// li48(T9, entry) ; jr T9 ; nop.  Returns the stub's offset, or 0 if the
// code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
   * However, this will trigger an assert after the 40th method:
   *
   *   39    b  java.lang.Throwable::<init> (25 bytes)
   *   --- ns  java.lang.Throwable::fillInStackTrace
   *   40   !b  java.net.URLClassLoader::findClass (29 bytes)
   *   /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(),"must be")
   *   40  made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert will disappear, but WebClient will
   * fail after the 403th method with unknown reason.
   */
  // Load the blob entry point into T9 and jump (branch-delay slot nop'd).
  __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();
  // Pad to 16 bytes so the handler stays patchable (see size_exception_handler).
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
582 // Emit deopt handler code.
// Emit deopt handler code.
// Emits, into the stub section, a relocated call to the deopt blob's
// unpack entry: li48(T9, unpack) ; jalr T9 ; nop.  jalr (not jr) so RA
// records the deopt site.  Returns the stub's offset, or 0 on expand failure.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

#ifdef _LP64
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
#else
  InternalAddress here(__ pc());
  __ pushptr(here.addr());
#endif

  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  // Mark and relocate so the runtime can find/patch the call target.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();
  // Pad to 16 bytes (see size_deopt_handler) to keep the stub patchable.
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
638 const bool Matcher::match_rule_supported(int opcode) {
639 if (!has_match_rule(opcode))
640 return false;
642 switch (opcode) {
643 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
644 case Op_CountLeadingZerosI:
645 case Op_CountLeadingZerosL:
646 if (!UseCountLeadingZerosInstruction)
647 return false;
648 break;
649 case Op_CountTrailingZerosI:
650 case Op_CountTrailingZerosL:
651 if (!UseCountTrailingZerosInstruction)
652 return false;
653 break;
654 }
656 return true; // Per default match rules are supported.
657 }
659 //FIXME
660 // emit call stub, compiled java to interpreter
//FIXME
// emit call stub, compiled java to interpreter
// Emits the static call stub into the stub section: a relocated li48 of
// the methodOop placeholder into S3 followed by a jump through AT to a
// yet-unresolved destination (-1).  The stub is patched later when the
// call is resolved.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */
  // static stub relocation also tags the methodOop in the code-stream.
  __ li48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;
  __ li48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  // Keep the stub 16-byte aligned so it can be patched safely.
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
// size of call stub, compiled java to interpreter
// Must cover what emit_java_to_interp() produces: one li48 (4 insns of
// 4 bytes) plus the unresolved call sequence, rounded to 16 for alignment.
uint size_java_to_interp() {
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}

// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp() {
  return 16;   // in emit_java_to_interp + in Java_Static_Call
}
714 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
715 if( Assembler::is_simm16(offset) ) return true;
716 else
717 {
718 assert(false, "Not implemented yet !" );
719 Unimplemented();
720 }
721 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Narrow oops never use a complex (base+index+disp) address on this port.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same policy for compressed klass pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Max vector size in bytes. 0 if not supported.
// Only 8-byte (VecD) vectors are supported; MaxVectorSize must be 8.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  // Any size other than 8 is a bug on this port.
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are not implemented on this port.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
// Minimum vector size equals the maximum: only one vector width exists.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}

// Register for DIVI projection of divmodI
// divmodI/divmodL are not matched on this port, so these masks are
// never requested.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
826 // Return whether or not this register is ever used as an argument. This
827 // function is used on startup to build the trampoline stubs in generateOptoStub.
828 // Registers not mentioned will be killed by the VM call in the trampoline, and
829 // arguments in those registers not be available to the callee.
830 bool Matcher::can_be_java_arg( int reg ) {
831 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
832 if ( reg == T0_num || reg == T0_H_num
833 || reg == A0_num || reg == A0_H_num
834 || reg == A1_num || reg == A1_H_num
835 || reg == A2_num || reg == A2_H_num
836 || reg == A3_num || reg == A3_H_num
837 || reg == A4_num || reg == A4_H_num
838 || reg == A5_num || reg == A5_H_num
839 || reg == A6_num || reg == A6_H_num
840 || reg == A7_num || reg == A7_H_num )
841 return true;
843 if ( reg == F12_num || reg == F12_H_num
844 || reg == F13_num || reg == F13_H_num
845 || reg == F14_num || reg == F14_H_num
846 || reg == F15_num || reg == F15_H_num
847 || reg == F16_num || reg == F16_H_num
848 || reg == F17_num || reg == F17_H_num
849 || reg == F18_num || reg == F18_H_num
850 || reg == F19_num || reg == F19_H_num )
851 return true;
853 return false;
854 }
// A register is spillable as an argument exactly when it is a java arg.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use assembler magic-constant division for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL
// divmodL is not matched on this port, so this is never requested.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Method handle invokes save/restore around the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// Each compute_padding() below returns the number of padding bytes needed
// so that the call sequence (lui/ori/dsll/ori/jalr/nop) starts on the
// required alignment boundary.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  //li64 <--- skip
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  // The 6-instruction li64 that loads the IC klass precedes the call
  // sequence; only the call itself must be aligned, so skip past it.
  current_offset += 4 * 6;    // skip li64
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }

// Threshold size (in bytes) below which ClearArray is expanded inline.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// The count operand of the ClearArray node is in (double-)words, not bytes.
// (The "ecx/rep stos" wording elsewhere is an x86 legacy description.)
const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// MIPS has no absolute addressing mode, so the polling-page address must be
// materialized in a register and supplied as an input.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The static call sequence is 6 instructions (24 bytes):
  //   lui / ori / dsll / ori / jalr / nop
  // so the return address is NativeCall::instruction_size bytes past the
  // start of the call.
  return NativeCall::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  // 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call.
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  // The dynamic call first materializes the inline-cache klass:
  //   lui IC_Klass / ori IC_Klass / dsll IC_Klass / ori IC_Klass
  // then emits the 6-instruction call sequence:
  //   lui T9 / ori T9 / dsll T9 / ori T9 / jalr T9 / nop
  // NOTE(review): the comment above lists 4 IC-materialization instructions
  // but 6 * 4 bytes are added here (presumably the li64 used is padded to
  // 6 instructions) -- confirm against Java_Dynamic_Call.
  return 6 * 4 + NativeCall::instruction_size;
}
1012 /*
1013 // EMIT_OPCODE()
1014 void emit_opcode(CodeBuffer &cbuf, int code) {
1015 *(cbuf.code_end()) = (unsigned char)code;
1016 cbuf.set_code_end(cbuf.code_end() + 1);
1017 }
1018 */
// Emit a 32-bit datum into the instruction stream together with a
// relocation entry of the given type/format at the current insts mark.
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
        int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}
1026 //=============================================================================
1028 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1029 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1030 static enum RC rc_class( OptoReg::Name reg ) {
1031 if( !OptoReg::is_valid(reg) ) return rc_bad;
1032 if (OptoReg::is_stack(reg)) return rc_stack;
1033 VMReg r = OptoReg::as_VMReg(reg);
1034 if (r->is_Register()) return rc_int;
1035 assert(r->is_FloatRegister(), "must be");
1036 return rc_float;
1037 }
// Shared worker for MachSpillCopyNode::format/emit/size.  Handles every
// source/destination register-class combination for a spill copy:
// {stack, gpr, fpr} -> {stack, gpr, fpr}, in both 64-bit form (an even,
// adjacent register pair) and 32-bit form.
//  - cbuf != NULL: emit the instructions into the code buffer.
//  - cbuf == NULL, !do_size: print an assembly listing to 'st'.
// In all modes the accumulated byte size of the copy is returned.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;  // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through scratch register AT.
      assert(src_second != dst_first, "overlap");
      // An even first register with an adjacent second register means a
      // 64-bit value; the same test is used in every branch below.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI reloads with a sign-extending lw; other 32-bit values
          // (e.g. narrow oops) use a zero-extending lwu.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem -> fpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI uses a 32-bit move; other values copy the full 64-bit
          // register via daddu with R0.
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> fpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the listing prints dst, src while dmtc1 assembly
            // syntax is "dmtc1 rt(gpr), fs(fpr)" -- operand order in this
            // text appears reversed; confirm intended listing format.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // fpr ->
    if (dst_first_rc == rc_stack) {
      // fpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // fpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // fpr -> fpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // No branch above handled the combination -- should never happen.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Print the spill copy: delegates to implementation() in listing mode.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy: delegates to implementation() in emit mode.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Byte size of the spill copy: delegates to implementation() in sizing mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1470 //=============================================================================
1471 #
#ifndef PRODUCT
// Listing for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; computed generically by MachNode.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1489 //=============================================================================
1490 #ifndef PRODUCT
1491 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1492 Compile *C = ra_->C;
1493 int framesize = C->frame_size_in_bytes();
1495 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1497 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1498 st->cr(); st->print("\t");
1499 if (UseLoongsonISA) {
1500 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1501 } else {
1502 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1503 st->cr(); st->print("\t");
1504 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1505 }
1507 if( do_polling() && C->is_method_compilation() ) {
1508 st->print("Poll Safepoint # MachEpilogNode");
1509 }
1510 }
1511 #endif
// Emit the method epilog: pop the frame, restore RA/FP (saved below the
// caller's SP), and poll the safepoint page for method compilations.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson: single quad-load restores both RA and FP.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  // 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a
  // safepoint, hence the is_method_compilation() guard.
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the polling-page address and do a relocated poll load.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    // Optimized form: split the page address across lui + load offset.
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Too many variables affect the length; just compute it the hard way.
  return MachNode::size(ra_);
}
// Worst-case number of relocations the epilog may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Pipeline description: use the generic MachNode pipeline class.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Byte offset of the safepoint poll within this node's code; 0 here.
int MachEpilogNode::safepoint_offset() const { return 0; }
1557 //=============================================================================
#ifndef PRODUCT
// Listing: the lock box is materialized as SP + offset into the dest reg.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One addi instruction: 4 bytes.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1572 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1573 MacroAssembler _masm(&cbuf);
1574 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1575 int reg = ra_->get_encode(this);
1577 __ addi(as_Register(reg), SP, offset);
1578 /*
1579 if( offset >= 128 ) {
1580 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1581 emit_rm(cbuf, 0x2, reg, 0x04);
1582 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1583 emit_d32(cbuf, offset);
1584 }
1585 else {
1586 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1587 emit_rm(cbuf, 0x1, reg, 0x04);
1588 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1589 emit_d8(cbuf, offset);
1590 }
1591 */
1592 }
1595 //static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
  // Runtime call sequence is 6 instructions (24 bytes):
  //   lui / ori / dsll / ori / jalr / nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1613 //=============================================================================
#ifndef PRODUCT
// Listing for padding NOPs (4 bytes per nop).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1620 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1621 MacroAssembler _masm(&cbuf);
1622 int i = 0;
1623 for(i = 0; i < _count; i++)
1624 __ nop();
1625 }
// Each nop is 4 bytes.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Pipeline description: use the generic MachNode pipeline class.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1634 //=============================================================================
1636 //=============================================================================
#ifndef PRODUCT
// Listing of the unverified entry point: inline-cache klass check that
// falls through to the verified entry or jumps to the IC-miss stub.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
1647 #endif
// Emit the unverified entry point: compare the receiver's klass (loaded
// from T0) against the inline-cache klass; on mismatch, tail-call the
// IC-miss stub via T9; on match, fall through to the verified entry.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();  // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();  // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size of the unverified entry point; computed generically by MachNode.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1680 //=============================================================================
// The constant-table base lives in a pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
// This port emits the constant base directly; no post-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// using a relocated li48, so the address can be fixed up on relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 expands to 4 instructions of 4 bytes each.
  return 4 * 4;
}
#ifndef PRODUCT
// Listing for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1725 //=============================================================================
#ifndef PRODUCT
// Textual listing of the prolog: optional stack bang, save RA/FP below SP,
// set up FP, then allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson: single quad-store saves both RA and FP.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prolog: stack-overflow check, save RA/FP below the
// incoming SP, establish FP, allocate the frame, and leave patch room for
// NativeJump::patch_verified_entry().
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // NOTE(review): format() tests need_stack_bang(bangsize) while this emit
  // uses framesize (bangsize is otherwise unused here) -- confirm which
  // argument is intended.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (UseLoongsonISA) {
    // Loongson: single quad-store saves both RA and FP.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  // 2013.10.22 Jin: Make enough room for patch_verified_entry().
  __ nop();
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  // Too many variables affect the length; just compute it the hard way.
  return MachNode::size(ra_);
}
// Worst-case number of relocations the prolog may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1800 %}
1802 //----------ENCODING BLOCK-----------------------------------------------------
1803 // This block specifies the encoding classes used by the compiler to output
1804 // byte streams. Encoding classes generate functions which are called by
1805 // Machine Instruction Nodes in order to generate the bit encoding of the
1806 // instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently four supported interfaces:
1808 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1809 // operand to generate a function which returns its register number when
1810 // queried. CONST_INTER causes an operand to generate a function which
1811 // returns the value of the constant when queried. MEMORY_INTER causes an
1812 // operand to generate four functions which return the Base Register, the
1813 // Index Register, the Scale Value, and the Offset Value of the operand when
1814 // queried. COND_INTER causes an operand to generate six functions which
1815 // return the encoding code (ie - encoding bits for the instruction)
1816 // associated with each basic boolean condition for a conditional instruction.
1817 // Instructions specify two basic values for encoding. They use the
1818 // ins_encode keyword to specify their encoding class (which must be one of
1819 // the class names specified in the encoding block), and they use the
1820 // opcode keyword to specify, in order, their primary, secondary, and
1821 // tertiary opcode. Only the opcode sections which a particular instruction
1822 // needs for encoding need to be specified.
1823 encode %{
1824 /*
1825 Alias:
1826 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1827 118 B14: # B19 B15 <- B13 Freq: 0.899955
1828 118 add S1, S2, V0 #@addP_reg_reg
1829 11c lb S0, [S1 + #-8257524] #@loadB
1830 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1831 */
1832 //Load byte signed
  // Load byte (signed) from a memory operand into a GP register.
  // Decomposes base + (index << scale) + disp, using the Loongson gslbx
  // indexed-load where available and falling back to addu/lb otherwise;
  // AT and T9 are used as scratch registers.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx handles base + index + disp directly.
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte (unsigned, zero-extended) from a memory operand into a GP
  // register.  Decomposes base + (index << scale) + disp with daddu/lbu;
  // AT and T9 are used as scratch registers.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from a GP register to a memory operand.  Decomposes
  // base + (index << scale) + disp, preferring the Loongson gssbx
  // indexed-store (which takes an 8-bit immediate, hence the is_simm(disp,8)
  // checks); AT and T9 are used as scratch registers.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement does not fit in 16 bits: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            // Note: scaled index in AT as "base", original base as "index"
            // (address computation is commutative).
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store an 8-bit immediate constant to memory.  R0 (hardwired zero) is
  // stored directly when the value is zero, saving the materialization.
  // The Loongson branch prefers gssbx (reg+reg+imm8 store) to fold the
  // index/displacement add; the generic branch forms the address in AT.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Generic MIPS: AT = base + (index << scale), then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {

        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp; final address is AT + index (scale == 0).
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; final address is base + AT.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Volatile variant of store_B_immI_enc: store an 8-bit immediate, then
  // emit a full memory barrier (sync).  R0 supplies the zero when value == 0.
  // Unlike the plain variant, this one never uses the Loongson gssbx forms.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    // Volatile semantics: order this store against subsequent accesses.
    __ sync();
  %}
  // Load Short (16bit signed)
  // Sign-extending halfword load (lh) from [base + (index << scale) + disp].
  // NOTE(review): the large-displacement paths use addu (32-bit add) for
  // address arithmetic while the scaled-index sum uses daddu -- presumably
  // addresses fit the sign-extended 32-bit range; confirm for large heaps.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  %}
2217 // Load Char (16bit unsigned)
2218 enc_class load_C_enc (mRegI dst, memory mem) %{
2219 MacroAssembler _masm(&cbuf);
2220 int dst = $dst$$reg;
2221 int base = $mem$$base;
2222 int index = $mem$$index;
2223 int scale = $mem$$scale;
2224 int disp = $mem$$disp;
2226 if( index != 0 ) {
2227 if (scale == 0) {
2228 __ daddu(AT, as_Register(base), as_Register(index));
2229 } else {
2230 __ dsll(AT, as_Register(index), scale);
2231 __ daddu(AT, as_Register(base), AT);
2232 }
2233 if( Assembler::is_simm16(disp) ) {
2234 __ lhu(as_Register(dst), AT, disp);
2235 } else {
2236 __ move(T9, disp);
2237 __ addu(AT, AT, T9);
2238 __ lhu(as_Register(dst), AT, 0);
2239 }
2240 } else {
2241 if( Assembler::is_simm16(disp) ) {
2242 __ lhu(as_Register(dst), as_Register(base), disp);
2243 } else {
2244 __ move(T9, disp);
2245 __ daddu(AT, as_Register(base), T9);
2246 __ lhu(as_Register(dst), AT, 0);
2247 }
2248 }
2249 %}
  // Store Char (16bit unsigned)
  // Stores the low 16 bits of 'src' (sh) to [base + (index << scale) + disp].
  // On Loongson, gsshx (reg+reg+imm8 store) folds the address add when the
  // displacement fits in 8 bits.
  // NOTE(review): address adds here use addu (32-bit) -- verify addresses
  // always fit the sign-extended 32-bit range (other encodings use daddu).
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Load int: 32-bit load (lw, sign-extending on MIPS64) from
  // [base + (index << scale) + disp] into 'dst'.  On Loongson, gslwx folds
  // the address add when the displacement fits in 8 bits.
  // NOTE(review): address adds use addu (32-bit) throughout this encoding --
  // verify addresses always fit the sign-extended 32-bit range.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store int: writes the low 32 bits of 'src' (sw) to
  // [base + (index << scale) + disp].  On Loongson, gsswx folds the address
  // add when the displacement fits in 8 bits.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a 32-bit immediate to memory (sw).  R0 is stored directly when
  // the value is zero; otherwise the constant is materialized in a scratch
  // register first.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        // NOTE(review): large-displacement adds use addu (32-bit) --
        // confirm addresses fit the sign-extended 32-bit range.
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  %}
  // Load narrow oop (compressed pointer): zero-extending 32-bit load (lwu).
  // The memory operand must carry no displacement relocation.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load pointer: full 64-bit load (ld) from [base + (index << scale) + disp].
  // The memory operand must carry no displacement relocation.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store pointer: full 64-bit store (sd) of 'src' to
  // [base + (index << scale) + disp].
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
  // Store narrow oop (compressed pointer): 32-bit store (sw) of 'src'.
  // NOTE(review): the large-displacement paths use addu (32-bit add) while
  // the scaled-index sum uses daddu -- confirm addresses always fit the
  // sign-extended 32-bit range.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  %}
  // Store a pointer immediate (immP31, fits 31 bits) as a 64-bit value (sd).
  // R0 supplies the zero for null; otherwise the constant is materialized
  // in a scratch register first.
  enc_class store_P_immP_enc (memory mem, immP31 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sd(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sd(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    }
  %}
2673 /*
2674 * 1d4 storeImmN [S0 + #16 (8-bit)], narrowoop: spec/benchmarks/_213_javac/Identifier:exact *
2675 * # compressed ptr ! Field: spec/benchmarks/_213_javac/Identifier.value
2676 * 0x00000055648065d4: daddu at, s0, zero
2677 * 0x00000055648065d8: lui t9, 0x0 ; {oop(a 'spec/benchmarks/_213_javac/Identifier')}
2678 * 0x00000055648065dc: ori t9, t9, 0xfffff610
2679 * 0x00000055648065e0: dsll t9, t9, 16
2680 * 0x00000055648065e4: ori t9, t9, 0xffffc628
2681 * 0x00000055648065e8: sw t9, 0x10(at)
2682 */
  // Store a narrow-oop immediate.  A NULL constant is stored as a zero word
  // via R0.  Otherwise the oop is registered with the OopRecorder and its
  // index is emitted under a narrow-oop relocation; li48 is used on the
  // relocated path so the materialization sequence has a fixed, patchable
  // length.
  enc_class storeImmN_enc (memory mem, immN src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long * value = (long *)$src$$constant;

    if (value == NULL) {
      // Null narrow oop: write a zero word (no recorder entry needed).
      guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
      if (index == 0) {
        __ sw(R0, as_Register(base), disp);
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sw(R0, AT, disp);
      }

      return;
    }

    int oop_index = __ oop_recorder()->find_index((jobject)value);
    RelocationHolder rspec = oop_Relocation::spec(oop_index);

    guarantee(scale == 0, "FIXME: scale is not zero !");
    guarantee(value != 0, "FIXME: value is zero !");

    if (index != 0) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none) {
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, oop_index);
        } else {
          // NOTE(review): the unrelocated path emits the raw recorder index
          // (set64), not an encoded oop -- presumably unreachable in
          // practice; confirm.
          __ set64(T9, oop_index);
        }
        __ sw(T9, AT, disp);
      } else {
        // NOTE(review): addu (32-bit) here vs daddu in the no-index path --
        // confirm addresses fit the sign-extended 32-bit range.
        __ move(T9, disp);
        __ addu(AT, AT, T9);

        if(rspec.type() != relocInfo::none) {
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, oop_index);
        } else {
          __ set64(T9, oop_index);
        }
        __ sw(T9, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        if($src->constant_reloc() != relocInfo::none) {
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, oop_index);
        } else {
          __ set64(T9, oop_index);
        }
        __ sw(T9, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);

        if($src->constant_reloc() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, oop_index);
        } else {
          __ set64(T9, oop_index);
        }
        __ sw(T9, AT, 0);
      }
    }
  %}
  // Store a narrow-klass immediate (compressed class pointer).  The Klass*
  // is registered with the OopRecorder and the encoded (narrow) value is
  // emitted with a metadata relocation; li48 keeps the relocated
  // materialization sequence a fixed, patchable length.
  // NOTE(review): the relocation format used is narrow_oop_operand even
  // though the relocation itself is metadata_Relocation -- confirm this is
  // the intended pairing for narrow klass patching on this port.
  enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
    MacroAssembler _masm(&cbuf);

    assert (UseCompressedOops, "should only be used for compressed headers");
    assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    int klass_index = __ oop_recorder()->find_index((Klass*)value);
    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
    long narrowp = Klass::encode_klass((Klass*)value);

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }

        __ sw(T9, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        }
        else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, 0);
      }
    }
  %}
  // Store a zero narrow oop: write a zero 32-bit word (sw of R0) to
  // [base + (index << scale) + disp].
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load long: 64-bit load (ld) from [base + (index << scale) + disp].
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // $base may hold a null object, and the server compiler expects the
    // implicit null check to be the FIRST instruction of this access
    // (the exception offset points at it).  This probing lw performs the
    // null check up front, before any address arithmetic.
    __ lw(AT, as_Register(base), 0);

    // Historical note: the original 32-bit encoding emitted two lw's
    // (lo/hi halves), which broke when the first load overwrote the base
    // register (seen in java.util.Arrays::sort1).  The single ld below
    // avoids that hazard.

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move through AT looks redundant (ld could use
        // base directly) -- kept as-is; verify before simplifying.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store long: 64-bit store (sd) of 'src' to [base + (index << scale) + disp].
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move through AT looks redundant (sd could use
        // base directly) -- kept as-is; verify before simplifying.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2959 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2960 MacroAssembler _masm(&cbuf);
2961 int base = $mem$$base;
2962 int index = $mem$$index;
2963 int scale = $mem$$scale;
2964 int disp = $mem$$disp;
2966 if( index != 0 ) {
2967 if (scale == 0) {
2968 __ daddu(AT, as_Register(base), as_Register(index));
2969 } else {
2970 __ dsll(AT, as_Register(index), scale);
2971 __ daddu(AT, as_Register(base), AT);
2972 }
2973 if( Assembler::is_simm16(disp) ) {
2974 __ sd(R0, AT, disp);
2975 } else {
2976 __ move(T9, disp);
2977 __ addu(AT, AT, T9);
2978 __ sd(R0, AT, 0);
2979 }
2980 } else {
2981 if( Assembler::is_simm16(disp) ) {
2982 __ move(AT, as_Register(base));
2983 __ sd(R0, AT, disp);
2984 } else {
2985 __ move(T9, disp);
2986 __ addu(AT, as_Register(base), T9);
2987 __ sd(R0, AT, 0);
2988 }
2989 }
2990 %}
2992 enc_class store_L_immL_enc (memory mem, immL src) %{
2993 MacroAssembler _masm(&cbuf);
2994 int base = $mem$$base;
2995 int index = $mem$$index;
2996 int scale = $mem$$scale;
2997 int disp = $mem$$disp;
2998 long imm = $src$$constant;
3000 if( index != 0 ) {
3001 if (scale == 0) {
3002 __ daddu(AT, as_Register(base), as_Register(index));
3003 } else {
3004 __ dsll(AT, as_Register(index), scale);
3005 __ daddu(AT, as_Register(base), AT);
3006 }
3007 if( Assembler::is_simm16(disp) ) {
3008 __ li(T9, imm);
3009 __ sd(T9, AT, disp);
3010 } else {
3011 __ move(T9, disp);
3012 __ addu(AT, AT, T9);
3013 __ li(T9, imm);
3014 __ sd(T9, AT, 0);
3015 }
3016 } else {
3017 if( Assembler::is_simm16(disp) ) {
3018 __ move(AT, as_Register(base));
3019 __ li(T9, imm);
3020 __ sd(T9, AT, disp);
3021 } else {
3022 __ move(T9, disp);
3023 __ addu(AT, as_Register(base), T9);
3024 __ li(T9, imm);
3025 __ sd(T9, AT, 0);
3026 }
3027 }
3028 %}
  // Load float: 32-bit FP load (lwc1) into 'dst' from
  // [base + (index << scale) + disp].  On Loongson, gslwxc1 folds the
  // address add when the displacement fits in 8 bits.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3086 enc_class store_F_reg_enc (memory mem, regF src) %{
3087 MacroAssembler _masm(&cbuf);
3088 int base = $mem$$base;
3089 int index = $mem$$index;
3090 int scale = $mem$$scale;
3091 int disp = $mem$$disp;
3092 FloatRegister src = $src$$FloatRegister;
3094 if( index != 0 ) {
3095 if( Assembler::is_simm16(disp) ) {
3096 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3097 if (scale == 0) {
3098 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3099 } else {
3100 __ dsll(AT, as_Register(index), scale);
3101 __ gsswxc1(src, as_Register(base), AT, disp);
3102 }
3103 } else {
3104 if (scale == 0) {
3105 __ daddu(AT, as_Register(base), as_Register(index));
3106 } else {
3107 __ dsll(AT, as_Register(index), scale);
3108 __ daddu(AT, as_Register(base), AT);
3109 }
3110 __ swc1(src, AT, disp);
3111 }
3112 } else {
3113 if (scale == 0) {
3114 __ daddu(AT, as_Register(base), as_Register(index));
3115 } else {
3116 __ dsll(AT, as_Register(index), scale);
3117 __ daddu(AT, as_Register(base), AT);
3118 }
3119 __ move(T9, disp);
3120 if( UseLoongsonISA ) {
3121 __ gsswxc1(src, AT, T9, 0);
3122 } else {
3123 __ daddu(AT, AT, T9);
3124 __ swc1(src, AT, 0);
3125 }
3126 }
3127 } else {
3128 if( Assembler::is_simm16(disp) ) {
3129 __ swc1(src, as_Register(base), disp);
3130 } else {
3131 __ move(T9, disp);
3132 if( UseLoongsonISA ) {
3133 __ gslwxc1(src, as_Register(base), T9, 0);
3134 } else {
3135 __ daddu(AT, as_Register(base), T9);
3136 __ swc1(src, AT, 0);
3137 }
3138 }
3139 }
3140 %}
3142 enc_class load_D_enc (regD dst, memory mem) %{
3143 MacroAssembler _masm(&cbuf);
3144 int base = $mem$$base;
3145 int index = $mem$$index;
3146 int scale = $mem$$scale;
3147 int disp = $mem$$disp;
3148 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3150 if( index != 0 ) {
3151 if( Assembler::is_simm16(disp) ) {
3152 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3153 if (scale == 0) {
3154 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3155 } else {
3156 __ dsll(AT, as_Register(index), scale);
3157 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3158 }
3159 } else {
3160 if (scale == 0) {
3161 __ daddu(AT, as_Register(base), as_Register(index));
3162 } else {
3163 __ dsll(AT, as_Register(index), scale);
3164 __ daddu(AT, as_Register(base), AT);
3165 }
3166 __ ldc1(dst_reg, AT, disp);
3167 }
3168 } else {
3169 if (scale == 0) {
3170 __ daddu(AT, as_Register(base), as_Register(index));
3171 } else {
3172 __ dsll(AT, as_Register(index), scale);
3173 __ daddu(AT, as_Register(base), AT);
3174 }
3175 __ move(T9, disp);
3176 if( UseLoongsonISA ) {
3177 __ gsldxc1(dst_reg, AT, T9, 0);
3178 } else {
3179 __ addu(AT, AT, T9);
3180 __ ldc1(dst_reg, AT, 0);
3181 }
3182 }
3183 } else {
3184 if( Assembler::is_simm16(disp) ) {
3185 __ ldc1(dst_reg, as_Register(base), disp);
3186 } else {
3187 __ move(T9, disp);
3188 if( UseLoongsonISA ) {
3189 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3190 } else {
3191 __ addu(AT, as_Register(base), T9);
3192 __ ldc1(dst_reg, AT, 0);
3193 }
3194 }
3195 }
3196 %}
3198 enc_class store_D_reg_enc (memory mem, regD src) %{
3199 MacroAssembler _masm(&cbuf);
3200 int base = $mem$$base;
3201 int index = $mem$$index;
3202 int scale = $mem$$scale;
3203 int disp = $mem$$disp;
3204 FloatRegister src_reg = as_FloatRegister($src$$reg);
3206 if( index != 0 ) {
3207 if( Assembler::is_simm16(disp) ) {
3208 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3209 if (scale == 0) {
3210 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3211 } else {
3212 __ dsll(AT, as_Register(index), scale);
3213 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3214 }
3215 } else {
3216 if (scale == 0) {
3217 __ daddu(AT, as_Register(base), as_Register(index));
3218 } else {
3219 __ dsll(AT, as_Register(index), scale);
3220 __ daddu(AT, as_Register(base), AT);
3221 }
3222 __ sdc1(src_reg, AT, disp);
3223 }
3224 } else {
3225 if (scale == 0) {
3226 __ daddu(AT, as_Register(base), as_Register(index));
3227 } else {
3228 __ dsll(AT, as_Register(index), scale);
3229 __ daddu(AT, as_Register(base), AT);
3230 }
3231 __ move(T9, disp);
3232 if( UseLoongsonISA ) {
3233 __ gssdxc1(src_reg, AT, T9, 0);
3234 } else {
3235 __ addu(AT, AT, T9);
3236 __ sdc1(src_reg, AT, 0);
3237 }
3238 }
3239 } else {
3240 if( Assembler::is_simm16(disp) ) {
3241 __ sdc1(src_reg, as_Register(base), disp);
3242 } else {
3243 __ move(T9, disp);
3244 if( UseLoongsonISA ) {
3245 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3246 } else {
3247 __ addu(AT, as_Register(base), T9);
3248 __ sdc1(src_reg, AT, 0);
3249 }
3250 }
3251 }
3252 %}
  // Call from compiled code into the VM runtime (or a runtime leaf):
  // mark the call site, emit a runtime_call relocation, load the target
  // address into T9, then jalr through T9 with a delay-slot nop.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    // li48: load the 48-bit target into T9.  NOTE(review): presumably a
    // fixed-length sequence so the relocated call site has constant size
    // -- confirm against macroAssembler_mips.
    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop(); // branch delay slot
  %}
  // Direct (static / opt-virtual / runtime-stub) Java call.
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Select the relocation flavor: no resolved _method means a call to
    // a runtime stub; otherwise optimized-virtual or plain static call.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      // runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      // opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      __ relocate(relocInfo::static_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      // static_call_Relocation::spec(), RELOC_IMM32 );
    }

    // NOTE(review): Java_To_Runtime uses li48 while this uses li; if the
    // li sequence length varies with the immediate value, re-patching the
    // call target later could require a different-sized sequence -- confirm.
    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop(); // branch delay slot
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * [Ref: LIR_Assembler::ic_call() ]
   */
  // Inline-cache dispatched (virtual/interface) Java call; ic_call emits
  // the inline-cache setup and the call with its relocation.
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Convert the fast_lock/fast_unlock outcome left in AT into a flags
  // value: flags = 0 when AT == 0, flags = 0xFFFFFFFF when AT != 0.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);     // flags = 0
    __ beq(AT, R0, L);          // AT == 0: leave flags at 0
    __ delayed()->nop();
    __ move(flags, 0xFFFFFFFF); // AT != 0: flags = -1
    __ bind(L);
  %}
  // Slow-path subtype check: sets result = 0 when 'sub' is a subtype of
  // 'super', result = 1 on a miss.  Uses $tmp and T9 as temporaries.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     * 47c B40: # B21 B41 <- B20 Freq: 0.155379
     * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc mov S2, NULL #@loadConP
     * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    // Branches to 'miss' on failure; falls through on success.
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0); // hit: result = 0
    __ b(done);
    __ nop();           // branch delay slot

    __ bind(miss);
    __ move(result, 1); // miss: result = 1
    __ bind(done);
  %}
3347 %}
3350 //---------MIPS FRAME--------------------------------------------------------------
3351 // Definition of frame structure and management information.
3352 //
3353 // S T A C K L A Y O U T Allocators stack-slot number
3354 // | (to get allocators register number
3355 // G Owned by | | v add SharedInfo::stack0)
3356 // r CALLER | |
3357 // o | +--------+ pad to even-align allocators stack-slot
3358 // w V | pad0 | numbers; owned by CALLER
3359 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3360 // h ^ | in | 5
3361 // | | args | 4 Holes in incoming args owned by SELF
3362 // | | old | | 3
3363 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3364 // v | | ret | 3 return address
3365 // Owned by +--------+
3366 // Self | pad2 | 2 pad to align old SP
3367 // | +--------+ 1
3368 // | | locks | 0
3369 // | +--------+----> SharedInfo::stack0, even aligned
3370 // | | pad1 | 11 pad to align new SP
3371 // | +--------+
3372 // | | | 10
3373 // | | spills | 9 spills
3374 // V | | 8 (pad0 slot for callee)
3375 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3376 // ^ | out | 7
3377 // | | args | 6 Holes in outgoing args owned by CALLEE
3378 // Owned by new | |
3379 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3380 // | |
3381 //
3382 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3383 // known from SELF's arguments and the Java calling convention.
3384 // Region 6-7 is determined per call site.
3385 // Note 2: If the calling convention leaves holes in the incoming argument
3386 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3388 // incoming area, as the Java calling convention is completely under
3389 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3391 // varargs C calling conventions.
3392 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3393 // even aligned with pad0 as needed.
3394 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3395 // region 6-11 is even aligned; it may be padded out more so that
3396 // the region from SP to FP meets the minimum stack alignment.
3397 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3398 // alignment. Region 11, pad1, may be dynamically extended so that
3399 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses on MIPS.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
  // On LP64 a BasicObjectLock occupies two 32-bit stack slots.
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  // Integer/pointer/long results in V0; float/double results in F0.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  // Same assignment as c_return_value above.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3517 //----------ATTRIBUTES---------------------------------------------------------
3518 //----------Operand Attributes-------------------------------------------------
3519 op_attrib op_cost(0); // Required cost attribute
3521 //----------Instruction Attributes---------------------------------------------
3522 ins_attrib ins_cost(100); // Required cost attribute
3523 ins_attrib ins_size(32); // Required size attribute (in bits)
3524 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3525 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3526 // non-matching short branch variant of some
3527 // long branch?
3528 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3529 // specifies the alignment that some part of the instruction (not
3530 // necessarily the start) requires. If > 1, a compute_padding()
3531 // function must be provided for the instruction
3533 //----------OPERANDS-----------------------------------------------------------
3534 // Operand definitions must precede instruction definitions for correct parsing
3535 // in the ADLC because operands constitute user defined types which are used in
3536 // instruction definitions.
3538 // Vectors
3539 operand vecD() %{
3540 constraint(ALLOC_IN_RC(dbl_reg));
3541 match(VecD);
3543 format %{ %}
3544 interface(REG_INTER);
3545 %}
3547 // Flags register, used as output of compare instructions
3548 operand FlagsReg() %{
3549 constraint(ALLOC_IN_RC(mips_flags));
3550 match(RegFlags);
3552 format %{ "EFLAGS" %}
3553 interface(REG_INTER);
3554 %}
3556 //----------Simple Operands----------------------------------------------------
3557 //TODO: Should we need to define some more special immediate number ?
3558 // Immediate Operands
3559 // Integer Immediate
3560 operand immI() %{
3561 match(ConI);
3562 //TODO: should not match immI8 here LEE
3563 match(immI8);
3565 op_cost(20);
3566 format %{ %}
3567 interface(CONST_INTER);
3568 %}
3570 // Long Immediate 8-bit
3571 operand immL8()
3572 %{
3573 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3574 match(ConL);
3576 op_cost(5);
3577 format %{ %}
3578 interface(CONST_INTER);
3579 %}
3581 // Constant for test vs zero
3582 operand immI0() %{
3583 predicate(n->get_int() == 0);
3584 match(ConI);
3586 op_cost(0);
3587 format %{ %}
3588 interface(CONST_INTER);
3589 %}
3591 // Constant for increment
3592 operand immI1() %{
3593 predicate(n->get_int() == 1);
3594 match(ConI);
3596 op_cost(0);
3597 format %{ %}
3598 interface(CONST_INTER);
3599 %}
3601 // Constant for decrement
3602 operand immI_M1() %{
3603 predicate(n->get_int() == -1);
3604 match(ConI);
3606 op_cost(0);
3607 format %{ %}
3608 interface(CONST_INTER);
3609 %}
3611 operand immI_MaxI() %{
3612 predicate(n->get_int() == 2147483647);
3613 match(ConI);
3615 op_cost(0);
3616 format %{ %}
3617 interface(CONST_INTER);
3618 %}
3620 // Valid scale values for addressing modes
3621 operand immI2() %{
3622 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3623 match(ConI);
3625 format %{ %}
3626 interface(CONST_INTER);
3627 %}
3629 operand immI8() %{
3630 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3631 match(ConI);
3633 op_cost(5);
3634 format %{ %}
3635 interface(CONST_INTER);
3636 %}
3638 operand immI16() %{
3639 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3640 match(ConI);
3642 op_cost(10);
3643 format %{ %}
3644 interface(CONST_INTER);
3645 %}
3647 // Constant for long shifts
3648 operand immI_32() %{
3649 predicate( n->get_int() == 32 );
3650 match(ConI);
3652 op_cost(0);
3653 format %{ %}
3654 interface(CONST_INTER);
3655 %}
3657 operand immI_63() %{
3658 predicate( n->get_int() == 63 );
3659 match(ConI);
3661 op_cost(0);
3662 format %{ %}
3663 interface(CONST_INTER);
3664 %}
3666 operand immI_0_31() %{
3667 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3668 match(ConI);
3670 op_cost(0);
3671 format %{ %}
3672 interface(CONST_INTER);
3673 %}
// Operand for a non-negative integer mask constant.  NOTE(review):
// presumably accepts values with contiguous one-bits in the low end
// (is_int_mask returning -1 for non-masks) -- confirm against the
// Assembler::is_int_mask implementation.
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3685 operand immI_32_63() %{
3686 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3687 match(ConI);
3688 op_cost(0);
3690 format %{ %}
3691 interface(CONST_INTER);
3692 %}
// Integer immediate whose NEGATION fits a signed 16-bit field:
// range [-32767, 32768] (note the off-by-one shift versus immI16),
// so a subtraction of this constant can be encoded as an add of -imm.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3703 operand immI_0_32767() %{
3704 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3705 match(ConI);
3706 op_cost(0);
3708 format %{ %}
3709 interface(CONST_INTER);
3710 %}
3712 operand immI_0_65535() %{
3713 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3714 match(ConI);
3715 op_cost(0);
3717 format %{ %}
3718 interface(CONST_INTER);
3719 %}
3721 operand immI_1() %{
3722 predicate( n->get_int() == 1 );
3723 match(ConI);
3725 op_cost(0);
3726 format %{ %}
3727 interface(CONST_INTER);
3728 %}
3730 operand immI_2() %{
3731 predicate( n->get_int() == 2 );
3732 match(ConI);
3734 op_cost(0);
3735 format %{ %}
3736 interface(CONST_INTER);
3737 %}
3739 operand immI_3() %{
3740 predicate( n->get_int() == 3 );
3741 match(ConI);
3743 op_cost(0);
3744 format %{ %}
3745 interface(CONST_INTER);
3746 %}
3748 operand immI_7() %{
3749 predicate( n->get_int() == 7 );
3750 match(ConI);
3752 format %{ %}
3753 interface(CONST_INTER);
3754 %}
3756 // Immediates for special shifts (sign extend)
3758 // Constants for increment
3759 operand immI_16() %{
3760 predicate( n->get_int() == 16 );
3761 match(ConI);
3763 format %{ %}
3764 interface(CONST_INTER);
3765 %}
3767 operand immI_24() %{
3768 predicate( n->get_int() == 24 );
3769 match(ConI);
3771 format %{ %}
3772 interface(CONST_INTER);
3773 %}
3775 // Constant for byte-wide masking
3776 operand immI_255() %{
3777 predicate( n->get_int() == 255 );
3778 match(ConI);
3780 op_cost(0);
3781 format %{ %}
3782 interface(CONST_INTER);
3783 %}
3785 operand immI_65535() %{
3786 predicate( n->get_int() == 65535 );
3787 match(ConI);
3789 op_cost(5);
3790 format %{ %}
3791 interface(CONST_INTER);
3792 %}
3794 operand immI_65536() %{
3795 predicate( n->get_int() == 65536 );
3796 match(ConI);
3798 op_cost(5);
3799 format %{ %}
3800 interface(CONST_INTER);
3801 %}
3803 operand immI_M65536() %{
3804 predicate( n->get_int() == -65536 );
3805 match(ConI);
3807 op_cost(5);
3808 format %{ %}
3809 interface(CONST_INTER);
3810 %}
3812 // Pointer Immediate
3813 operand immP() %{
3814 match(ConP);
3816 op_cost(10);
3817 format %{ %}
3818 interface(CONST_INTER);
3819 %}
// Non-relocatable pointer constant whose value fits in the low 31 bits
// (high bits all zero), so it can be materialized without a full
// 64-bit constant sequence.
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
3832 // NULL Pointer Immediate
3833 operand immP0() %{
3834 predicate( n->get_ptr() == 0 );
3835 match(ConP);
3836 op_cost(0);
3838 format %{ %}
3839 interface(CONST_INTER);
3840 %}
3842 // Pointer Immediate: 64-bit
3843 operand immP_set() %{
3844 match(ConP);
3846 op_cost(5);
3847 // formats are generated automatically for constants and base registers
3848 format %{ %}
3849 interface(CONST_INTER);
3850 %}
3852 // Pointer Immediate: 64-bit
// Pointer Immediate: 64-bit
// Pointer constants that should be LOADED rather than synthesized
// inline: oop pointers (which need relocation) or values that would
// take more than 3 instructions to build.  Complement of
// immP_no_oop_cheap below.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
3863 // Pointer Immediate: 64-bit
// Pointer Immediate: 64-bit
// Non-oop pointer constants cheap enough to synthesize inline
// (at most 3 instructions).  Complement of immP_load above.
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer for polling page
// Matches exactly the address of the VM's safepoint polling page.
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}
3884 // Pointer Immediate
3885 operand immN() %{
3886 match(ConN);
3888 op_cost(10);
3889 format %{ %}
3890 interface(CONST_INTER);
3891 %}
3893 operand immNKlass() %{
3894 match(ConNKlass);
3896 op_cost(10);
3897 format %{ %}
3898 interface(CONST_INTER);
3899 %}
3901 // NULL Pointer Immediate
3902 operand immN0() %{
3903 predicate(n->get_narrowcon() == 0);
3904 match(ConN);
3906 op_cost(5);
3907 format %{ %}
3908 interface(CONST_INTER);
3909 %}
3911 // Long Immediate
3912 operand immL() %{
3913 match(ConL);
3915 op_cost(20);
3916 format %{ %}
3917 interface(CONST_INTER);
3918 %}
3920 // Long Immediate zero
3921 operand immL0() %{
3922 predicate( n->get_long() == 0L );
3923 match(ConL);
3924 op_cost(0);
3926 format %{ %}
3927 interface(CONST_INTER);
3928 %}
3930 operand immL7() %{
3931 predicate( n->get_long() == 7L );
3932 match(ConL);
3933 op_cost(0);
3935 format %{ %}
3936 interface(CONST_INTER);
3937 %}
3939 operand immL_M1() %{
3940 predicate( n->get_long() == -1L );
3941 match(ConL);
3942 op_cost(0);
3944 format %{ %}
3945 interface(CONST_INTER);
3946 %}
3948 // bit 0..2 zero
3949 operand immL_M8() %{
3950 predicate( n->get_long() == -8L );
3951 match(ConL);
3952 op_cost(0);
3954 format %{ %}
3955 interface(CONST_INTER);
3956 %}
3958 // bit 2 zero
3959 operand immL_M5() %{
3960 predicate( n->get_long() == -5L );
3961 match(ConL);
3962 op_cost(0);
3964 format %{ %}
3965 interface(CONST_INTER);
3966 %}
3968 // bit 1..2 zero
3969 operand immL_M7() %{
3970 predicate( n->get_long() == -7L );
3971 match(ConL);
3972 op_cost(0);
3974 format %{ %}
3975 interface(CONST_INTER);
3976 %}
3978 // bit 0..1 zero
3979 operand immL_M4() %{
3980 predicate( n->get_long() == -4L );
3981 match(ConL);
3982 op_cost(0);
3984 format %{ %}
3985 interface(CONST_INTER);
3986 %}
3988 // bit 3..6 zero
3989 operand immL_M121() %{
3990 predicate( n->get_long() == -121L );
3991 match(ConL);
3992 op_cost(0);
3994 format %{ %}
3995 interface(CONST_INTER);
3996 %}
3998 // Long immediate from 0 to 127.
3999 // Used for a shorter form of long mul by 10.
4000 operand immL_127() %{
4001 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4002 match(ConL);
4003 op_cost(0);
4005 format %{ %}
4006 interface(CONST_INTER);
4007 %}
// Operand for a non-negative long mask constant.  NOTE(review):
// presumably accepts values with contiguous one-bits in the low end
// (is_jlong_mask returning -1 for non-masks) -- confirm against the
// Assembler::is_jlong_mask implementation.
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4019 operand immL_0_65535() %{
4020 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4021 match(ConL);
4022 op_cost(0);
4024 format %{ %}
4025 interface(CONST_INTER);
4026 %}
// Long Immediate: cheap (materialize in <= 3 instructions)
// Complement of immL_expensive below.
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: expensive (materialize in > 3 instructions)
// Complement of immL_cheap above; typically loaded from the constant
// table instead of being synthesized inline.
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
4048 operand immL16() %{
4049 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4050 match(ConL);
4052 op_cost(10);
4053 format %{ %}
4054 interface(CONST_INTER);
4055 %}
4057 operand immL16_sub() %{
4058 predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
4059 match(ConL);
4061 op_cost(10);
4062 format %{ %}
4063 interface(CONST_INTER);
4064 %}
4066 // Long Immediate: low 32-bit mask
4067 operand immL_32bits() %{
4068 predicate(n->get_long() == 0xFFFFFFFFL);
4069 match(ConL);
4070 op_cost(20);
4072 format %{ %}
4073 interface(CONST_INTER);
4074 %}
4076 // Long Immediate 32-bit signed
4077 operand immL32()
4078 %{
4079 predicate(n->get_long() == (int) (n->get_long()));
4080 match(ConL);
4082 op_cost(15);
4083 format %{ %}
4084 interface(CONST_INTER);
4085 %}
4088 //single-precision floating-point zero
4089 operand immF0() %{
4090 predicate(jint_cast(n->getf()) == 0);
4091 match(ConF);
4093 op_cost(5);
4094 format %{ %}
4095 interface(CONST_INTER);
4096 %}
4098 //single-precision floating-point immediate
4099 operand immF() %{
4100 match(ConF);
4102 op_cost(20);
4103 format %{ %}
4104 interface(CONST_INTER);
4105 %}
4107 //double-precision floating-point zero
4108 operand immD0() %{
4109 predicate(jlong_cast(n->getd()) == 0);
4110 match(ConD);
4112 op_cost(5);
4113 format %{ %}
4114 interface(CONST_INTER);
4115 %}
4117 //double-precision floating-point immediate
4118 operand immD() %{
4119 match(ConD);
4121 op_cost(20);
4122 format %{ %}
4123 interface(CONST_INTER);
4124 %}
4126 // Register Operands
4127 // Integer Register
4128 operand mRegI() %{
4129 constraint(ALLOC_IN_RC(int_reg));
4130 match(RegI);
4132 format %{ %}
4133 interface(REG_INTER);
4134 %}
4136 operand no_Ax_mRegI() %{
4137 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4138 match(RegI);
4139 match(mRegI);
4141 format %{ %}
4142 interface(REG_INTER);
4143 %}
4145 operand mS0RegI() %{
4146 constraint(ALLOC_IN_RC(s0_reg));
4147 match(RegI);
4148 match(mRegI);
4150 format %{ "S0" %}
4151 interface(REG_INTER);
4152 %}
4154 operand mS1RegI() %{
4155 constraint(ALLOC_IN_RC(s1_reg));
4156 match(RegI);
4157 match(mRegI);
4159 format %{ "S1" %}
4160 interface(REG_INTER);
4161 %}
4163 operand mS2RegI() %{
4164 constraint(ALLOC_IN_RC(s2_reg));
4165 match(RegI);
4166 match(mRegI);
4168 format %{ "S2" %}
4169 interface(REG_INTER);
4170 %}
4172 operand mS3RegI() %{
4173 constraint(ALLOC_IN_RC(s3_reg));
4174 match(RegI);
4175 match(mRegI);
4177 format %{ "S3" %}
4178 interface(REG_INTER);
4179 %}
4181 operand mS4RegI() %{
4182 constraint(ALLOC_IN_RC(s4_reg));
4183 match(RegI);
4184 match(mRegI);
4186 format %{ "S4" %}
4187 interface(REG_INTER);
4188 %}
4190 operand mS5RegI() %{
4191 constraint(ALLOC_IN_RC(s5_reg));
4192 match(RegI);
4193 match(mRegI);
4195 format %{ "S5" %}
4196 interface(REG_INTER);
4197 %}
4199 operand mS6RegI() %{
4200 constraint(ALLOC_IN_RC(s6_reg));
4201 match(RegI);
4202 match(mRegI);
4204 format %{ "S6" %}
4205 interface(REG_INTER);
4206 %}
4208 operand mS7RegI() %{
4209 constraint(ALLOC_IN_RC(s7_reg));
4210 match(RegI);
4211 match(mRegI);
4213 format %{ "S7" %}
4214 interface(REG_INTER);
4215 %}
4218 operand mT0RegI() %{
4219 constraint(ALLOC_IN_RC(t0_reg));
4220 match(RegI);
4221 match(mRegI);
4223 format %{ "T0" %}
4224 interface(REG_INTER);
4225 %}
4227 operand mT1RegI() %{
4228 constraint(ALLOC_IN_RC(t1_reg));
4229 match(RegI);
4230 match(mRegI);
4232 format %{ "T1" %}
4233 interface(REG_INTER);
4234 %}
4236 operand mT2RegI() %{
4237 constraint(ALLOC_IN_RC(t2_reg));
4238 match(RegI);
4239 match(mRegI);
4241 format %{ "T2" %}
4242 interface(REG_INTER);
4243 %}
4245 operand mT3RegI() %{
4246 constraint(ALLOC_IN_RC(t3_reg));
4247 match(RegI);
4248 match(mRegI);
4250 format %{ "T3" %}
4251 interface(REG_INTER);
4252 %}
4254 operand mT8RegI() %{
4255 constraint(ALLOC_IN_RC(t8_reg));
4256 match(RegI);
4257 match(mRegI);
4259 format %{ "T8" %}
4260 interface(REG_INTER);
4261 %}
4263 operand mT9RegI() %{
4264 constraint(ALLOC_IN_RC(t9_reg));
4265 match(RegI);
4266 match(mRegI);
4268 format %{ "T9" %}
4269 interface(REG_INTER);
4270 %}
4272 operand mA0RegI() %{
4273 constraint(ALLOC_IN_RC(a0_reg));
4274 match(RegI);
4275 match(mRegI);
4277 format %{ "A0" %}
4278 interface(REG_INTER);
4279 %}
4281 operand mA1RegI() %{
4282 constraint(ALLOC_IN_RC(a1_reg));
4283 match(RegI);
4284 match(mRegI);
4286 format %{ "A1" %}
4287 interface(REG_INTER);
4288 %}
4290 operand mA2RegI() %{
4291 constraint(ALLOC_IN_RC(a2_reg));
4292 match(RegI);
4293 match(mRegI);
4295 format %{ "A2" %}
4296 interface(REG_INTER);
4297 %}
4299 operand mA3RegI() %{
4300 constraint(ALLOC_IN_RC(a3_reg));
4301 match(RegI);
4302 match(mRegI);
4304 format %{ "A3" %}
4305 interface(REG_INTER);
4306 %}
4308 operand mA4RegI() %{
4309 constraint(ALLOC_IN_RC(a4_reg));
4310 match(RegI);
4311 match(mRegI);
4313 format %{ "A4" %}
4314 interface(REG_INTER);
4315 %}
4317 operand mA5RegI() %{
4318 constraint(ALLOC_IN_RC(a5_reg));
4319 match(RegI);
4320 match(mRegI);
4322 format %{ "A5" %}
4323 interface(REG_INTER);
4324 %}
4326 operand mA6RegI() %{
4327 constraint(ALLOC_IN_RC(a6_reg));
4328 match(RegI);
4329 match(mRegI);
4331 format %{ "A6" %}
4332 interface(REG_INTER);
4333 %}
4335 operand mA7RegI() %{
4336 constraint(ALLOC_IN_RC(a7_reg));
4337 match(RegI);
4338 match(mRegI);
4340 format %{ "A7" %}
4341 interface(REG_INTER);
4342 %}
4344 operand mV0RegI() %{
4345 constraint(ALLOC_IN_RC(v0_reg));
4346 match(RegI);
4347 match(mRegI);
4349 format %{ "V0" %}
4350 interface(REG_INTER);
4351 %}
4353 operand mV1RegI() %{
4354 constraint(ALLOC_IN_RC(v1_reg));
4355 match(RegI);
4356 match(mRegI);
4358 format %{ "V1" %}
4359 interface(REG_INTER);
4360 %}
// Narrow-oop (compressed pointer, RegN) operands.
// mRegN is the general operand: any register in int_reg may hold a
// compressed oop. The t*/a*/s*/v* variants below pin a narrow oop to one
// specific register; each declares match(mRegN) so the matcher can use it
// wherever a general narrow-oop register operand is expected. All formats
// are empty — the default register name is printed.
4362 operand mRegN() %{
4363 constraint(ALLOC_IN_RC(int_reg));
4364 match(RegN);
4366 format %{ %}
4367 interface(REG_INTER);
4368 %}
4370 operand t0_RegN() %{
4371 constraint(ALLOC_IN_RC(t0_reg));
4372 match(RegN);
4373 match(mRegN);
4375 format %{ %}
4376 interface(REG_INTER);
4377 %}
4379 operand t1_RegN() %{
4380 constraint(ALLOC_IN_RC(t1_reg));
4381 match(RegN);
4382 match(mRegN);
4384 format %{ %}
4385 interface(REG_INTER);
4386 %}
4388 operand t2_RegN() %{
4389 constraint(ALLOC_IN_RC(t2_reg));
4390 match(RegN);
4391 match(mRegN);
4393 format %{ %}
4394 interface(REG_INTER);
4395 %}
4397 operand t3_RegN() %{
4398 constraint(ALLOC_IN_RC(t3_reg));
4399 match(RegN);
4400 match(mRegN);
4402 format %{ %}
4403 interface(REG_INTER);
4404 %}
4406 operand t8_RegN() %{
4407 constraint(ALLOC_IN_RC(t8_reg));
4408 match(RegN);
4409 match(mRegN);
4411 format %{ %}
4412 interface(REG_INTER);
4413 %}
4415 operand t9_RegN() %{
4416 constraint(ALLOC_IN_RC(t9_reg));
4417 match(RegN);
4418 match(mRegN);
4420 format %{ %}
4421 interface(REG_INTER);
4422 %}
4424 operand a0_RegN() %{
4425 constraint(ALLOC_IN_RC(a0_reg));
4426 match(RegN);
4427 match(mRegN);
4429 format %{ %}
4430 interface(REG_INTER);
4431 %}
4433 operand a1_RegN() %{
4434 constraint(ALLOC_IN_RC(a1_reg));
4435 match(RegN);
4436 match(mRegN);
4438 format %{ %}
4439 interface(REG_INTER);
4440 %}
4442 operand a2_RegN() %{
4443 constraint(ALLOC_IN_RC(a2_reg));
4444 match(RegN);
4445 match(mRegN);
4447 format %{ %}
4448 interface(REG_INTER);
4449 %}
4451 operand a3_RegN() %{
4452 constraint(ALLOC_IN_RC(a3_reg));
4453 match(RegN);
4454 match(mRegN);
4456 format %{ %}
4457 interface(REG_INTER);
4458 %}
4460 operand a4_RegN() %{
4461 constraint(ALLOC_IN_RC(a4_reg));
4462 match(RegN);
4463 match(mRegN);
4465 format %{ %}
4466 interface(REG_INTER);
4467 %}
4469 operand a5_RegN() %{
4470 constraint(ALLOC_IN_RC(a5_reg));
4471 match(RegN);
4472 match(mRegN);
4474 format %{ %}
4475 interface(REG_INTER);
4476 %}
4478 operand a6_RegN() %{
4479 constraint(ALLOC_IN_RC(a6_reg));
4480 match(RegN);
4481 match(mRegN);
4483 format %{ %}
4484 interface(REG_INTER);
4485 %}
4487 operand a7_RegN() %{
4488 constraint(ALLOC_IN_RC(a7_reg));
4489 match(RegN);
4490 match(mRegN);
4492 format %{ %}
4493 interface(REG_INTER);
4494 %}
4496 operand s0_RegN() %{
4497 constraint(ALLOC_IN_RC(s0_reg));
4498 match(RegN);
4499 match(mRegN);
4501 format %{ %}
4502 interface(REG_INTER);
4503 %}
4505 operand s1_RegN() %{
4506 constraint(ALLOC_IN_RC(s1_reg));
4507 match(RegN);
4508 match(mRegN);
4510 format %{ %}
4511 interface(REG_INTER);
4512 %}
4514 operand s2_RegN() %{
4515 constraint(ALLOC_IN_RC(s2_reg));
4516 match(RegN);
4517 match(mRegN);
4519 format %{ %}
4520 interface(REG_INTER);
4521 %}
4523 operand s3_RegN() %{
4524 constraint(ALLOC_IN_RC(s3_reg));
4525 match(RegN);
4526 match(mRegN);
4528 format %{ %}
4529 interface(REG_INTER);
4530 %}
4532 operand s4_RegN() %{
4533 constraint(ALLOC_IN_RC(s4_reg));
4534 match(RegN);
4535 match(mRegN);
4537 format %{ %}
4538 interface(REG_INTER);
4539 %}
4541 operand s5_RegN() %{
4542 constraint(ALLOC_IN_RC(s5_reg));
4543 match(RegN);
4544 match(mRegN);
4546 format %{ %}
4547 interface(REG_INTER);
4548 %}
4550 operand s6_RegN() %{
4551 constraint(ALLOC_IN_RC(s6_reg));
4552 match(RegN);
4553 match(mRegN);
4555 format %{ %}
4556 interface(REG_INTER);
4557 %}
4559 operand s7_RegN() %{
4560 constraint(ALLOC_IN_RC(s7_reg));
4561 match(RegN);
4562 match(mRegN);
4564 format %{ %}
4565 interface(REG_INTER);
4566 %}
4568 operand v0_RegN() %{
4569 constraint(ALLOC_IN_RC(v0_reg));
4570 match(RegN);
4571 match(mRegN);
4573 format %{ %}
4574 interface(REG_INTER);
4575 %}
4577 operand v1_RegN() %{
4578 constraint(ALLOC_IN_RC(v1_reg));
4579 match(RegN);
4580 match(mRegN);
4582 format %{ %}
4583 interface(REG_INTER);
4584 %}
4586 // Pointer Register
// General pointer-register operand (any register in p_reg), plus a
// restricted variant that excludes T8 (no_T8_p_reg). The no-T8 variant
// exists so instructions that clobber T8 internally can still take a
// register operand without the allocator handing them T8 itself.
4587 operand mRegP() %{
4588 constraint(ALLOC_IN_RC(p_reg));
4589 match(RegP);
4591 format %{ %}
4592 interface(REG_INTER);
4593 %}
4595 operand no_T8_mRegP() %{
4596 constraint(ALLOC_IN_RC(no_T8_p_reg));
4597 match(RegP);
4598 match(mRegP);
4600 format %{ %}
4601 interface(REG_INTER);
4602 %}
// Fixed-register pointer (RegP) operands. Each pins a pointer to one
// specific register, reusing the *_long_reg register classes (pointers are
// 64-bit on this port, so the long and pointer classes coincide). Each
// operand also matches mRegP and no_T8_mRegP so it can be used as a
// specialization of those — except t8_RegP, which deliberately omits
// match(no_T8_mRegP) since T8 is the register that class excludes.
4604 operand s0_RegP()
4605 %{
4606 constraint(ALLOC_IN_RC(s0_long_reg));
4607 match(RegP);
4608 match(mRegP);
4609 match(no_T8_mRegP);
4611 format %{ %}
4612 interface(REG_INTER);
4613 %}
4615 operand s1_RegP()
4616 %{
4617 constraint(ALLOC_IN_RC(s1_long_reg));
4618 match(RegP);
4619 match(mRegP);
4620 match(no_T8_mRegP);
4622 format %{ %}
4623 interface(REG_INTER);
4624 %}
4626 operand s2_RegP()
4627 %{
4628 constraint(ALLOC_IN_RC(s2_long_reg));
4629 match(RegP);
4630 match(mRegP);
4631 match(no_T8_mRegP);
4633 format %{ %}
4634 interface(REG_INTER);
4635 %}
4637 operand s3_RegP()
4638 %{
4639 constraint(ALLOC_IN_RC(s3_long_reg));
4640 match(RegP);
4641 match(mRegP);
4642 match(no_T8_mRegP);
4644 format %{ %}
4645 interface(REG_INTER);
4646 %}
4648 operand s4_RegP()
4649 %{
4650 constraint(ALLOC_IN_RC(s4_long_reg));
4651 match(RegP);
4652 match(mRegP);
4653 match(no_T8_mRegP);
4655 format %{ %}
4656 interface(REG_INTER);
4657 %}
4659 operand s5_RegP()
4660 %{
4661 constraint(ALLOC_IN_RC(s5_long_reg));
4662 match(RegP);
4663 match(mRegP);
4664 match(no_T8_mRegP);
4666 format %{ %}
4667 interface(REG_INTER);
4668 %}
4670 operand s6_RegP()
4671 %{
4672 constraint(ALLOC_IN_RC(s6_long_reg));
4673 match(RegP);
4674 match(mRegP);
4675 match(no_T8_mRegP);
4677 format %{ %}
4678 interface(REG_INTER);
4679 %}
4681 operand s7_RegP()
4682 %{
4683 constraint(ALLOC_IN_RC(s7_long_reg));
4684 match(RegP);
4685 match(mRegP);
4686 match(no_T8_mRegP);
4688 format %{ %}
4689 interface(REG_INTER);
4690 %}
4692 operand t0_RegP()
4693 %{
4694 constraint(ALLOC_IN_RC(t0_long_reg));
4695 match(RegP);
4696 match(mRegP);
4697 match(no_T8_mRegP);
4699 format %{ %}
4700 interface(REG_INTER);
4701 %}
4703 operand t1_RegP()
4704 %{
4705 constraint(ALLOC_IN_RC(t1_long_reg));
4706 match(RegP);
4707 match(mRegP);
4708 match(no_T8_mRegP);
4710 format %{ %}
4711 interface(REG_INTER);
4712 %}
4714 operand t2_RegP()
4715 %{
4716 constraint(ALLOC_IN_RC(t2_long_reg));
4717 match(RegP);
4718 match(mRegP);
4719 match(no_T8_mRegP);
4721 format %{ %}
4722 interface(REG_INTER);
4723 %}
4725 operand t3_RegP()
4726 %{
4727 constraint(ALLOC_IN_RC(t3_long_reg));
4728 match(RegP);
4729 match(mRegP);
4730 match(no_T8_mRegP);
4732 format %{ %}
4733 interface(REG_INTER);
4734 %}
// T8 is excluded from no_T8_p_reg, so t8_RegP intentionally has no
// match(no_T8_mRegP) rule.
4736 operand t8_RegP()
4737 %{
4738 constraint(ALLOC_IN_RC(t8_long_reg));
4739 match(RegP);
4740 match(mRegP);
4742 format %{ %}
4743 interface(REG_INTER);
4744 %}
4746 operand t9_RegP()
4747 %{
4748 constraint(ALLOC_IN_RC(t9_long_reg));
4749 match(RegP);
4750 match(mRegP);
4751 match(no_T8_mRegP);
4753 format %{ %}
4754 interface(REG_INTER);
4755 %}
4757 operand a0_RegP()
4758 %{
4759 constraint(ALLOC_IN_RC(a0_long_reg));
4760 match(RegP);
4761 match(mRegP);
4762 match(no_T8_mRegP);
4764 format %{ %}
4765 interface(REG_INTER);
4766 %}
4768 operand a1_RegP()
4769 %{
4770 constraint(ALLOC_IN_RC(a1_long_reg));
4771 match(RegP);
4772 match(mRegP);
4773 match(no_T8_mRegP);
4775 format %{ %}
4776 interface(REG_INTER);
4777 %}
4779 operand a2_RegP()
4780 %{
4781 constraint(ALLOC_IN_RC(a2_long_reg));
4782 match(RegP);
4783 match(mRegP);
4784 match(no_T8_mRegP);
4786 format %{ %}
4787 interface(REG_INTER);
4788 %}
4790 operand a3_RegP()
4791 %{
4792 constraint(ALLOC_IN_RC(a3_long_reg));
4793 match(RegP);
4794 match(mRegP);
4795 match(no_T8_mRegP);
4797 format %{ %}
4798 interface(REG_INTER);
4799 %}
4801 operand a4_RegP()
4802 %{
4803 constraint(ALLOC_IN_RC(a4_long_reg));
4804 match(RegP);
4805 match(mRegP);
4806 match(no_T8_mRegP);
4808 format %{ %}
4809 interface(REG_INTER);
4810 %}
4813 operand a5_RegP()
4814 %{
4815 constraint(ALLOC_IN_RC(a5_long_reg));
4816 match(RegP);
4817 match(mRegP);
4818 match(no_T8_mRegP);
4820 format %{ %}
4821 interface(REG_INTER);
4822 %}
4824 operand a6_RegP()
4825 %{
4826 constraint(ALLOC_IN_RC(a6_long_reg));
4827 match(RegP);
4828 match(mRegP);
4829 match(no_T8_mRegP);
4831 format %{ %}
4832 interface(REG_INTER);
4833 %}
4835 operand a7_RegP()
4836 %{
4837 constraint(ALLOC_IN_RC(a7_long_reg));
4838 match(RegP);
4839 match(mRegP);
4840 match(no_T8_mRegP);
4842 format %{ %}
4843 interface(REG_INTER);
4844 %}
4846 operand v0_RegP()
4847 %{
4848 constraint(ALLOC_IN_RC(v0_long_reg));
4849 match(RegP);
4850 match(mRegP);
4851 match(no_T8_mRegP);
4853 format %{ %}
4854 interface(REG_INTER);
4855 %}
4857 operand v1_RegP()
4858 %{
4859 constraint(ALLOC_IN_RC(v1_long_reg));
4860 match(RegP);
4861 match(mRegP);
4862 match(no_T8_mRegP);
4864 format %{ %}
4865 interface(REG_INTER);
4866 %}
4868 /*
4869 operand mSPRegP(mRegP reg) %{
4870 constraint(ALLOC_IN_RC(sp_reg));
4871 match(reg);
4873 format %{ "SP" %}
4874 interface(REG_INTER);
4875 %}
4877 operand mFPRegP(mRegP reg) %{
4878 constraint(ALLOC_IN_RC(fp_reg));
4879 match(reg);
4881 format %{ "FP" %}
4882 interface(REG_INTER);
4883 %}
4884 */
// Long (RegL) operands: the general long-register operand plus
// fixed-register variants. Note the irregularities:
//  - a0RegL is the only one with a non-empty format string ("A0") — purely
//    cosmetic (debug output only), but inconsistent with its siblings.
//  - s5RegL/s6RegL are absent — presumably S5/S6 are reserved by the VM
//    (e.g. TLS/heapbase); confirm against the register definitions at the
//    top of this file.
4886 operand mRegL() %{
4887 constraint(ALLOC_IN_RC(long_reg));
4888 match(RegL);
4890 format %{ %}
4891 interface(REG_INTER);
4892 %}
4894 operand v0RegL() %{
4895 constraint(ALLOC_IN_RC(v0_long_reg));
4896 match(RegL);
4897 match(mRegL);
4899 format %{ %}
4900 interface(REG_INTER);
4901 %}
4903 operand v1RegL() %{
4904 constraint(ALLOC_IN_RC(v1_long_reg));
4905 match(RegL);
4906 match(mRegL);
4908 format %{ %}
4909 interface(REG_INTER);
4910 %}
4912 operand a0RegL() %{
4913 constraint(ALLOC_IN_RC(a0_long_reg));
4914 match(RegL);
4915 match(mRegL);
4917 format %{ "A0" %}
4918 interface(REG_INTER);
4919 %}
4921 operand a1RegL() %{
4922 constraint(ALLOC_IN_RC(a1_long_reg));
4923 match(RegL);
4924 match(mRegL);
4926 format %{ %}
4927 interface(REG_INTER);
4928 %}
4930 operand a2RegL() %{
4931 constraint(ALLOC_IN_RC(a2_long_reg));
4932 match(RegL);
4933 match(mRegL);
4935 format %{ %}
4936 interface(REG_INTER);
4937 %}
4939 operand a3RegL() %{
4940 constraint(ALLOC_IN_RC(a3_long_reg));
4941 match(RegL);
4942 match(mRegL);
4944 format %{ %}
4945 interface(REG_INTER);
4946 %}
4948 operand t0RegL() %{
4949 constraint(ALLOC_IN_RC(t0_long_reg));
4950 match(RegL);
4951 match(mRegL);
4953 format %{ %}
4954 interface(REG_INTER);
4955 %}
4957 operand t1RegL() %{
4958 constraint(ALLOC_IN_RC(t1_long_reg));
4959 match(RegL);
4960 match(mRegL);
4962 format %{ %}
4963 interface(REG_INTER);
4964 %}
4966 operand t2RegL() %{
4967 constraint(ALLOC_IN_RC(t2_long_reg));
4968 match(RegL);
4969 match(mRegL);
4971 format %{ %}
4972 interface(REG_INTER);
4973 %}
4975 operand t3RegL() %{
4976 constraint(ALLOC_IN_RC(t3_long_reg));
4977 match(RegL);
4978 match(mRegL);
4980 format %{ %}
4981 interface(REG_INTER);
4982 %}
4984 operand t8RegL() %{
4985 constraint(ALLOC_IN_RC(t8_long_reg));
4986 match(RegL);
4987 match(mRegL);
4989 format %{ %}
4990 interface(REG_INTER);
4991 %}
4993 operand a4RegL() %{
4994 constraint(ALLOC_IN_RC(a4_long_reg));
4995 match(RegL);
4996 match(mRegL);
4998 format %{ %}
4999 interface(REG_INTER);
5000 %}
5002 operand a5RegL() %{
5003 constraint(ALLOC_IN_RC(a5_long_reg));
5004 match(RegL);
5005 match(mRegL);
5007 format %{ %}
5008 interface(REG_INTER);
5009 %}
5011 operand a6RegL() %{
5012 constraint(ALLOC_IN_RC(a6_long_reg));
5013 match(RegL);
5014 match(mRegL);
5016 format %{ %}
5017 interface(REG_INTER);
5018 %}
5020 operand a7RegL() %{
5021 constraint(ALLOC_IN_RC(a7_long_reg));
5022 match(RegL);
5023 match(mRegL);
5025 format %{ %}
5026 interface(REG_INTER);
5027 %}
5029 operand s0RegL() %{
5030 constraint(ALLOC_IN_RC(s0_long_reg));
5031 match(RegL);
5032 match(mRegL);
5034 format %{ %}
5035 interface(REG_INTER);
5036 %}
5038 operand s1RegL() %{
5039 constraint(ALLOC_IN_RC(s1_long_reg));
5040 match(RegL);
5041 match(mRegL);
5043 format %{ %}
5044 interface(REG_INTER);
5045 %}
5047 operand s2RegL() %{
5048 constraint(ALLOC_IN_RC(s2_long_reg));
5049 match(RegL);
5050 match(mRegL);
5052 format %{ %}
5053 interface(REG_INTER);
5054 %}
5056 operand s3RegL() %{
5057 constraint(ALLOC_IN_RC(s3_long_reg));
5058 match(RegL);
5059 match(mRegL);
5061 format %{ %}
5062 interface(REG_INTER);
5063 %}
5065 operand s4RegL() %{
5066 constraint(ALLOC_IN_RC(s4_long_reg));
5067 match(RegL);
5068 match(mRegL);
5070 format %{ %}
5071 interface(REG_INTER);
5072 %}
5074 operand s7RegL() %{
5075 constraint(ALLOC_IN_RC(s7_long_reg));
5076 match(RegL);
5077 match(mRegL);
5079 format %{ %}
5080 interface(REG_INTER);
5081 %}
5083 // Floating register operands
// Single-precision FP operand: any register in the flt_reg class.
5084 operand regF() %{
5085 constraint(ALLOC_IN_RC(flt_reg));
5086 match(RegF);
5088 format %{ %}
5089 interface(REG_INTER);
5090 %}
5092 //Double Precision Floating register operands
// Double-precision FP operand: any register in the dbl_reg class.
5093 operand regD() %{
5094 constraint(ALLOC_IN_RC(dbl_reg));
5095 match(RegD);
5097 format %{ %}
5098 interface(REG_INTER);
5099 %}
5101 //----------Memory Operands----------------------------------------------------
// Addressing-mode operands. Each interface(MEMORY_INTER) block decomposes
// the matched ideal subtree into base/index/scale/disp fields that the
// instruction encodings consume. The *Narrow / *NarrowKlass variants match
// through DecodeN / DecodeNKlass nodes and are guarded by predicates that
// require a zero compressed-oop/klass shift, so the narrow register value
// can be used directly as a base address.
5102 // Indirect Memory Operand
5103 operand indirect(mRegP reg) %{
5104 constraint(ALLOC_IN_RC(p_reg));
5105 match(reg);
5107 format %{ "[$reg] @ indirect" %}
5108 interface(MEMORY_INTER) %{
5109 base($reg);
5110 index(0x0); /* NO_INDEX */
5111 scale(0x0);
5112 disp(0x0);
5113 %}
5114 %}
5116 // Indirect Memory Plus Short Offset Operand
5117 operand indOffset8(mRegP reg, immL8 off)
5118 %{
5119 constraint(ALLOC_IN_RC(p_reg));
5120 match(AddP reg off);
5122 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5123 interface(MEMORY_INTER) %{
5124 base($reg);
5125 index(0x0); /* NO_INDEX */
5126 scale(0x0);
5127 disp($off);
5128 %}
5129 %}
5131 // Indirect Memory Times Scale Plus Index Register
5132 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5133 %{
5134 constraint(ALLOC_IN_RC(p_reg));
5135 match(AddP reg (LShiftL lreg scale));
5137 op_cost(10);
5138 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5139 interface(MEMORY_INTER) %{
5140 base($reg);
5141 index($lreg);
5142 scale($scale);
5143 disp(0x0);
5144 %}
5145 %}
5148 // [base + index + offset]
5149 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5150 %{
5151 constraint(ALLOC_IN_RC(p_reg));
5152 op_cost(5);
5153 match(AddP (AddP base index) off);
5155 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5156 interface(MEMORY_INTER) %{
5157 base($base);
5158 index($index);
5159 scale(0x0);
5160 disp($off);
5161 %}
5162 %}
5164 // [base + index + offset]
// Same shape as baseIndexOffset8, but the index arrives as an int that is
// sign-extended (ConvI2L) in the ideal graph.
5165 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5166 %{
5167 constraint(ALLOC_IN_RC(p_reg));
5168 op_cost(5);
5169 match(AddP (AddP base (ConvI2L index)) off);
5171 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5172 interface(MEMORY_INTER) %{
5173 base($base);
5174 index($index);
5175 scale(0x0);
5176 disp($off);
5177 %}
5178 %}
5180 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5181 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5182 %{
5183 constraint(ALLOC_IN_RC(p_reg));
5184 match(AddP (AddP reg (LShiftL lreg scale)) off);
5186 op_cost(10);
5187 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5188 interface(MEMORY_INTER) %{
5189 base($reg);
5190 index($lreg);
5191 scale($scale);
5192 disp($off);
5193 %}
5194 %}
5196 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5197 %{
5198 constraint(ALLOC_IN_RC(p_reg));
5199 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5201 op_cost(10);
5202 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5203 interface(MEMORY_INTER) %{
5204 base($reg);
5205 index($ireg);
5206 scale($scale);
5207 disp($off);
5208 %}
5209 %}
5211 // [base + index<<scale + offset]
5212 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5213 %{
5214 constraint(ALLOC_IN_RC(p_reg));
5215 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5216 op_cost(10);
5217 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5219 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5220 interface(MEMORY_INTER) %{
5221 base($base);
5222 index($index);
5223 scale($scale);
5224 disp($off);
5225 %}
5226 %}
5228 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5229 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5230 %{
5231 predicate(Universe::narrow_oop_shift() == 0);
5232 constraint(ALLOC_IN_RC(p_reg));
5233 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5235 op_cost(10);
5236 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5237 interface(MEMORY_INTER) %{
5238 base($reg);
5239 index($lreg);
5240 scale($scale);
5241 disp($off);
5242 %}
5243 %}
5245 // [base + index<<scale + offset] for compressed Oops
5246 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5247 %{
5248 constraint(ALLOC_IN_RC(p_reg));
5249 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5250 predicate(Universe::narrow_oop_shift() == 0);
5251 op_cost(10);
5252 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5254 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5255 interface(MEMORY_INTER) %{
5256 base($base);
5257 index($index);
5258 scale($scale);
5259 disp($off);
5260 %}
5261 %}
5263 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5264 // Indirect Memory Plus Long Offset Operand
5265 operand indOffset32(mRegP reg, immL32 off) %{
5266 constraint(ALLOC_IN_RC(p_reg));
5267 op_cost(20);
5268 match(AddP reg off);
5270 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5271 interface(MEMORY_INTER) %{
5272 base($reg);
5273 index(0x0); /* NO_INDEX */
5274 scale(0x0);
5275 disp($off);
5276 %}
5277 %}
5279 // Indirect Memory Plus Index Register
5280 operand indIndex(mRegP addr, mRegL index) %{
5281 constraint(ALLOC_IN_RC(p_reg));
5282 match(AddP addr index);
5284 op_cost(20);
5285 format %{"[$addr + $index] @ indIndex" %}
5286 interface(MEMORY_INTER) %{
5287 base($addr);
5288 index($index);
5289 scale(0x0);
5290 disp(0x0);
5291 %}
5292 %}
// Compressed-klass addressing modes: only legal when the klass decode is a
// plain zero-shift (and zero-base is implied by using the raw register as
// base — TODO confirm narrow_klass_base handling elsewhere in this port).
5294 operand indirectNarrowKlass(mRegN reg)
5295 %{
5296 predicate(Universe::narrow_klass_shift() == 0);
5297 constraint(ALLOC_IN_RC(p_reg));
5298 op_cost(10);
5299 match(DecodeNKlass reg);
5301 format %{ "[$reg] @ indirectNarrowKlass" %}
5302 interface(MEMORY_INTER) %{
5303 base($reg);
5304 index(0x0);
5305 scale(0x0);
5306 disp(0x0);
5307 %}
5308 %}
5310 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5311 %{
5312 predicate(Universe::narrow_klass_shift() == 0);
5313 constraint(ALLOC_IN_RC(p_reg));
5314 op_cost(10);
5315 match(AddP (DecodeNKlass reg) off);
5317 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5318 interface(MEMORY_INTER) %{
5319 base($reg);
5320 index(0x0);
5321 scale(0x0);
5322 disp($off);
5323 %}
5324 %}
5326 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5327 %{
5328 predicate(Universe::narrow_klass_shift() == 0);
5329 constraint(ALLOC_IN_RC(p_reg));
5330 op_cost(10);
5331 match(AddP (DecodeNKlass reg) off);
5333 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5334 interface(MEMORY_INTER) %{
5335 base($reg);
5336 index(0x0);
5337 scale(0x0);
5338 disp($off);
5339 %}
5340 %}
5342 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5343 %{
5344 predicate(Universe::narrow_klass_shift() == 0);
5345 constraint(ALLOC_IN_RC(p_reg));
5346 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5348 op_cost(10);
5349 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5350 interface(MEMORY_INTER) %{
5351 base($reg);
5352 index($lreg);
5353 scale(0x0);
5354 disp($off);
5355 %}
5356 %}
5358 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5359 %{
5360 predicate(Universe::narrow_klass_shift() == 0);
5361 constraint(ALLOC_IN_RC(p_reg));
5362 match(AddP (DecodeNKlass reg) lreg);
5364 op_cost(10);
5365 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5366 interface(MEMORY_INTER) %{
5367 base($reg);
5368 index($lreg);
5369 scale(0x0);
5370 disp(0x0);
5371 %}
5372 %}
5374 // Indirect Memory Operand
5375 operand indirectNarrow(mRegN reg)
5376 %{
5377 predicate(Universe::narrow_oop_shift() == 0);
5378 constraint(ALLOC_IN_RC(p_reg));
5379 op_cost(10);
5380 match(DecodeN reg);
5382 format %{ "[$reg] @ indirectNarrow" %}
5383 interface(MEMORY_INTER) %{
5384 base($reg);
5385 index(0x0);
5386 scale(0x0);
5387 disp(0x0);
5388 %}
5389 %}
5391 // Indirect Memory Plus Short Offset Operand
5392 operand indOffset8Narrow(mRegN reg, immL8 off)
5393 %{
5394 predicate(Universe::narrow_oop_shift() == 0);
5395 constraint(ALLOC_IN_RC(p_reg));
5396 op_cost(10);
5397 match(AddP (DecodeN reg) off);
5399 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5400 interface(MEMORY_INTER) %{
5401 base($reg);
5402 index(0x0);
5403 scale(0x0);
5404 disp($off);
5405 %}
5406 %}
5408 // Indirect Memory Plus Index Register Plus Offset Operand
5409 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5410 %{
5411 predicate(Universe::narrow_oop_shift() == 0);
5412 constraint(ALLOC_IN_RC(p_reg));
5413 match(AddP (AddP (DecodeN reg) lreg) off);
5415 op_cost(10);
5416 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5417 interface(MEMORY_INTER) %{
5418 base($reg);
5419 index($lreg);
5420 scale(0x0);
5421 disp($off);
5422 %}
5423 %}
5425 //----------Load Long Memory Operands------------------------------------------
5426 // The load-long idiom will use its address expression again after loading
5427 // the first word of the long. If the load-long destination overlaps with
5428 // registers used in the addressing expression, the 2nd half will be loaded
5429 // from a clobbered address. Fix this by requiring that load-long use
5430 // address registers that do not overlap with the load-long target.
5432 // load-long support
// High op_cost discourages the allocator from picking this operand unless
// the load-long pattern actually requires it.
5433 operand load_long_RegP() %{
5434 constraint(ALLOC_IN_RC(p_reg));
5435 match(RegP);
5436 match(mRegP);
5437 op_cost(100);
5438 format %{ %}
5439 interface(REG_INTER);
5440 %}
5442 // Indirect Memory Operand Long
5443 operand load_long_indirect(load_long_RegP reg) %{
5444 constraint(ALLOC_IN_RC(p_reg));
5445 match(reg);
5447 format %{ "[$reg]" %}
5448 interface(MEMORY_INTER) %{
5449 base($reg);
5450 index(0x0);
5451 scale(0x0);
5452 disp(0x0);
5453 %}
5454 %}
5456 // Indirect Memory Plus Long Offset Operand
5457 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5458 match(AddP reg off);
5460 format %{ "[$reg + $off]" %}
5461 interface(MEMORY_INTER) %{
5462 base($reg);
5463 index(0x0);
5464 scale(0x0);
5465 disp($off);
5466 %}
5467 %}
5469 //----------Conditional Branch Operands----------------------------------------
5470 // Comparison Op - This is the operation of the comparison, and is limited to
5471 // the following set of codes:
5472 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5473 //
5474 // Other attributes of the comparison, such as unsignedness, are specified
5475 // by the comparison instruction that sets a condition code flags register.
5476 // That result is represented by a flags operand whose subtype is appropriate
5477 // to the unsignedness (etc.) of the comparison.
5478 //
5479 // Later, the instruction which matches both the Comparison Op (a Bool) and
5480 // the flags (produced by the Cmp) specifies the coding of the comparison op
5481 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5483 // Comparison Code
// Condition-code operand mapping each Bool test to a small integer that
// instruct encodings dispatch on (1=eq, 2=ne, 3=gt, 4=ge, 5=lt, 6=le,
// 7=overflow, 8=no_overflow).
5484 operand cmpOp() %{
5485 match(Bool);
5487 format %{ "" %}
5488 interface(COND_INTER) %{
5489 equal(0x01);
5490 not_equal(0x02);
5491 greater(0x03);
5492 greater_equal(0x04);
5493 less(0x05);
5494 less_equal(0x06);
5495 overflow(0x7);
5496 no_overflow(0x8);
5497 %}
5498 %}
5501 // Comparison Code
5502 // Comparison Code, unsigned compare. Used by FP also, with
5503 // C2 (unordered) turned into GT or LT already. The other bits
5504 // C0 and C3 are turned into Carry & Zero flags.
// NOTE(review): the encodings here are identical to cmpOp; the signed vs.
// unsigned distinction is carried by which instruct matches this operand,
// not by the codes themselves.
5505 operand cmpOpU() %{
5506 match(Bool);
5508 format %{ "" %}
5509 interface(COND_INTER) %{
5510 equal(0x01);
5511 not_equal(0x02);
5512 greater(0x03);
5513 greater_equal(0x04);
5514 less(0x05);
5515 less_equal(0x06);
5516 overflow(0x7);
5517 no_overflow(0x8);
5518 %}
5519 %}
5521 /*
5522 // Comparison Code, unsigned compare. Used by FP also, with
5523 // C2 (unordered) turned into GT or LT already. The other bits
5524 // C0 and C3 are turned into Carry & Zero flags.
5525 operand cmpOpU() %{
5526 match(Bool);
5528 format %{ "" %}
5529 interface(COND_INTER) %{
5530 equal(0x4);
5531 not_equal(0x5);
5532 less(0x2);
5533 greater_equal(0x3);
5534 less_equal(0x6);
5535 greater(0x7);
5536 %}
5537 %}
5538 */
5539 /*
5540 // Comparison Code for FP conditional move
5541 operand cmpOp_fcmov() %{
5542 match(Bool);
5544 format %{ "" %}
5545 interface(COND_INTER) %{
5546 equal (0x01);
5547 not_equal (0x02);
5548 greater (0x03);
5549 greater_equal(0x04);
5550 less (0x05);
5551 less_equal (0x06);
5552 %}
5553 %}
5555 // Comparison Code used in long compares
5556 operand cmpOp_commute() %{
5557 match(Bool);
5559 format %{ "" %}
5560 interface(COND_INTER) %{
5561 equal(0x4);
5562 not_equal(0x5);
5563 less(0xF);
5564 greater_equal(0xE);
5565 less_equal(0xD);
5566 greater(0xC);
5567 %}
5568 %}
5569 */
5571 //----------Special Memory Operands--------------------------------------------
5572 // Stack Slot Operand - This operand is used for loading and storing temporary
5573 // values on the stack where a match requires a value to
5574 // flow through memory.
// One stack-slot operand per basic type (P/I/F/D/L). All address
// SP-relative memory: base 0x1d is the SP register number, and the slot's
// offset becomes the displacement.
5575 operand stackSlotP(sRegP reg) %{
5576 constraint(ALLOC_IN_RC(stack_slots));
5577 // No match rule because this operand is only generated in matching
5578 op_cost(50);
5579 format %{ "[$reg]" %}
5580 interface(MEMORY_INTER) %{
5581 base(0x1d); // SP
5582 index(0x0); // No Index
5583 scale(0x0); // No Scale
5584 disp($reg); // Stack Offset
5585 %}
5586 %}
5588 operand stackSlotI(sRegI reg) %{
5589 constraint(ALLOC_IN_RC(stack_slots));
5590 // No match rule because this operand is only generated in matching
5591 op_cost(50);
5592 format %{ "[$reg]" %}
5593 interface(MEMORY_INTER) %{
5594 base(0x1d); // SP
5595 index(0x0); // No Index
5596 scale(0x0); // No Scale
5597 disp($reg); // Stack Offset
5598 %}
5599 %}
5601 operand stackSlotF(sRegF reg) %{
5602 constraint(ALLOC_IN_RC(stack_slots));
5603 // No match rule because this operand is only generated in matching
5604 op_cost(50);
5605 format %{ "[$reg]" %}
5606 interface(MEMORY_INTER) %{
5607 base(0x1d); // SP
5608 index(0x0); // No Index
5609 scale(0x0); // No Scale
5610 disp($reg); // Stack Offset
5611 %}
5612 %}
5614 operand stackSlotD(sRegD reg) %{
5615 constraint(ALLOC_IN_RC(stack_slots));
5616 // No match rule because this operand is only generated in matching
5617 op_cost(50);
5618 format %{ "[$reg]" %}
5619 interface(MEMORY_INTER) %{
5620 base(0x1d); // SP
5621 index(0x0); // No Index
5622 scale(0x0); // No Scale
5623 disp($reg); // Stack Offset
5624 %}
5625 %}
5627 operand stackSlotL(sRegL reg) %{
5628 constraint(ALLOC_IN_RC(stack_slots));
5629 // No match rule because this operand is only generated in matching
5630 op_cost(50);
5631 format %{ "[$reg]" %}
5632 interface(MEMORY_INTER) %{
5633 base(0x1d); // SP
5634 index(0x0); // No Index
5635 scale(0x0); // No Scale
5636 disp($reg); // Stack Offset
5637 %}
5638 %}
5641 //------------------------OPERAND CLASSES--------------------------------------
// The 'memory' opclass is the union of every addressing-mode operand above;
// instructs that take a generic memory operand match any member of this set.
5642 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5643 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5646 //----------PIPELINE-----------------------------------------------------------
5647 // Rules which define the behavior of the target architectures pipeline.
5649 pipeline %{
5651 //----------ATTRIBUTES---------------------------------------------------------
// Global pipeline attributes: fixed-width 4-byte instructions, branches
// with an architectural delay slot, one instruction per bundle, up to four
// bundles issued per cycle, 16-byte fetch lines.
5652 attributes %{
5653 fixed_size_instructions; // Fixed size instructions
5654 branch_has_delay_slot; // branches have a delay slot on gs2
5655 max_instructions_per_bundle = 1; // 1 instruction per bundle
5656 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5657 bundle_unit_size=4;
5658 instruction_unit_size = 4; // An instruction is 4 bytes long
5659 instruction_fetch_unit_size = 16; // The processor fetches one line
5660 instruction_fetch_units = 1; // of 16 bytes
5662 // List of nop instructions
5663 nops( MachNop );
5664 %}
5666 //----------RESOURCES----------------------------------------------------------
5667 // Resources are the functional units available to the machine
// Four decode slots, two integer ALUs, two FPUs, one memory unit, one
// branch unit; DECODE/ALU/FPU are the "any-of" unions of their members.
5669 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5671 //----------PIPELINE DESCRIPTION-----------------------------------------------
5672 // Pipeline Description specifies the stages in the machine's pipeline
5674 // IF: fetch
5675 // ID: decode
5676 // RD: read
5677 // CA: calculate
5678 // WB: write back
5679 // CM: commit
// Six-stage in-order pipeline model used by the pipe_class definitions below.
5681 pipe_desc(IF, ID, RD, CA, WB, CM);
5684 //----------PIPELINE CLASSES---------------------------------------------------
5685 // Pipeline Classes describe the stages in which input and output are
5686 // referenced by the hardware pipeline.
// Integer pipeline classes. The "+N" on a dst means the result is available
// N cycles after the nominal write-back stage (i.e. models result latency:
// mult +5, long-mult/div/mod +10).
5688 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5689 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5690 single_instruction;
5691 src1 : RD(read);
5692 src2 : RD(read);
5693 dst : WB(write)+1;
5694 DECODE : ID;
5695 ALU : CA;
5696 %}
5698 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5699 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5700 src1 : RD(read);
5701 src2 : RD(read);
5702 dst : WB(write)+5;
5703 DECODE : ID;
5704 ALU2 : CA;
5705 %}
5707 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5708 src1 : RD(read);
5709 src2 : RD(read);
5710 dst : WB(write)+10;
5711 DECODE : ID;
5712 ALU2 : CA;
5713 %}
5715 //No.19 Integer div operation : dst <-- reg1 div reg2
5716 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5717 src1 : RD(read);
5718 src2 : RD(read);
5719 dst : WB(write)+10;
5720 DECODE : ID;
5721 ALU2 : CA;
5722 %}
5724 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5725 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5726 instruction_count(2);
5727 src1 : RD(read);
5728 src2 : RD(read);
5729 dst : WB(write)+10;
5730 DECODE : ID;
5731 ALU2 : CA;
5732 %}
5734 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5735 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5736 instruction_count(2);
5737 src1 : RD(read);
5738 src2 : RD(read);
5739 dst : WB(write);
5740 DECODE : ID;
5741 ALU : CA;
5742 %}
5744 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5745 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5746 instruction_count(2);
5747 src : RD(read);
5748 dst : WB(write);
5749 DECODE : ID;
5750 ALU : CA;
5751 %}
5753 //no.16 load Long from memory :
5754 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5755 instruction_count(2);
5756 mem : RD(read);
5757 dst : WB(write)+5;
5758 DECODE : ID;
5759 MEM : RD;
5760 %}
5762 //No.17 Store Long to Memory :
5763 pipe_class ialu_storeL(mRegL src, memory mem) %{
5764 instruction_count(2);
5765 mem : RD(read);
5766 src : RD(read);
5767 DECODE : ID;
5768 MEM : RD;
5769 %}
5771 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5772 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5773 single_instruction;
5774 src : RD(read);
5775 dst : WB(write);
5776 DECODE : ID;
5777 ALU : CA;
5778 %}
5780 //No.3 Integer move operation : dst <-- reg
5781 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5782 src : RD(read);
5783 dst : WB(write);
5784 DECODE : ID;
5785 ALU : CA;
5786 %}
5788 //No.4 No instructions : do nothing
5789 pipe_class empty( ) %{
5790 instruction_count(0);
5791 %}
// Branch and integer load/store pipeline classes.
5793 //No.5 UnConditional branch :
5794 pipe_class pipe_jump( label labl ) %{
5795 multiple_bundles;
5796 DECODE : ID;
5797 BR : RD;
5798 %}
5800 //No.6 ALU Conditional branch :
5801 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5802 multiple_bundles;
5803 src1 : RD(read);
5804 src2 : RD(read);
5805 DECODE : ID;
5806 BR : RD;
5807 %}
5809 //no.7 load integer from memory :
5810 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5811 mem : RD(read);
5812 dst : WB(write)+3;
5813 DECODE : ID;
5814 MEM : RD;
5815 %}
5817 //No.8 Store Integer to Memory :
5818 pipe_class ialu_storeI(mRegI src, memory mem) %{
5819 mem : RD(read);
5820 src : RD(read);
5821 DECODE : ID;
5822 MEM : RD;
5823 %}
5826 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5827 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5828 src1 : RD(read);
5829 src2 : RD(read);
5830 dst : WB(write);
5831 DECODE : ID;
5832 FPU : CA;
5833 %}
5835 //No.22 Floating div operation : dst <-- reg1 div reg2
// Scheduling class for floating-point division; uses the FPU2 resource
// rather than the general FPU.
5836 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5837 src1 : RD(read);
5838 src2 : RD(read);
5839 dst : WB(write);
5840 DECODE : ID;
5841 FPU2 : CA;
5842 %}
// Scheduling class for int -> double conversion; uses the FPU1 resource.
5844 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5845 src : RD(read);
5846 dst : WB(write);
5847 DECODE : ID;
5848 FPU1 : CA;
5849 %}
// Scheduling class for double -> int conversion; uses the FPU1 resource.
5852 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5853 src : RD(read);
5854 dst : WB(write);
5855 DECODE : ID;
5856 FPU1 : CA;
5857 %}
// Scheduling class for a move from FPU to GPR (mfc1-style); modeled on
// the MEM resource.
5859 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5860 src : RD(read);
5861 dst : WB(write);
5862 DECODE : ID;
5863 MEM : RD;
5864 %}
// Scheduling class for a move from GPR to FPU (mtc1-style); modeled on
// the MEM resource with an occupancy parameter of 5.
5866 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5867 src : RD(read);
5868 dst : WB(write);
5869 DECODE : ID;
5870 MEM : RD(5);
5871 %}
5872 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
// Scheduling class for floating-point square root; multi-bundle, and
// uses the FPU2 resource.
5873 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5874 multiple_bundles;
5875 src1 : RD(read);
5876 src2 : RD(read);
5877 dst : WB(write);
5878 DECODE : ID;
5879 FPU2 : CA;
5880 %}
5882 //No.11 Load Floating from Memory :
// Scheduling class for a floating-point load: one instruction; result
// written at WB with 3 cycles extra latency; occupies DECODE and MEM.
5883 pipe_class fpu_loadF(regF dst, memory mem) %{
5884 instruction_count(1);
5885 mem : RD(read);
5886 dst : WB(write)+3;
5887 DECODE : ID;
5888 MEM : RD;
5889 %}
5891 //No.12 Store Floating to Memory :
// Scheduling class for a floating-point store: one instruction; address
// and source read at RD; occupies DECODE and MEM.
5892 pipe_class fpu_storeF(regF src, memory mem) %{
5893 instruction_count(1);
5894 mem : RD(read);
5895 src : RD(read);
5896 DECODE : ID;
5897 MEM : RD;
5898 %}
5900 //No.13 FPU Conditional branch :
// Scheduling class for a conditional branch on a floating-point compare;
// occupies DECODE and the branch unit.
5901 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5902 multiple_bundles;
5903 src1 : RD(read);
5904 src2 : RD(read);
5905 DECODE : ID;
5906 BR : RD;
5907 %}
5909 //No.14 Floating FPU reg operation : dst <-- op reg
// Scheduling class for a single-source floating-point operation;
// occupies DECODE and the FPU resource.
5910 pipe_class fpu1_regF(regF dst, regF src) %{
5911 src : RD(read);
5912 dst : WB(write);
5913 DECODE : ID;
5914 FPU : CA;
5915 %}
// Scheduling class for complex memory sequences: serialized, multi-bundle,
// 10 instructions, with a fixed latency of 30 cycles.
5917 pipe_class long_memory_op() %{
5918 instruction_count(10); multiple_bundles; force_serialization;
5919 fixed_latency(30);
5920 %}
// Scheduling class for a plain call: serialized, fixed latency of 200
// cycles, and reserves the branch unit.
5922 pipe_class simple_call() %{
5923 instruction_count(10); multiple_bundles; force_serialization;
5924 fixed_latency(200);
5925 BR : RD;
5926 %}
// Scheduling class for a general call: serialized with a fixed latency
// of 200 cycles (no branch-unit reservation, unlike simple_call).
5928 pipe_class call() %{
5929 instruction_count(10); multiple_bundles; force_serialization;
5930 fixed_latency(200);
5931 %}
5933 //FIXME:
5934 //No.9 Piple slow : for multi-instructions
// Catch-all scheduling class for multi-instruction expansions: fully
// serialized, 20 instructions, fixed latency of 50 cycles.
5935 pipe_class pipe_slow( ) %{
5936 instruction_count(20);
5937 force_serialization;
5938 multiple_bundles;
5939 fixed_latency(50);
5940 %}
5942 %}
5946 //----------INSTRUCTIONS-------------------------------------------------------
5947 //
5948 // match -- States which machine-independent subtree may be replaced
5949 // by this instruction.
5950 // ins_cost -- The estimated cost of this instruction is used by instruction
5951 // selection to identify a minimum cost tree of machine
5952 // instructions that matches a tree of machine-independent
5953 // instructions.
5954 // format -- A string providing the disassembly for this instruction.
5955 // The value of an instruction's operand may be inserted
5956 // by referring to it with a '$' prefix.
5957 // opcode -- Three instruction opcodes may be provided. These are referred
5958 // to within an encode class as $primary, $secondary, and $tertiary
5959 // respectively. The primary opcode is commonly used to
5960 // indicate the type of machine instruction, while secondary
5961 // and tertiary are often used for prefix options or addressing
5962 // modes.
5963 // ins_encode -- A list of encode classes with parameters. The encode class
5964 // name must have been defined in an 'enc_class' specification
5965 // in the encode section of the architecture description.
5968 // Load Integer
// Load a 32-bit signed integer from memory (lw).
5969 instruct loadI(mRegI dst, memory mem) %{
5970 match(Set dst (LoadI mem));
5972 ins_cost(125);
5973 format %{ "lw $dst, $mem #@loadI" %}
5974 ins_encode (load_I_enc(dst, mem));
5975 ins_pipe( ialu_loadI );
5976 %}
// Fused LoadI + ConvI2L: lw already sign-extends the 32-bit value into
// the 64-bit register, so the same encoding serves both nodes.
5978 instruct loadI_convI2L(mRegL dst, memory mem) %{
5979 match(Set dst (ConvI2L (LoadI mem)));
5981 ins_cost(125);
5982 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5983 ins_encode (load_I_enc(dst, mem));
5984 ins_pipe( ialu_loadI );
5985 %}
5987 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Fold (LoadI << 24) >> 24 into a single sign-extending byte load (lb).
5988 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5989 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5991 ins_cost(125);
5992 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5993 ins_encode(load_B_enc(dst, mem));
5994 ins_pipe(ialu_loadI);
5995 %}
5997 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Fold (LoadI & 0xFF) into a single zero-extending byte load (lbu).
5998 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5999 match(Set dst (AndI (LoadI mem) mask));
6001 ins_cost(125);
6002 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6003 ins_encode(load_UB_enc(dst, mem));
6004 ins_pipe(ialu_loadI);
6005 %}
6007 // Load Integer (32 bit signed) to Short (16 bit signed)
// Fold (LoadI << 16) >> 16 into a single sign-extending halfword load (lh).
6008 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6009 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6011 ins_cost(125);
6012 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6013 ins_encode(load_S_enc(dst, mem));
6014 ins_pipe(ialu_loadI);
6015 %}
6017 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Fold (LoadI & 0xFFFF) into a single zero-extending halfword load (lhu).
6018 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6019 match(Set dst (AndI (LoadI mem) mask));
6021 ins_cost(125);
6022 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6023 ins_encode(load_C_enc(dst, mem));
6024 ins_pipe(ialu_loadI);
6025 %}
6027 // Load Long.
// Load a 64-bit long from memory (ld).  The atomic-access predicate is
// commented out: a single ld is naturally atomic on this target.
6028 instruct loadL(mRegL dst, memory mem) %{
6029 // predicate(!((LoadLNode*)n)->require_atomic_access());
6030 match(Set dst (LoadL mem));
6032 ins_cost(250);
6033 format %{ "ld $dst, $mem #@loadL" %}
6034 ins_encode(load_L_enc(dst, mem));
6035 ins_pipe( ialu_loadL );
6036 %}
6038 // Load Long - UNaligned
// Load an unaligned 64-bit long.  Currently encoded as a plain ld with a
// higher cost; the FIXME notes that ldl/ldr would be more efficient.
6039 instruct loadL_unaligned(mRegL dst, memory mem) %{
6040 match(Set dst (LoadL_unaligned mem));
6042 // FIXME: Jin: Need more effective ldl/ldr
6043 ins_cost(450);
6044 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6045 ins_encode(load_L_enc(dst, mem));
6046 ins_pipe( ialu_loadL );
6047 %}
6049 // Store Long
// Store a 64-bit long register to memory (sd); only selected when the
// node does NOT require atomic access (see storeL_reg_atomic otherwise).
6050 instruct storeL_reg(memory mem, mRegL src) %{
6051 predicate(!((StoreLNode*)n)->require_atomic_access());
6052 match(Set mem (StoreL mem src));
6054 ins_cost(200);
6055 format %{ "sd $mem, $src #@storeL_reg\n" %}
6056 ins_encode(store_L_reg_enc(mem, src));
6057 ins_pipe( ialu_storeL );
6058 %}
6060 //FIXME:volatile! atomic!
6061 // Volatile Store Long. Must be atomic, so move it into
6062 // the FP TOS and then do a 64-bit FIST. Has to probe the
6063 // target address before the store (for null-ptr checks)
6064 // so the memory operand is used twice in the encoding.
// Atomic (volatile) 64-bit store.  A single sd is atomic on MIPS64, so the
// encoding materializes the effective address (base [+ (index << scale)]
// [+ disp]) in AT and issues one sd.
//
// Fixes vs. previous revision:
//  - format showed "sw" although the encoding emits sd (64-bit store);
//  - address arithmetic used 32-bit addu, whose result is the sign-extended
//    32-bit sum and therefore corrupts 64-bit addresses at or above 2^31;
//    all address adds now use daddu (matching the 64-bit dsll used for the
//    index scaling and the daddu usage elsewhere in this file).
6065 instruct storeL_reg_atomic(memory mem, mRegL src) %{
6066 predicate(((StoreLNode*)n)->require_atomic_access());
6067 match(Set mem (StoreL mem src));
6069 ins_cost(200);
6070 format %{ "sd $mem, $src #@storeL_reg_atomic\n" %}
6071 ins_encode %{
6072 Register src = as_Register($src$$reg);
6074 int base = $mem$$base;
6075 int index = $mem$$index;
6076 int scale = $mem$$scale;
6077 int disp = $mem$$disp;
6079 if( index != 0 ) {
6080 if( Assembler::is_simm16(disp) ) {
6081 if (scale == 0) {
6082 __ daddu(AT, as_Register(base), as_Register(index));
6083 } else {
6084 __ dsll(AT, as_Register(index), scale);
6085 __ daddu(AT, as_Register(base), AT);
6086 }
6087 __ sd(src, AT, disp);
6088 } else {
6089 if (scale == 0) {
6090 __ daddu(AT, as_Register(base), as_Register(index));
6091 } else {
6092 __ dsll(AT, as_Register(index), scale);
6093 __ daddu(AT, as_Register(base), AT);
6094 }
// Displacement does not fit in the 16-bit sd offset: add it via T9.
6095 __ move(T9, disp);
6096 __ daddu(AT, AT, T9);
6097 __ sd(src, AT, 0);
6098 }
6099 } else {
6100 if( Assembler::is_simm16(disp) ) {
6101 __ move(AT, as_Register(base));
6102 __ sd(src, AT, disp);
6103 } else {
6104 __ move(AT, as_Register(base));
6105 __ move(T9, disp);
6106 __ daddu(AT, AT, T9);
6107 __ sd(src, AT, 0);
6108 }
6109 }
6111 %}
6112 ins_pipe( ialu_storeL );
6113 %}
// Store the long constant zero to memory: sd of the hard-wired zero
// register, avoiding a constant materialization.
6115 instruct storeL_immL0(memory mem, immL0 zero) %{
6116 match(Set mem (StoreL mem zero));
6118 ins_cost(180);
6119 format %{ "sd $mem, zero #@storeL_immL0" %}
6120 ins_encode(store_L_immL0_enc(mem, zero));
6121 ins_pipe( ialu_storeL );
6122 %}
// Store a non-zero 64-bit long immediate to memory.  The encoding
// (store_L_immL_enc) materializes the immediate and performs a 64-bit
// store; the format previously showed "sw", which misrepresented the
// emitted instruction in disassembly — it now shows sd, consistent with
// storeL_immL0 above.
6124 instruct storeL_imm(memory mem, immL src) %{
6125 match(Set mem (StoreL mem src));
6127 ins_cost(200);
6128 format %{ "sd $mem, $src #@storeL_imm" %}
6129 ins_encode(store_L_immL_enc(mem, src));
6130 ins_pipe( ialu_storeL );
6131 %}
6133 // Load Compressed Pointer
// Load a compressed (narrow) oop: zero-extending 32-bit load (lwu).
6134 instruct loadN(mRegN dst, memory mem)
6135 %{
6136 match(Set dst (LoadN mem));
6138 ins_cost(125); // XXX
6139 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6140 ins_encode (load_N_enc(dst, mem));
6141 ins_pipe( ialu_loadI ); // XXX
6142 %}
6144 // Load Pointer
// Load a full-width (64-bit) pointer from memory (ld).
6145 instruct loadP(mRegP dst, memory mem) %{
6146 match(Set dst (LoadP mem));
6148 ins_cost(125);
6149 format %{ "ld $dst, $mem #@loadP" %}
6150 ins_encode (load_P_enc(dst, mem));
6151 ins_pipe( ialu_loadI );
6152 %}
6154 // Load Klass Pointer
// Load an (uncompressed) klass pointer; same encoding as loadP.
6155 instruct loadKlass(mRegP dst, memory mem) %{
6156 match(Set dst (LoadKlass mem));
6158 ins_cost(125);
6159 format %{ "MOV $dst,$mem @ loadKlass" %}
6160 ins_encode (load_P_enc(dst, mem));
6161 ins_pipe( ialu_loadI );
6162 %}
6164 // Load narrow Klass Pointer
// Load a compressed (narrow) klass pointer: zero-extending lwu, same
// encoding as loadN.
6165 instruct loadNKlass(mRegN dst, memory mem)
6166 %{
6167 match(Set dst (LoadNKlass mem));
6169 ins_cost(125); // XXX
6170 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6171 ins_encode (load_N_enc(dst, mem));
6172 ins_pipe( ialu_loadI ); // XXX
6173 %}
6175 // Load Constant
// Materialize a 32-bit integer constant into a register via the
// assembler's move(reg, imm) helper.
6176 instruct loadConI(mRegI dst, immI src) %{
6177 match(Set dst src);
6179 ins_cost(150);
6180 format %{ "mov $dst, $src #@loadConI" %}
6181 ins_encode %{
6182 Register dst = $dst$$Register;
6183 int value = $src$$constant;
6184 __ move(dst, value);
6185 %}
6186 ins_pipe( ialu_regI_regI );
6187 %}
// Materialize an arbitrary 64-bit long constant via set64 (which picks
// the shortest instruction sequence for the value).
6190 instruct loadConL_set64(mRegL dst, immL src) %{
6191 match(Set dst src);
6192 ins_cost(120);
6193 format %{ "li $dst, $src @ loadConL_set64" %}
6194 ins_encode %{
6195 __ set64($dst$$Register, $src$$constant);
6196 %}
6197 ins_pipe(ialu_regL_regL);
6198 %}
6200 /*
6201 // Load long value from constant table (predicated by immL_expensive).
6202 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6203 match(Set dst src);
6204 ins_cost(150);
6205 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6206 ins_encode %{
6207 int con_offset = $constantoffset($src);
6209 if (Assembler::is_simm16(con_offset)) {
6210 __ ld($dst$$Register, $constanttablebase, con_offset);
6211 } else {
6212 __ set64(AT, con_offset);
6213 if (UseLoongsonISA) {
6214 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6215 } else {
6216 __ daddu(AT, $constanttablebase, AT);
6217 __ ld($dst$$Register, AT, 0);
6218 }
6219 }
6220 %}
6221 ins_pipe(ialu_loadI);
6222 %}
6223 */
// Materialize a long constant that fits in a signed 16-bit immediate
// with a single daddiu from R0; cheaper than the general set64 path.
6225 instruct loadConL16(mRegL dst, immL16 src) %{
6226 match(Set dst src);
6227 ins_cost(105);
6228 format %{ "mov $dst, $src #@loadConL16" %}
6229 ins_encode %{
6230 Register dst_reg = as_Register($dst$$reg);
6231 int value = $src$$constant;
6232 __ daddiu(dst_reg, R0, value);
6233 %}
6234 ins_pipe( ialu_regL_regL );
6235 %}
// Materialize the long constant zero: daddu dst, R0, R0 — the cheapest
// form, so it carries the lowest cost of the loadConL variants.
6238 instruct loadConL0(mRegL dst, immL0 src) %{
6239 match(Set dst src);
6240 ins_cost(100);
6241 format %{ "mov $dst, zero #@loadConL0" %}
6242 ins_encode %{
6243 Register dst_reg = as_Register($dst$$reg);
6244 __ daddu(dst_reg, R0, R0);
6245 %}
6246 ins_pipe( ialu_regL_regL );
6247 %}
6249 // Load Range
// Load an array length (LoadRange); encoded as an ordinary 32-bit load.
6250 instruct loadRange(mRegI dst, memory mem) %{
6251 match(Set dst (LoadRange mem));
6253 ins_cost(125);
6254 format %{ "MOV $dst,$mem @ loadRange" %}
6255 ins_encode(load_I_enc(dst, mem));
6256 ins_pipe( ialu_loadI );
6257 %}
// Store a full-width (64-bit) pointer register to memory (sd).
6260 instruct storeP(memory mem, mRegP src ) %{
6261 match(Set mem (StoreP mem src));
6263 ins_cost(125);
6264 format %{ "sd $src, $mem #@storeP" %}
6265 ins_encode(store_P_reg_enc(mem, src));
6266 ins_pipe( ialu_storeI );
6267 %}
6269 /*
6270 [Ref: loadConP]
6272 Error:
6273 0x2d4b6d40: lui t9, 0x4f <--- handle
6274 0x2d4b6d44: addiu t9, t9, 0xffff808c
6275 0x2d4b6d48: sw t9, 0x4(s2)
6277 OK:
6278 0x2cc5ed40: lui t9, 0x336a <--- klass
6279 0x2cc5ed44: addiu t9, t9, 0x5a10
6280 0x2cc5ed48: sw t9, 0x4(s2)
6281 */
6282 // Store Pointer Immediate; null pointers or constant oops that do not
6283 // need card-mark barriers.
6285 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store a simple pointer constant (NULL, mark word, etc. — an immP31
// that needs no card-mark barrier) directly to memory.
6286 instruct storeImmP(memory mem, immP31 src) %{
6287 match(Set mem (StoreP mem src));
6289 ins_cost(150);
6290 format %{ "mov $mem, $src #@storeImmP" %}
6291 ins_encode(store_P_immP_enc(mem, src));
6292 ins_pipe( ialu_storeI );
6293 %}
6295 // Store Byte Immediate
// Store an 8-bit immediate to memory.
6296 instruct storeImmB(memory mem, immI8 src) %{
6297 match(Set mem (StoreB mem src));
6299 ins_cost(150);
6300 format %{ "movb $mem, $src #@storeImmB" %}
6301 ins_encode(store_B_immI_enc(mem, src));
6302 ins_pipe( ialu_storeI );
6303 %}
6305 // Store Compressed Pointer
// Store a compressed (narrow) oop register to memory (32-bit sw).
6306 instruct storeN(memory mem, mRegN src)
6307 %{
6308 match(Set mem (StoreN mem src));
6310 ins_cost(125); // XXX
6311 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6312 ins_encode(store_N_reg_enc(mem, src));
6313 ins_pipe( ialu_storeI );
6314 %}
// Store a compressed (narrow) klass pointer register; same encoding as
// storeN.
6316 instruct storeNKlass(memory mem, mRegN src)
6317 %{
6318 match(Set mem (StoreNKlass mem src));
6320 ins_cost(125); // XXX
6321 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6322 ins_encode(store_N_reg_enc(mem, src));
6323 ins_pipe( ialu_storeI );
6324 %}
// Store a narrow-oop NULL.  Only valid when both the oop and klass
// compression bases are NULL (so encoded NULL is literally zero).
// NOTE(review): the format text mentions "R12", which looks like an
// x86 leftover — the actual register used is defined in storeImmN0_enc.
6326 instruct storeImmN0(memory mem, immN0 zero)
6327 %{
6328 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6329 match(Set mem (StoreN mem zero));
6331 ins_cost(125); // XXX
6332 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6333 ins_encode(storeImmN0_enc(mem, zero));
6334 ins_pipe( ialu_storeI );
6335 %}
// Store a non-zero narrow-oop constant to memory.
6337 instruct storeImmN(memory mem, immN src)
6338 %{
6339 match(Set mem (StoreN mem src));
6341 ins_cost(150); // XXX
6342 format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
6343 ins_encode(storeImmN_enc(mem, src));
6344 ins_pipe( ialu_storeI );
6345 %}
// Store a narrow-klass constant to memory.
6347 instruct storeImmNKlass(memory mem, immNKlass src)
6348 %{
6349 match(Set mem (StoreNKlass mem src));
6351 ins_cost(150); // XXX
6352 format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
6353 ins_encode(storeImmNKlass_enc(mem, src));
6354 ins_pipe( ialu_storeI );
6355 %}
6357 // Store Byte
// Store the low byte of an integer register to memory (sb).
6358 instruct storeB(memory mem, mRegI src) %{
6359 match(Set mem (StoreB mem src));
6361 ins_cost(125);
6362 format %{ "sb $src, $mem #@storeB" %}
6363 ins_encode(store_B_reg_enc(mem, src));
6364 ins_pipe( ialu_storeI );
6365 %}
// Fused StoreB of a ConvL2I: sb only writes the low byte, so the
// explicit long->int truncation can be elided.
6367 instruct storeB_convL2I(memory mem, mRegL src) %{
6368 match(Set mem (StoreB mem (ConvL2I src)));
6370 ins_cost(125);
6371 format %{ "sb $src, $mem #@storeB_convL2I" %}
6372 ins_encode(store_B_reg_enc(mem, src));
6373 ins_pipe( ialu_storeI );
6374 %}
6376 // Load Byte (8bit signed)
// Load a signed byte from memory (lb).
6377 instruct loadB(mRegI dst, memory mem) %{
6378 match(Set dst (LoadB mem));
6380 ins_cost(125);
6381 format %{ "lb $dst, $mem #@loadB" %}
6382 ins_encode(load_B_enc(dst, mem));
6383 ins_pipe( ialu_loadI );
6384 %}
// Fused LoadB + ConvI2L: lb sign-extends to the full register width, so
// no separate widening instruction is needed.
6386 instruct loadB_convI2L(mRegL dst, memory mem) %{
6387 match(Set dst (ConvI2L (LoadB mem)));
6389 ins_cost(125);
6390 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6391 ins_encode(load_B_enc(dst, mem));
6392 ins_pipe( ialu_loadI );
6393 %}
6395 // Load Byte (8bit UNsigned)
// Load an unsigned byte from memory (lbu, zero-extending).
6396 instruct loadUB(mRegI dst, memory mem) %{
6397 match(Set dst (LoadUB mem));
6399 ins_cost(125);
6400 format %{ "lbu $dst, $mem #@loadUB" %}
6401 ins_encode(load_UB_enc(dst, mem));
6402 ins_pipe( ialu_loadI );
6403 %}
// Fused LoadUB + ConvI2L: lbu zero-extends to the full register width.
6405 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6406 match(Set dst (ConvI2L (LoadUB mem)));
6408 ins_cost(125);
6409 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6410 ins_encode(load_UB_enc(dst, mem));
6411 ins_pipe( ialu_loadI );
6412 %}
6414 // Load Short (16bit signed)
// Load a signed 16-bit short from memory (lh).
6415 instruct loadS(mRegI dst, memory mem) %{
6416 match(Set dst (LoadS mem));
6418 ins_cost(125);
6419 format %{ "lh $dst, $mem #@loadS" %}
6420 ins_encode(load_S_enc(dst, mem));
6421 ins_pipe( ialu_loadI );
6422 %}
6424 // Load Short (16 bit signed) to Byte (8 bit signed)
// Fold (LoadS << 24) >> 24 into a single sign-extending byte load (lb).
6425 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6426 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6428 ins_cost(125);
6429 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6430 ins_encode(load_B_enc(dst, mem));
6431 ins_pipe(ialu_loadI);
6432 %}
// Fused LoadS + ConvI2L: lh sign-extends to the full register width.
6434 instruct loadS_convI2L(mRegL dst, memory mem) %{
6435 match(Set dst (ConvI2L (LoadS mem)));
6437 ins_cost(125);
6438 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6439 ins_encode(load_S_enc(dst, mem));
6440 ins_pipe( ialu_loadI );
6441 %}
6443 // Store Integer Immediate
// Store a 32-bit integer immediate to memory.
6444 instruct storeImmI(memory mem, immI src) %{
6445 match(Set mem (StoreI mem src));
6447 ins_cost(150);
6448 format %{ "mov $mem, $src #@storeImmI" %}
6449 ins_encode(store_I_immI_enc(mem, src));
6450 ins_pipe( ialu_storeI );
6451 %}
6453 // Store Integer
// Store a 32-bit integer register to memory (sw).
6454 instruct storeI(memory mem, mRegI src) %{
6455 match(Set mem (StoreI mem src));
6457 ins_cost(125);
6458 format %{ "sw $mem, $src #@storeI" %}
6459 ins_encode(store_I_reg_enc(mem, src));
6460 ins_pipe( ialu_storeI );
6461 %}
// Fused StoreI of a ConvL2I: sw only writes the low 32 bits, so the
// explicit truncation can be elided.
6463 instruct storeI_convL2I(memory mem, mRegL src) %{
6464 match(Set mem (StoreI mem (ConvL2I src)));
6466 ins_cost(125);
6467 format %{ "sw $mem, $src #@storeI_convL2I" %}
6468 ins_encode(store_I_reg_enc(mem, src));
6469 ins_pipe( ialu_storeI );
6470 %}
6472 // Load Float
// Load a single-precision float from memory into an FP register.
6473 instruct loadF(regF dst, memory mem) %{
6474 match(Set dst (LoadF mem));
6476 ins_cost(150);
6477 format %{ "loadF $dst, $mem #@loadF" %}
6478 ins_encode(load_F_enc(dst, mem));
6479 ins_pipe( ialu_loadI );
6480 %}
// Materialize a general pointer constant.  If the constant needs
// relocation (a movable oop or a klass/metadata reference), record the
// appropriate relocation and emit a fixed-length li48 so the patcher can
// rewrite the address in place; otherwise emit the cheapest set64
// sequence for the literal value.
6482 instruct loadConP_general(mRegP dst, immP src) %{
6483 match(Set dst src);
6485 ins_cost(120);
6486 format %{ "li $dst, $src #@loadConP_general" %}
6488 ins_encode %{
6489 Register dst = $dst$$Register;
6490 long* value = (long*)$src$$constant;
6491 bool is_need_reloc = $src->constant_reloc() != relocInfo::none;
6493 /* During GC, klassOop may be moved to new position in the heap.
6494 * It must be relocated.
6495 * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
6496 */
6497 if (is_need_reloc) {
// Metadata (klass) constant: register it with the oop recorder and
// emit a patchable li48.
6498 if($src->constant_reloc() == relocInfo::metadata_type){
6499 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6500 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6502 __ relocate(rspec);
6503 __ li48(dst, (long)value);
6504 }
// Oop constant: same scheme with an oop relocation.
6506 if($src->constant_reloc() == relocInfo::oop_type){
6507 int oop_index = __ oop_recorder()->find_index((jobject)value);
6508 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6510 __ relocate(rspec);
6511 __ li48(dst, (long)value);
6512 }
6513 } else {
6514 __ set64(dst, (long)value);
6515 }
6516 %}
6518 ins_pipe( ialu_regI_regI );
6519 %}
6521 /*
6522 instruct loadConP_load(mRegP dst, immP_load src) %{
6523 match(Set dst src);
6525 ins_cost(100);
6526 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6528 ins_encode %{
6530 int con_offset = $constantoffset($src);
6532 if (Assembler::is_simm16(con_offset)) {
6533 __ ld($dst$$Register, $constanttablebase, con_offset);
6534 } else {
6535 __ set64(AT, con_offset);
6536 if (UseLoongsonISA) {
6537 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6538 } else {
6539 __ daddu(AT, $constanttablebase, AT);
6540 __ ld($dst$$Register, AT, 0);
6541 }
6542 }
6543 %}
6545 ins_pipe(ialu_loadI);
6546 %}
6547 */
// Materialize a non-oop pointer constant that is cheap to build inline
// (no relocation needed); lowest cost of the general loadConP forms.
6549 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6550 match(Set dst src);
6552 ins_cost(80);
6553 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6555 ins_encode %{
6556 __ set64($dst$$Register, $src$$constant);
6557 %}
6559 ins_pipe(ialu_regI_regI);
6560 %}
// Materialize the safepoint polling page address.
6563 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6564 match(Set dst src);
6566 ins_cost(50);
6567 format %{ "li $dst, $src #@loadConP_poll" %}
6569 ins_encode %{
6570 Register dst = $dst$$Register;
6571 intptr_t value = (intptr_t)$src$$constant;
6573 __ set64(dst, (jlong)value);
6574 %}
6576 ins_pipe( ialu_regI_regI );
6577 %}
// Materialize the NULL pointer: daddu dst, R0, R0.
6580 instruct loadConP0(mRegP dst, immP0 src)
6581 %{
6582 match(Set dst src);
6583 ins_cost(50);
6584 format %{ "mov $dst, R0\t# ptr" %}
6585 ins_encode %{
6586 Register dst_reg = $dst$$Register;
6587 __ daddu(dst_reg, R0, R0);
6588 %}
6589 ins_pipe( ialu_regI_regI );
6590 %}
// Materialize the compressed (narrow) NULL pointer, encoded as zero.
6592 instruct loadConN0(mRegN dst, immN0 src) %{
6593 match(Set dst src);
6594 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6595 ins_encode %{
6596 __ move($dst$$Register, R0);
6597 %}
6598 ins_pipe( ialu_regI_regI );
6599 %}
// Materialize a non-NULL compressed-oop constant.  The oop is registered
// with the oop recorder, a narrow-oop relocation is emitted, and a
// patchable li48 loads the value.
// NOTE(review): li48/set64 are given oop_index here (not the narrow oop
// value itself) — presumably the relocation patcher rewrites the operand
// to the real narrow oop; confirm against the relocation code.
6601 instruct loadConN(mRegN dst, immN src) %{
6602 match(Set dst src);
6604 ins_cost(125);
6605 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6606 ins_encode %{
6607 address con = (address)$src$$constant;
6608 if (con == NULL) {
// NULL is handled by loadConN0, never here.
6609 ShouldNotReachHere();
6610 } else {
6611 assert (UseCompressedOops, "should only be used for compressed headers");
6612 assert (Universe::heap() != NULL, "java heap should be initialized");
6613 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
6615 Register dst = $dst$$Register;
6616 long* value = (long*)$src$$constant;
6617 int oop_index = __ oop_recorder()->find_index((jobject)value);
6618 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6619 if(rspec.type()!=relocInfo::none){
6620 __ relocate(rspec, Assembler::narrow_oop_operand);
6621 __ li48(dst, oop_index);
6622 } else {
6623 __ set64(dst, oop_index);
6624 }
6625 }
6626 %}
6627 ins_pipe( ialu_regI_regI ); // XXX
6628 %}
// Materialize a compressed (narrow) klass constant: record a metadata
// relocation and load the encoded klass value with a patchable li48.
// NOTE(review): the relocation format argument is narrow_oop_operand even
// though the relocation itself is metadata — verify this matches the
// patcher's expectations.
6630 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6631 match(Set dst src);
6633 ins_cost(125);
6634 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6635 ins_encode %{
6636 address con = (address)$src$$constant;
6637 if (con == NULL) {
6638 ShouldNotReachHere();
6639 } else {
6640 Register dst = $dst$$Register;
6641 long* value = (long*)$src$$constant;
6643 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6644 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6645 long narrowp = (long)Klass::encode_klass((Klass*)value);
6647 if(rspec.type()!=relocInfo::none){
6648 __ relocate(rspec, Assembler::narrow_oop_operand);
6649 __ li48(dst, narrowp);
6650 } else {
6651 __ set64(dst, narrowp);
6652 }
6653 }
6654 %}
6655 ins_pipe( ialu_regI_regI ); // XXX
6656 %}
6658 //FIXME
6659 // Tail Call; Jump from runtime stub to Java code.
6660 // Also known as an 'interprocedural jump'.
6661 // Target of jump will eventually return to caller.
6662 // TailJump below removes the return address.
// Tail call (interprocedural jump) from a runtime stub into Java code:
// saves RA (needed later by generate_forward_exception), passes the
// method oop in S3, and jumps through the target register.
6663 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6664 match(TailCall jump_target method_oop );
6665 ins_cost(300);
6666 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6668 ins_encode %{
6669 Register target = $jump_target$$Register;
6670 Register oop = $method_oop$$Register;
6672 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6673 __ push(RA);
6675 __ move(S3, oop);
6676 __ jr(target);
// Branch-delay slot.
6677 __ nop();
6678 %}
6680 ins_pipe( pipe_jump );
6681 %}
6683 // Create exception oop: created by stack-crawling runtime code.
6684 // Created exception is now available to this handler, and is setup
6685 // just prior to jumping to this handler. No code emitted.
// CreateEx: the exception oop is already in A0 when this handler is
// entered, so no code is emitted — only a block comment for debugging.
6686 instruct CreateException( a0_RegP ex_oop )
6687 %{
6688 match(Set ex_oop (CreateEx));
6690 // use the following format syntax
6691 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6692 ins_encode %{
6693 /* Jin: X86 leaves this function empty */
6694 __ block_comment("CreateException is empty in X86/MIPS");
6695 %}
6696 ins_pipe( empty );
6697 // ins_pipe( pipe_jump );
6698 %}
6701 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6703 - Common try/catch:
6704 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6705 |- V0, V1 are created
6706 |- T9 <= SharedRuntime::exception_handler_for_return_address
6707 `- jr T9
6708 `- the caller's exception_handler
6709 `- jr OptoRuntime::exception_blob
6710 `- here
6711 - Rethrow(e.g. 'unwind'):
6712 * The callee:
6713 |- an exception is triggered during execution
6714 `- exits the callee method through RethrowException node
6715 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6716 `- The callee jumps to OptoRuntime::rethrow_stub()
6717 * In OptoRuntime::rethrow_stub:
6718 |- The VM calls _rethrow_Java to determine the return address in the caller method
6719 `- exits the stub with tailjmpInd
6720 |- pops exception_oop(V0) and exception_pc(V1)
6721 `- jumps to the return address(usually an exception_handler)
6722 * The caller:
6723 `- continues processing the exception_blob with V0/V1
6724 */
6726 /*
6727 Disassembling OptoRuntime::rethrow_stub()
6729 ; locals
6730 0x2d3bf320: addiu sp, sp, 0xfffffff8
6731 0x2d3bf324: sw ra, 0x4(sp)
6732 0x2d3bf328: sw fp, 0x0(sp)
6733 0x2d3bf32c: addu fp, sp, zero
6734 0x2d3bf330: addiu sp, sp, 0xfffffff0
6735 0x2d3bf334: sw ra, 0x8(sp)
6736 0x2d3bf338: sw t0, 0x4(sp)
6737 0x2d3bf33c: sw sp, 0x0(sp)
6739 ; get_thread(S2)
6740 0x2d3bf340: addu s2, sp, zero
6741 0x2d3bf344: srl s2, s2, 12
6742 0x2d3bf348: sll s2, s2, 2
6743 0x2d3bf34c: lui at, 0x2c85
6744 0x2d3bf350: addu at, at, s2
6745 0x2d3bf354: lw s2, 0xffffcc80(at)
6747 0x2d3bf358: lw s0, 0x0(sp)
6748 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6749 0x2d3bf360: sw s2, 0xc(sp)
6751 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6752 0x2d3bf364: lw a0, 0x4(sp)
6753 0x2d3bf368: lw a1, 0xc(sp)
6754 0x2d3bf36c: lw a2, 0x8(sp)
6755 ;; Java_To_Runtime
6756 0x2d3bf370: lui t9, 0x2c34
6757 0x2d3bf374: addiu t9, t9, 0xffff8a48
6758 0x2d3bf378: jalr t9
6759 0x2d3bf37c: nop
6761 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6763 0x2d3bf384: lw s0, 0xc(sp)
6764 0x2d3bf388: sw zero, 0x118(s0)
6765 0x2d3bf38c: sw zero, 0x11c(s0)
6766 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6767 0x2d3bf394: addu s2, s0, zero
6768 0x2d3bf398: sw zero, 0x144(s2)
6769 0x2d3bf39c: lw s0, 0x4(s2)
6770 0x2d3bf3a0: addiu s4, zero, 0x0
6771 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6772 0x2d3bf3a8: nop
6773 0x2d3bf3ac: addiu sp, sp, 0x10
6774 0x2d3bf3b0: addiu sp, sp, 0x8
6775 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6776 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6777 0x2d3bf3bc: lui at, 0x2b48
6778 0x2d3bf3c0: lw at, 0x100(at)
6780 ; tailjmpInd: Restores exception_oop & exception_pc
6781 0x2d3bf3c4: addu v1, ra, zero
6782 0x2d3bf3c8: addu v0, s1, zero
6783 0x2d3bf3cc: jr s3
6784 0x2d3bf3d0: nop
6785 ; Exception:
6786 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6787 0x2d3bf3d8: addiu s1, s1, 0x40
6788 0x2d3bf3dc: addiu s2, zero, 0x0
6789 0x2d3bf3e0: addiu sp, sp, 0x10
6790 0x2d3bf3e4: addiu sp, sp, 0x8
6791 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6792 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6793 0x2d3bf3f0: lui at, 0x2b48
6794 0x2d3bf3f4: lw at, 0x100(at)
6795 ; TailCalljmpInd
6796 __ push(RA); ; to be used in generate_forward_exception()
6797 0x2d3bf3f8: addu t7, s2, zero
6798 0x2d3bf3fc: jr s1
6799 0x2d3bf400: nop
6800 */
6801 // Rethrow exception:
6802 // The exception oop will come in the first argument position.
6803 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to OptoRuntime::rethrow_stub, which locates
// the exception handler in the parent method.  A runtime-call relocation
// is recorded so the stub address can be patched.
6804 instruct RethrowException()
6805 %{
6806 match(Rethrow);
6808 // use the following format syntax
6809 format %{ "JMP rethrow_stub #@RethrowException" %}
6810 ins_encode %{
6811 __ block_comment("@ RethrowException");
6813 cbuf.set_insts_mark();
6814 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6816 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6817 __ li(T9, OptoRuntime::rethrow_stub());
6818 __ jr(T9);
// Branch-delay slot.
6819 __ nop();
6820 %}
6821 ins_pipe( pipe_jump );
6822 %}
// Branch on pointer compare against NULL: only eq/ne are reachable (the
// unsigned above/below cases are commented out since comparing with zero
// makes them degenerate).  The `if (&L)` tests whether a real label was
// supplied; with no label the branch is emitted with offset 0.  The
// trailing nop fills the MIPS branch-delay slot.
6824 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6825 match(If cmp (CmpP op1 zero));
6826 effect(USE labl);
6828 ins_cost(180);
6829 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6831 ins_encode %{
6832 Register op1 = $op1$$Register;
6833 Register op2 = R0;
6834 Label &L = *($labl$$label);
6835 int flag = $cmp$$cmpcode;
6837 switch(flag)
6838 {
6839 case 0x01: //equal
6840 if (&L)
6841 __ beq(op1, op2, L);
6842 else
6843 __ beq(op1, op2, (int)0);
6844 break;
6845 case 0x02: //not_equal
6846 if (&L)
6847 __ bne(op1, op2, L);
6848 else
6849 __ bne(op1, op2, (int)0);
6850 break;
6851 /*
6852 case 0x03: //above
6853 __ sltu(AT, op2, op1);
6854 if(&L)
6855 __ bne(R0, AT, L);
6856 else
6857 __ bne(R0, AT, (int)0);
6858 break;
6859 case 0x04: //above_equal
6860 __ sltu(AT, op1, op2);
6861 if(&L)
6862 __ beq(AT, R0, L);
6863 else
6864 __ beq(AT, R0, (int)0);
6865 break;
6866 case 0x05: //below
6867 __ sltu(AT, op1, op2);
6868 if(&L)
6869 __ bne(R0, AT, L);
6870 else
6871 __ bne(R0, AT, (int)0);
6872 break;
6873 case 0x06: //below_equal
6874 __ sltu(AT, op2, op1);
6875 if(&L)
6876 __ beq(AT, R0, L);
6877 else
6878 __ beq(AT, R0, (int)0);
6879 break;
6880 */
6881 default:
6882 Unimplemented();
6883 }
6884 __ nop();
6885 %}
6887 ins_pc_relative(1);
6888 ins_pipe( pipe_alu_branch );
6889 %}
// Branch on unsigned pointer compare of two registers.  cmpcode values:
// 0x01 eq, 0x02 ne (direct beq/bne); 0x03-0x06 above/above_equal/below/
// below_equal are synthesized with sltu into AT followed by bne/beq
// against R0.  The `if (&L)` null-label check and delay-slot nop follow
// the same pattern as branchConP_zero.
6892 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6893 match(If cmp (CmpP op1 op2));
6894 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6895 effect(USE labl);
6897 ins_cost(200);
6898 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6900 ins_encode %{
6901 Register op1 = $op1$$Register;
6902 Register op2 = $op2$$Register;
6903 Label &L = *($labl$$label);
6904 int flag = $cmp$$cmpcode;
6906 switch(flag)
6907 {
6908 case 0x01: //equal
6909 if (&L)
6910 __ beq(op1, op2, L);
6911 else
6912 __ beq(op1, op2, (int)0);
6913 break;
6914 case 0x02: //not_equal
6915 if (&L)
6916 __ bne(op1, op2, L);
6917 else
6918 __ bne(op1, op2, (int)0);
6919 break;
6920 case 0x03: //above
6921 __ sltu(AT, op2, op1);
6922 if(&L)
6923 __ bne(R0, AT, L);
6924 else
6925 __ bne(R0, AT, (int)0);
6926 break;
6927 case 0x04: //above_equal
6928 __ sltu(AT, op1, op2);
6929 if(&L)
6930 __ beq(AT, R0, L);
6931 else
6932 __ beq(AT, R0, (int)0);
6933 break;
6934 case 0x05: //below
6935 __ sltu(AT, op1, op2);
6936 if(&L)
6937 __ bne(R0, AT, L);
6938 else
6939 __ bne(R0, AT, (int)0);
6940 break;
6941 case 0x06: //below_equal
6942 __ sltu(AT, op2, op1);
6943 if(&L)
6944 __ beq(AT, R0, L);
6945 else
6946 __ beq(AT, R0, (int)0);
6947 break;
6948 default:
6949 Unimplemented();
6950 }
6951 __ nop();
6952 %}
6954 ins_pc_relative(1);
6955 ins_pipe( pipe_alu_branch );
6956 %}
// Branch on narrow-oop compare against the compressed NULL: only eq/ne
// are meaningful.  Same null-label guard and delay-slot nop as the other
// branch instructs.
6958 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6959 match(If cmp (CmpN op1 null));
6960 effect(USE labl);
6962 ins_cost(180);
6963 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6964 "BP$cmp $labl @ cmpN_null_branch" %}
6965 ins_encode %{
6966 Register op1 = $op1$$Register;
6967 Register op2 = R0;
6968 Label &L = *($labl$$label);
6969 int flag = $cmp$$cmpcode;
6971 switch(flag)
6972 {
6973 case 0x01: //equal
6974 if (&L)
6975 __ beq(op1, op2, L);
6976 else
6977 __ beq(op1, op2, (int)0);
6978 break;
6979 case 0x02: //not_equal
6980 if (&L)
6981 __ bne(op1, op2, L);
6982 else
6983 __ bne(op1, op2, (int)0);
6984 break;
6985 default:
6986 Unimplemented();
6987 }
6988 __ nop();
6989 %}
6990 //TODO: pipe_branchP or create pipe_branchN LEE
6991 ins_pc_relative(1);
6992 ins_pipe( pipe_alu_branch );
6993 %}
// Branch on narrow-oop compare of two registers.  Same cmpcode scheme as
// branchConP: eq/ne directly, unsigned orderings via sltu into AT.
6995 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6996 match(If cmp (CmpN op1 op2));
6997 effect(USE labl);
6999 ins_cost(180);
7000 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7001 "BP$cmp $labl" %}
7002 ins_encode %{
7003 Register op1_reg = $op1$$Register;
7004 Register op2_reg = $op2$$Register;
7005 Label &L = *($labl$$label);
7006 int flag = $cmp$$cmpcode;
7008 switch(flag)
7009 {
7010 case 0x01: //equal
7011 if (&L)
7012 __ beq(op1_reg, op2_reg, L);
7013 else
7014 __ beq(op1_reg, op2_reg, (int)0);
7015 break;
7016 case 0x02: //not_equal
7017 if (&L)
7018 __ bne(op1_reg, op2_reg, L);
7019 else
7020 __ bne(op1_reg, op2_reg, (int)0);
7021 break;
7022 case 0x03: //above
7023 __ sltu(AT, op2_reg, op1_reg);
7024 if(&L)
7025 __ bne(R0, AT, L);
7026 else
7027 __ bne(R0, AT, (int)0);
7028 break;
7029 case 0x04: //above_equal
7030 __ sltu(AT, op1_reg, op2_reg);
7031 if(&L)
7032 __ beq(AT, R0, L);
7033 else
7034 __ beq(AT, R0, (int)0);
7035 break;
7036 case 0x05: //below
7037 __ sltu(AT, op1_reg, op2_reg);
7038 if(&L)
7039 __ bne(R0, AT, L);
7040 else
7041 __ bne(R0, AT, (int)0);
7042 break;
7043 case 0x06: //below_equal
7044 __ sltu(AT, op2_reg, op1_reg);
7045 if(&L)
7046 __ beq(AT, R0, L);
7047 else
7048 __ beq(AT, R0, (int)0);
7049 break;
7050 default:
7051 Unimplemented();
7052 }
7053 __ nop();
7054 %}
7055 ins_pc_relative(1);
7056 ins_pipe( pipe_alu_branch );
7057 %}
7059 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7060 match( If cmp (CmpU src1 src2) );
7061 effect(USE labl);
7062 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
7064 ins_encode %{
7065 Register op1 = $src1$$Register;
7066 Register op2 = $src2$$Register;
7067 Label &L = *($labl$$label);
7068 int flag = $cmp$$cmpcode;
7070 switch(flag)
7071 {
7072 case 0x01: //equal
7073 if (&L)
7074 __ beq(op1, op2, L);
7075 else
7076 __ beq(op1, op2, (int)0);
7077 break;
7078 case 0x02: //not_equal
7079 if (&L)
7080 __ bne(op1, op2, L);
7081 else
7082 __ bne(op1, op2, (int)0);
7083 break;
7084 case 0x03: //above
7085 __ sltu(AT, op2, op1);
7086 if(&L)
7087 __ bne(AT, R0, L);
7088 else
7089 __ bne(AT, R0, (int)0);
7090 break;
7091 case 0x04: //above_equal
7092 __ sltu(AT, op1, op2);
7093 if(&L)
7094 __ beq(AT, R0, L);
7095 else
7096 __ beq(AT, R0, (int)0);
7097 break;
7098 case 0x05: //below
7099 __ sltu(AT, op1, op2);
7100 if(&L)
7101 __ bne(AT, R0, L);
7102 else
7103 __ bne(AT, R0, (int)0);
7104 break;
7105 case 0x06: //below_equal
7106 __ sltu(AT, op2, op1);
7107 if(&L)
7108 __ beq(AT, R0, L);
7109 else
7110 __ beq(AT, R0, (int)0);
7111 break;
7112 default:
7113 Unimplemented();
7114 }
7115 __ nop();
7116 %}
7118 ins_pc_relative(1);
7119 ins_pipe( pipe_alu_branch );
7120 %}
7123 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7124 match( If cmp (CmpU src1 src2) );
7125 effect(USE labl);
7126 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7128 ins_encode %{
7129 Register op1 = $src1$$Register;
7130 int val = $src2$$constant;
7131 Label &L = *($labl$$label);
7132 int flag = $cmp$$cmpcode;
7134 __ move(AT, val);
7135 switch(flag)
7136 {
7137 case 0x01: //equal
7138 if (&L)
7139 __ beq(op1, AT, L);
7140 else
7141 __ beq(op1, AT, (int)0);
7142 break;
7143 case 0x02: //not_equal
7144 if (&L)
7145 __ bne(op1, AT, L);
7146 else
7147 __ bne(op1, AT, (int)0);
7148 break;
7149 case 0x03: //above
7150 __ sltu(AT, AT, op1);
7151 if(&L)
7152 __ bne(R0, AT, L);
7153 else
7154 __ bne(R0, AT, (int)0);
7155 break;
7156 case 0x04: //above_equal
7157 __ sltu(AT, op1, AT);
7158 if(&L)
7159 __ beq(AT, R0, L);
7160 else
7161 __ beq(AT, R0, (int)0);
7162 break;
7163 case 0x05: //below
7164 __ sltu(AT, op1, AT);
7165 if(&L)
7166 __ bne(R0, AT, L);
7167 else
7168 __ bne(R0, AT, (int)0);
7169 break;
7170 case 0x06: //below_equal
7171 __ sltu(AT, AT, op1);
7172 if(&L)
7173 __ beq(AT, R0, L);
7174 else
7175 __ beq(AT, R0, (int)0);
7176 break;
7177 default:
7178 Unimplemented();
7179 }
7180 __ nop();
7181 %}
7183 ins_pc_relative(1);
7184 ins_pipe( pipe_alu_branch );
7185 %}
7187 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7188 match( If cmp (CmpI src1 src2) );
7189 effect(USE labl);
7190 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7192 ins_encode %{
7193 Register op1 = $src1$$Register;
7194 Register op2 = $src2$$Register;
7195 Label &L = *($labl$$label);
7196 int flag = $cmp$$cmpcode;
7198 switch(flag)
7199 {
7200 case 0x01: //equal
7201 if (&L)
7202 __ beq(op1, op2, L);
7203 else
7204 __ beq(op1, op2, (int)0);
7205 break;
7206 case 0x02: //not_equal
7207 if (&L)
7208 __ bne(op1, op2, L);
7209 else
7210 __ bne(op1, op2, (int)0);
7211 break;
7212 case 0x03: //above
7213 __ slt(AT, op2, op1);
7214 if(&L)
7215 __ bne(R0, AT, L);
7216 else
7217 __ bne(R0, AT, (int)0);
7218 break;
7219 case 0x04: //above_equal
7220 __ slt(AT, op1, op2);
7221 if(&L)
7222 __ beq(AT, R0, L);
7223 else
7224 __ beq(AT, R0, (int)0);
7225 break;
7226 case 0x05: //below
7227 __ slt(AT, op1, op2);
7228 if(&L)
7229 __ bne(R0, AT, L);
7230 else
7231 __ bne(R0, AT, (int)0);
7232 break;
7233 case 0x06: //below_equal
7234 __ slt(AT, op2, op1);
7235 if(&L)
7236 __ beq(AT, R0, L);
7237 else
7238 __ beq(AT, R0, (int)0);
7239 break;
7240 default:
7241 Unimplemented();
7242 }
7243 __ nop();
7244 %}
7246 ins_pc_relative(1);
7247 ins_pipe( pipe_alu_branch );
7248 %}
7250 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7251 match( If cmp (CmpI src1 src2) );
7252 effect(USE labl);
7253 ins_cost(170);
7254 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7256 ins_encode %{
7257 Register op1 = $src1$$Register;
7258 // int val = $src2$$constant;
7259 Label &L = *($labl$$label);
7260 int flag = $cmp$$cmpcode;
7262 //__ move(AT, val);
7263 switch(flag)
7264 {
7265 case 0x01: //equal
7266 if (&L)
7267 __ beq(op1, R0, L);
7268 else
7269 __ beq(op1, R0, (int)0);
7270 break;
7271 case 0x02: //not_equal
7272 if (&L)
7273 __ bne(op1, R0, L);
7274 else
7275 __ bne(op1, R0, (int)0);
7276 break;
7277 case 0x03: //greater
7278 if(&L)
7279 __ bgtz(op1, L);
7280 else
7281 __ bgtz(op1, (int)0);
7282 break;
7283 case 0x04: //greater_equal
7284 if(&L)
7285 __ bgez(op1, L);
7286 else
7287 __ bgez(op1, (int)0);
7288 break;
7289 case 0x05: //less
7290 if(&L)
7291 __ bltz(op1, L);
7292 else
7293 __ bltz(op1, (int)0);
7294 break;
7295 case 0x06: //less_equal
7296 if(&L)
7297 __ blez(op1, L);
7298 else
7299 __ blez(op1, (int)0);
7300 break;
7301 default:
7302 Unimplemented();
7303 }
7304 __ nop();
7305 %}
7307 ins_pc_relative(1);
7308 ins_pipe( pipe_alu_branch );
7309 %}
7312 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7313 match( If cmp (CmpI src1 src2) );
7314 effect(USE labl);
7315 ins_cost(200);
7316 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7318 ins_encode %{
7319 Register op1 = $src1$$Register;
7320 int val = $src2$$constant;
7321 Label &L = *($labl$$label);
7322 int flag = $cmp$$cmpcode;
7324 __ move(AT, val);
7325 switch(flag)
7326 {
7327 case 0x01: //equal
7328 if (&L)
7329 __ beq(op1, AT, L);
7330 else
7331 __ beq(op1, AT, (int)0);
7332 break;
7333 case 0x02: //not_equal
7334 if (&L)
7335 __ bne(op1, AT, L);
7336 else
7337 __ bne(op1, AT, (int)0);
7338 break;
7339 case 0x03: //greater
7340 __ slt(AT, AT, op1);
7341 if(&L)
7342 __ bne(R0, AT, L);
7343 else
7344 __ bne(R0, AT, (int)0);
7345 break;
7346 case 0x04: //greater_equal
7347 __ slt(AT, op1, AT);
7348 if(&L)
7349 __ beq(AT, R0, L);
7350 else
7351 __ beq(AT, R0, (int)0);
7352 break;
7353 case 0x05: //less
7354 __ slt(AT, op1, AT);
7355 if(&L)
7356 __ bne(R0, AT, L);
7357 else
7358 __ bne(R0, AT, (int)0);
7359 break;
7360 case 0x06: //less_equal
7361 __ slt(AT, AT, op1);
7362 if(&L)
7363 __ beq(AT, R0, L);
7364 else
7365 __ beq(AT, R0, (int)0);
7366 break;
7367 default:
7368 Unimplemented();
7369 }
7370 __ nop();
7371 %}
7373 ins_pc_relative(1);
7374 ins_pipe( pipe_alu_branch );
7375 %}
7377 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7378 match( If cmp (CmpU src1 zero) );
7379 effect(USE labl);
7380 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7382 ins_encode %{
7383 Register op1 = $src1$$Register;
7384 Label &L = *($labl$$label);
7385 int flag = $cmp$$cmpcode;
7387 switch(flag)
7388 {
7389 case 0x01: //equal
7390 if (&L)
7391 __ beq(op1, R0, L);
7392 else
7393 __ beq(op1, R0, (int)0);
7394 break;
7395 case 0x02: //not_equal
7396 if (&L)
7397 __ bne(op1, R0, L);
7398 else
7399 __ bne(op1, R0, (int)0);
7400 break;
7401 case 0x03: //above
7402 if(&L)
7403 __ bne(R0, op1, L);
7404 else
7405 __ bne(R0, op1, (int)0);
7406 break;
7407 case 0x04: //above_equal
7408 if(&L)
7409 __ beq(R0, R0, L);
7410 else
7411 __ beq(R0, R0, (int)0);
7412 break;
7413 case 0x05: //below
7414 return;
7415 break;
7416 case 0x06: //below_equal
7417 if(&L)
7418 __ beq(op1, R0, L);
7419 else
7420 __ beq(op1, R0, (int)0);
7421 break;
7422 default:
7423 Unimplemented();
7424 }
7425 __ nop();
7426 %}
7428 ins_pc_relative(1);
7429 ins_pipe( pipe_alu_branch );
7430 %}
7433 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7434 match( If cmp (CmpU src1 src2) );
7435 effect(USE labl);
7436 ins_cost(180);
7437 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7439 ins_encode %{
7440 Register op1 = $src1$$Register;
7441 int val = $src2$$constant;
7442 Label &L = *($labl$$label);
7443 int flag = $cmp$$cmpcode;
7445 switch(flag)
7446 {
7447 case 0x01: //equal
7448 __ move(AT, val);
7449 if (&L)
7450 __ beq(op1, AT, L);
7451 else
7452 __ beq(op1, AT, (int)0);
7453 break;
7454 case 0x02: //not_equal
7455 __ move(AT, val);
7456 if (&L)
7457 __ bne(op1, AT, L);
7458 else
7459 __ bne(op1, AT, (int)0);
7460 break;
7461 case 0x03: //above
7462 __ move(AT, val);
7463 __ sltu(AT, AT, op1);
7464 if(&L)
7465 __ bne(R0, AT, L);
7466 else
7467 __ bne(R0, AT, (int)0);
7468 break;
7469 case 0x04: //above_equal
7470 __ sltiu(AT, op1, val);
7471 if(&L)
7472 __ beq(AT, R0, L);
7473 else
7474 __ beq(AT, R0, (int)0);
7475 break;
7476 case 0x05: //below
7477 __ sltiu(AT, op1, val);
7478 if(&L)
7479 __ bne(R0, AT, L);
7480 else
7481 __ bne(R0, AT, (int)0);
7482 break;
7483 case 0x06: //below_equal
7484 __ move(AT, val);
7485 __ sltu(AT, AT, op1);
7486 if(&L)
7487 __ beq(AT, R0, L);
7488 else
7489 __ beq(AT, R0, (int)0);
7490 break;
7491 default:
7492 Unimplemented();
7493 }
7494 __ nop();
7495 %}
7497 ins_pc_relative(1);
7498 ins_pipe( pipe_alu_branch );
7499 %}
7502 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7503 match( If cmp (CmpL src1 src2) );
7504 effect(USE labl);
7505 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7506 ins_cost(250);
7508 ins_encode %{
7509 Register opr1_reg = as_Register($src1$$reg);
7510 Register opr2_reg = as_Register($src2$$reg);
7512 Label &target = *($labl$$label);
7513 int flag = $cmp$$cmpcode;
7515 switch(flag)
7516 {
7517 case 0x01: //equal
7518 if (&target)
7519 __ beq(opr1_reg, opr2_reg, target);
7520 else
7521 __ beq(opr1_reg, opr2_reg, (int)0);
7522 __ delayed()->nop();
7523 break;
7525 case 0x02: //not_equal
7526 if(&target)
7527 __ bne(opr1_reg, opr2_reg, target);
7528 else
7529 __ bne(opr1_reg, opr2_reg, (int)0);
7530 __ delayed()->nop();
7531 break;
7533 case 0x03: //greater
7534 __ slt(AT, opr2_reg, opr1_reg);
7535 if(&target)
7536 __ bne(AT, R0, target);
7537 else
7538 __ bne(AT, R0, (int)0);
7539 __ delayed()->nop();
7540 break;
7542 case 0x04: //greater_equal
7543 __ slt(AT, opr1_reg, opr2_reg);
7544 if(&target)
7545 __ beq(AT, R0, target);
7546 else
7547 __ beq(AT, R0, (int)0);
7548 __ delayed()->nop();
7550 break;
7552 case 0x05: //less
7553 __ slt(AT, opr1_reg, opr2_reg);
7554 if(&target)
7555 __ bne(AT, R0, target);
7556 else
7557 __ bne(AT, R0, (int)0);
7558 __ delayed()->nop();
7560 break;
7562 case 0x06: //less_equal
7563 __ slt(AT, opr2_reg, opr1_reg);
7565 if(&target)
7566 __ beq(AT, R0, target);
7567 else
7568 __ beq(AT, R0, (int)0);
7569 __ delayed()->nop();
7571 break;
7573 default:
7574 Unimplemented();
7575 }
7576 %}
7579 ins_pc_relative(1);
7580 ins_pipe( pipe_alu_branch );
7581 %}
7583 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7584 match( If cmp (CmpL src1 src2) );
7585 effect(USE labl);
7586 ins_cost(180);
7587 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7589 ins_encode %{
7590 Register op1 = $src1$$Register;
7591 int val = $src2$$constant;
7592 Label &L = *($labl$$label);
7593 int flag = $cmp$$cmpcode;
7595 __ daddiu(AT, op1, -1 * val);
7596 switch(flag)
7597 {
7598 case 0x01: //equal
7599 if (&L)
7600 __ beq(R0, AT, L);
7601 else
7602 __ beq(R0, AT, (int)0);
7603 break;
7604 case 0x02: //not_equal
7605 if (&L)
7606 __ bne(R0, AT, L);
7607 else
7608 __ bne(R0, AT, (int)0);
7609 break;
7610 case 0x03: //greater
7611 if(&L)
7612 __ bgtz(AT, L);
7613 else
7614 __ bgtz(AT, (int)0);
7615 break;
7616 case 0x04: //greater_equal
7617 if(&L)
7618 __ bgez(AT, L);
7619 else
7620 __ bgez(AT, (int)0);
7621 break;
7622 case 0x05: //less
7623 if(&L)
7624 __ bltz(AT, L);
7625 else
7626 __ bltz(AT, (int)0);
7627 break;
7628 case 0x06: //less_equal
7629 if(&L)
7630 __ blez(AT, L);
7631 else
7632 __ blez(AT, (int)0);
7633 break;
7634 default:
7635 Unimplemented();
7636 }
7637 __ nop();
7638 %}
7640 ins_pc_relative(1);
7641 ins_pipe( pipe_alu_branch );
7642 %}
7645 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7646 match( If cmp (CmpI src1 src2) );
7647 effect(USE labl);
7648 ins_cost(180);
7649 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7651 ins_encode %{
7652 Register op1 = $src1$$Register;
7653 int val = $src2$$constant;
7654 Label &L = *($labl$$label);
7655 int flag = $cmp$$cmpcode;
7657 __ addiu32(AT, op1, -1 * val);
7658 switch(flag)
7659 {
7660 case 0x01: //equal
7661 if (&L)
7662 __ beq(R0, AT, L);
7663 else
7664 __ beq(R0, AT, (int)0);
7665 break;
7666 case 0x02: //not_equal
7667 if (&L)
7668 __ bne(R0, AT, L);
7669 else
7670 __ bne(R0, AT, (int)0);
7671 break;
7672 case 0x03: //greater
7673 if(&L)
7674 __ bgtz(AT, L);
7675 else
7676 __ bgtz(AT, (int)0);
7677 break;
7678 case 0x04: //greater_equal
7679 if(&L)
7680 __ bgez(AT, L);
7681 else
7682 __ bgez(AT, (int)0);
7683 break;
7684 case 0x05: //less
7685 if(&L)
7686 __ bltz(AT, L);
7687 else
7688 __ bltz(AT, (int)0);
7689 break;
7690 case 0x06: //less_equal
7691 if(&L)
7692 __ blez(AT, L);
7693 else
7694 __ blez(AT, (int)0);
7695 break;
7696 default:
7697 Unimplemented();
7698 }
7699 __ nop();
7700 %}
7702 ins_pc_relative(1);
7703 ins_pipe( pipe_alu_branch );
7704 %}
7706 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7707 match( If cmp (CmpL src1 zero) );
7708 effect(USE labl);
7709 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7710 ins_cost(150);
7712 ins_encode %{
7713 Register opr1_reg = as_Register($src1$$reg);
7714 Label &target = *($labl$$label);
7715 int flag = $cmp$$cmpcode;
7717 switch(flag)
7718 {
7719 case 0x01: //equal
7720 if (&target)
7721 __ beq(opr1_reg, R0, target);
7722 else
7723 __ beq(opr1_reg, R0, int(0));
7724 break;
7726 case 0x02: //not_equal
7727 if(&target)
7728 __ bne(opr1_reg, R0, target);
7729 else
7730 __ bne(opr1_reg, R0, (int)0);
7731 break;
7733 case 0x03: //greater
7734 if(&target)
7735 __ bgtz(opr1_reg, target);
7736 else
7737 __ bgtz(opr1_reg, (int)0);
7738 break;
7740 case 0x04: //greater_equal
7741 if(&target)
7742 __ bgez(opr1_reg, target);
7743 else
7744 __ bgez(opr1_reg, (int)0);
7745 break;
7747 case 0x05: //less
7748 __ slt(AT, opr1_reg, R0);
7749 if(&target)
7750 __ bne(AT, R0, target);
7751 else
7752 __ bne(AT, R0, (int)0);
7753 break;
7755 case 0x06: //less_equal
7756 if (&target)
7757 __ blez(opr1_reg, target);
7758 else
7759 __ blez(opr1_reg, int(0));
7760 break;
7762 default:
7763 Unimplemented();
7764 }
7765 __ delayed()->nop();
7766 %}
7769 ins_pc_relative(1);
7770 ins_pipe( pipe_alu_branch );
7771 %}
7774 //FIXME
7775 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7776 match( If cmp (CmpF src1 src2) );
7777 effect(USE labl);
7778 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7780 ins_encode %{
7781 FloatRegister reg_op1 = $src1$$FloatRegister;
7782 FloatRegister reg_op2 = $src2$$FloatRegister;
7783 Label &L = *($labl$$label);
7784 int flag = $cmp$$cmpcode;
7786 switch(flag)
7787 {
7788 case 0x01: //equal
7789 __ c_eq_s(reg_op1, reg_op2);
7790 if (&L)
7791 __ bc1t(L);
7792 else
7793 __ bc1t((int)0);
7794 break;
7795 case 0x02: //not_equal
7796 __ c_eq_s(reg_op1, reg_op2);
7797 if (&L)
7798 __ bc1f(L);
7799 else
7800 __ bc1f((int)0);
7801 break;
7802 case 0x03: //greater
7803 __ c_ule_s(reg_op1, reg_op2);
7804 if(&L)
7805 __ bc1f(L);
7806 else
7807 __ bc1f((int)0);
7808 break;
7809 case 0x04: //greater_equal
7810 __ c_ult_s(reg_op1, reg_op2);
7811 if(&L)
7812 __ bc1f(L);
7813 else
7814 __ bc1f((int)0);
7815 break;
7816 case 0x05: //less
7817 __ c_ult_s(reg_op1, reg_op2);
7818 if(&L)
7819 __ bc1t(L);
7820 else
7821 __ bc1t((int)0);
7822 break;
7823 case 0x06: //less_equal
7824 __ c_ule_s(reg_op1, reg_op2);
7825 if(&L)
7826 __ bc1t(L);
7827 else
7828 __ bc1t((int)0);
7829 break;
7830 default:
7831 Unimplemented();
7832 }
7833 __ nop();
7834 %}
7836 ins_pc_relative(1);
7837 ins_pipe(pipe_slow);
7838 %}
7840 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7841 match( If cmp (CmpD src1 src2) );
7842 effect(USE labl);
7843 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7845 ins_encode %{
7846 FloatRegister reg_op1 = $src1$$FloatRegister;
7847 FloatRegister reg_op2 = $src2$$FloatRegister;
7848 Label &L = *($labl$$label);
7849 int flag = $cmp$$cmpcode;
7851 switch(flag)
7852 {
7853 case 0x01: //equal
7854 __ c_eq_d(reg_op1, reg_op2);
7855 if (&L)
7856 __ bc1t(L);
7857 else
7858 __ bc1t((int)0);
7859 break;
7860 case 0x02: //not_equal
7861 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7862 __ c_eq_d(reg_op1, reg_op2);
7863 if (&L)
7864 __ bc1f(L);
7865 else
7866 __ bc1f((int)0);
7867 break;
7868 case 0x03: //greater
7869 __ c_ule_d(reg_op1, reg_op2);
7870 if(&L)
7871 __ bc1f(L);
7872 else
7873 __ bc1f((int)0);
7874 break;
7875 case 0x04: //greater_equal
7876 __ c_ult_d(reg_op1, reg_op2);
7877 if(&L)
7878 __ bc1f(L);
7879 else
7880 __ bc1f((int)0);
7881 break;
7882 case 0x05: //less
7883 __ c_ult_d(reg_op1, reg_op2);
7884 if(&L)
7885 __ bc1t(L);
7886 else
7887 __ bc1t((int)0);
7888 break;
7889 case 0x06: //less_equal
7890 __ c_ule_d(reg_op1, reg_op2);
7891 if(&L)
7892 __ bc1t(L);
7893 else
7894 __ bc1t((int)0);
7895 break;
7896 default:
7897 Unimplemented();
7898 }
7899 __ nop();
7900 %}
7902 ins_pc_relative(1);
7903 ins_pipe(pipe_slow);
7904 %}
// Call Runtime Instruction
// Direct call into the VM runtime; the target address comes from the method
// operand and the encoding is shared via the Java_To_Runtime enc_class.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors

// Acquire barrier: emits no code (size 0) — loads on this platform are
// matched such that no explicit fence is required here.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emits a full sync instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire-on-lock: empty encoding — the acquire semantics are provided by the
// CAS inside the preceding FastLock.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: emits no code (size 0).
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emits a full sync instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release-on-unlock: empty encoding — the release semantics are provided by
// the FastUnlock sequence itself.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Volatile barrier: full sync, skipped entirely on uniprocessor systems.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return; // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier elided when the matcher proves a store-load barrier is
// already present (post_store_load_barrier); zero-size encoding.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// Store-store barrier: empty encoding on this platform.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
8025 //----------Move Instructions--------------------------------------------------
8026 instruct castX2P(mRegP dst, mRegL src) %{
8027 match(Set dst (CastX2P src));
8028 format %{ "castX2P $dst, $src @ castX2P" %}
8029 ins_encode %{
8030 Register src = $src$$Register;
8031 Register dst = $dst$$Register;
8033 if(src != dst)
8034 __ move(dst, src);
8035 %}
8036 ins_cost(10);
8037 ins_pipe( ialu_regI_mov );
8038 %}
8040 instruct castP2X(mRegL dst, mRegP src ) %{
8041 match(Set dst (CastP2X src));
8043 format %{ "mov $dst, $src\t #@castP2X" %}
8044 ins_encode %{
8045 Register src = $src$$Register;
8046 Register dst = $dst$$Register;
8048 if(src != dst)
8049 __ move(dst, src);
8050 %}
8051 ins_pipe( ialu_regI_mov );
8052 %}
8054 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8055 match(Set dst (MoveF2I src));
8056 effect(DEF dst, USE src);
8057 ins_cost(85);
8058 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8059 ins_encode %{
8060 Register dst = as_Register($dst$$reg);
8061 FloatRegister src = as_FloatRegister($src$$reg);
8063 __ mfc1(dst, src);
8064 %}
8065 ins_pipe( pipe_slow );
8066 %}
8068 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8069 match(Set dst (MoveI2F src));
8070 effect(DEF dst, USE src);
8071 ins_cost(85);
8072 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8073 ins_encode %{
8074 Register src = as_Register($src$$reg);
8075 FloatRegister dst = as_FloatRegister($dst$$reg);
8077 __ mtc1(src, dst);
8078 %}
8079 ins_pipe( pipe_slow );
8080 %}
8082 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8083 match(Set dst (MoveD2L src));
8084 effect(DEF dst, USE src);
8085 ins_cost(85);
8086 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8087 ins_encode %{
8088 Register dst = as_Register($dst$$reg);
8089 FloatRegister src = as_FloatRegister($src$$reg);
8091 __ dmfc1(dst, src);
8092 %}
8093 ins_pipe( pipe_slow );
8094 %}
8096 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8097 match(Set dst (MoveL2D src));
8098 effect(DEF dst, USE src);
8099 ins_cost(85);
8100 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8101 ins_encode %{
8102 FloatRegister dst = as_FloatRegister($dst$$reg);
8103 Register src = as_Register($src$$reg);
8105 __ dmtc1(src, dst);
8106 %}
8107 ins_pipe( pipe_slow );
8108 %}
8110 //----------Conditional Move---------------------------------------------------
8111 // Conditional move
8112 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8113 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8114 ins_cost(80);
8115 format %{
8116 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8117 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8118 %}
8120 ins_encode %{
8121 Register op1 = $tmp1$$Register;
8122 Register op2 = $tmp2$$Register;
8123 Register dst = $dst$$Register;
8124 Register src = $src$$Register;
8125 int flag = $cop$$cmpcode;
8127 switch(flag)
8128 {
8129 case 0x01: //equal
8130 __ subu32(AT, op1, op2);
8131 __ movz(dst, src, AT);
8132 break;
8134 case 0x02: //not_equal
8135 __ subu32(AT, op1, op2);
8136 __ movn(dst, src, AT);
8137 break;
8139 case 0x03: //great
8140 __ slt(AT, op2, op1);
8141 __ movn(dst, src, AT);
8142 break;
8144 case 0x04: //great_equal
8145 __ slt(AT, op1, op2);
8146 __ movz(dst, src, AT);
8147 break;
8149 case 0x05: //less
8150 __ slt(AT, op1, op2);
8151 __ movn(dst, src, AT);
8152 break;
8154 case 0x06: //less_equal
8155 __ slt(AT, op2, op1);
8156 __ movz(dst, src, AT);
8157 break;
8159 default:
8160 Unimplemented();
8161 }
8162 %}
8164 ins_pipe( pipe_slow );
8165 %}
8167 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8168 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8169 ins_cost(80);
8170 format %{
8171 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8172 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8173 %}
8174 ins_encode %{
8175 Register op1 = $tmp1$$Register;
8176 Register op2 = $tmp2$$Register;
8177 Register dst = $dst$$Register;
8178 Register src = $src$$Register;
8179 int flag = $cop$$cmpcode;
8181 switch(flag)
8182 {
8183 case 0x01: //equal
8184 __ subu(AT, op1, op2);
8185 __ movz(dst, src, AT);
8186 break;
8188 case 0x02: //not_equal
8189 __ subu(AT, op1, op2);
8190 __ movn(dst, src, AT);
8191 break;
8193 case 0x03: //above
8194 __ sltu(AT, op2, op1);
8195 __ movn(dst, src, AT);
8196 break;
8198 case 0x04: //above_equal
8199 __ sltu(AT, op1, op2);
8200 __ movz(dst, src, AT);
8201 break;
8203 case 0x05: //below
8204 __ sltu(AT, op1, op2);
8205 __ movn(dst, src, AT);
8206 break;
8208 case 0x06: //below_equal
8209 __ sltu(AT, op2, op1);
8210 __ movz(dst, src, AT);
8211 break;
8213 default:
8214 Unimplemented();
8215 }
8216 %}
8218 ins_pipe( pipe_slow );
8219 %}
8221 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8222 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8223 ins_cost(80);
8224 format %{
8225 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8226 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8227 %}
8228 ins_encode %{
8229 Register op1 = $tmp1$$Register;
8230 Register op2 = $tmp2$$Register;
8231 Register dst = $dst$$Register;
8232 Register src = $src$$Register;
8233 int flag = $cop$$cmpcode;
8235 switch(flag)
8236 {
8237 case 0x01: //equal
8238 __ subu32(AT, op1, op2);
8239 __ movz(dst, src, AT);
8240 break;
8242 case 0x02: //not_equal
8243 __ subu32(AT, op1, op2);
8244 __ movn(dst, src, AT);
8245 break;
8247 case 0x03: //above
8248 __ sltu(AT, op2, op1);
8249 __ movn(dst, src, AT);
8250 break;
8252 case 0x04: //above_equal
8253 __ sltu(AT, op1, op2);
8254 __ movz(dst, src, AT);
8255 break;
8257 case 0x05: //below
8258 __ sltu(AT, op1, op2);
8259 __ movn(dst, src, AT);
8260 break;
8262 case 0x06: //below_equal
8263 __ sltu(AT, op2, op1);
8264 __ movz(dst, src, AT);
8265 break;
8267 default:
8268 Unimplemented();
8269 }
8270 %}
8272 ins_pipe( pipe_slow );
8273 %}
8275 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8276 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8277 ins_cost(80);
8278 format %{
8279 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8280 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8281 %}
8282 ins_encode %{
8283 Register op1 = $tmp1$$Register;
8284 Register op2 = $tmp2$$Register;
8285 Register dst = $dst$$Register;
8286 Register src = $src$$Register;
8287 int flag = $cop$$cmpcode;
8289 switch(flag)
8290 {
8291 case 0x01: //equal
8292 __ subu32(AT, op1, op2);
8293 __ movz(dst, src, AT);
8294 break;
8296 case 0x02: //not_equal
8297 __ subu32(AT, op1, op2);
8298 __ movn(dst, src, AT);
8299 break;
8301 case 0x03: //above
8302 __ sltu(AT, op2, op1);
8303 __ movn(dst, src, AT);
8304 break;
8306 case 0x04: //above_equal
8307 __ sltu(AT, op1, op2);
8308 __ movz(dst, src, AT);
8309 break;
8311 case 0x05: //below
8312 __ sltu(AT, op1, op2);
8313 __ movn(dst, src, AT);
8314 break;
8316 case 0x06: //below_equal
8317 __ sltu(AT, op2, op1);
8318 __ movz(dst, src, AT);
8319 break;
8321 default:
8322 Unimplemented();
8323 }
8324 %}
8326 ins_pipe( pipe_slow );
8327 %}
8329 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8330 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8331 ins_cost(80);
8332 format %{
8333 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8334 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8335 %}
8336 ins_encode %{
8337 Register op1 = $tmp1$$Register;
8338 Register op2 = $tmp2$$Register;
8339 Register dst = $dst$$Register;
8340 Register src = $src$$Register;
8341 int flag = $cop$$cmpcode;
8343 switch(flag)
8344 {
8345 case 0x01: //equal
8346 __ subu(AT, op1, op2);
8347 __ movz(dst, src, AT);
8348 break;
8350 case 0x02: //not_equal
8351 __ subu(AT, op1, op2);
8352 __ movn(dst, src, AT);
8353 break;
8355 case 0x03: //above
8356 __ sltu(AT, op2, op1);
8357 __ movn(dst, src, AT);
8358 break;
8360 case 0x04: //above_equal
8361 __ sltu(AT, op1, op2);
8362 __ movz(dst, src, AT);
8363 break;
8365 case 0x05: //below
8366 __ sltu(AT, op1, op2);
8367 __ movn(dst, src, AT);
8368 break;
8370 case 0x06: //below_equal
8371 __ sltu(AT, op2, op1);
8372 __ movz(dst, src, AT);
8373 break;
8375 default:
8376 Unimplemented();
8377 }
8378 %}
8380 ins_pipe( pipe_slow );
8381 %}
8383 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8384 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8385 ins_cost(80);
8386 format %{
8387 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8388 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8389 %}
8390 ins_encode %{
8391 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8392 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8393 Register dst = as_Register($dst$$reg);
8394 Register src = as_Register($src$$reg);
8396 int flag = $cop$$cmpcode;
8398 switch(flag)
8399 {
8400 case 0x01: //equal
8401 __ c_eq_d(reg_op1, reg_op2);
8402 __ movt(dst, src);
8403 break;
8404 case 0x02: //not_equal
8405 __ c_eq_d(reg_op1, reg_op2);
8406 __ movf(dst, src);
8407 break;
8408 case 0x03: //greater
8409 __ c_ole_d(reg_op1, reg_op2);
8410 __ movf(dst, src);
8411 break;
8412 case 0x04: //greater_equal
8413 __ c_olt_d(reg_op1, reg_op2);
8414 __ movf(dst, src);
8415 break;
8416 case 0x05: //less
8417 __ c_ult_d(reg_op1, reg_op2);
8418 __ movt(dst, src);
8419 break;
8420 case 0x06: //less_equal
8421 __ c_ule_d(reg_op1, reg_op2);
8422 __ movt(dst, src);
8423 break;
8424 default:
8425 Unimplemented();
8426 }
8427 %}
8429 ins_pipe( pipe_slow );
8430 %}
8433 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8434 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8435 ins_cost(80);
8436 format %{
8437 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8438 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8439 %}
8440 ins_encode %{
8441 Register op1 = $tmp1$$Register;
8442 Register op2 = $tmp2$$Register;
8443 Register dst = $dst$$Register;
8444 Register src = $src$$Register;
8445 int flag = $cop$$cmpcode;
8447 switch(flag)
8448 {
8449 case 0x01: //equal
8450 __ subu32(AT, op1, op2);
8451 __ movz(dst, src, AT);
8452 break;
8454 case 0x02: //not_equal
8455 __ subu32(AT, op1, op2);
8456 __ movn(dst, src, AT);
8457 break;
8459 case 0x03: //above
8460 __ sltu(AT, op2, op1);
8461 __ movn(dst, src, AT);
8462 break;
8464 case 0x04: //above_equal
8465 __ sltu(AT, op1, op2);
8466 __ movz(dst, src, AT);
8467 break;
8469 case 0x05: //below
8470 __ sltu(AT, op1, op2);
8471 __ movn(dst, src, AT);
8472 break;
8474 case 0x06: //below_equal
8475 __ sltu(AT, op2, op1);
8476 __ movz(dst, src, AT);
8477 break;
8479 default:
8480 Unimplemented();
8481 }
8482 %}
8484 ins_pipe( pipe_slow );
8485 %}
// Conditional move of an int selected by an UNSIGNED int compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// NOTE(review): equality uses the 64-bit subu (sibling CmpI rules use subu32);
// correct provided mRegI values are kept sign-extended, and sign-extension also
// preserves 32-bit unsigned ordering under the 64-bit sltu — confirm the
// register-width invariant for mRegI in this port.
8488 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8489 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8490 ins_cost(80);
8491 format %{
8492 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8493 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8494 %}
8495 ins_encode %{
8496 Register op1 = $tmp1$$Register;
8497 Register op2 = $tmp2$$Register;
8498 Register dst = $dst$$Register;
8499 Register src = $src$$Register;
8500 int flag = $cop$$cmpcode;
8502 switch(flag)
8503 {
8504 case 0x01: //equal
8505 __ subu(AT, op1, op2);
8506 __ movz(dst, src, AT);
8507 break;
8509 case 0x02: //not_equal
8510 __ subu(AT, op1, op2);
8511 __ movn(dst, src, AT);
8512 break;
8514 case 0x03: //above
8515 __ sltu(AT, op2, op1);
8516 __ movn(dst, src, AT);
8517 break;
8519 case 0x04: //above_equal
8520 __ sltu(AT, op1, op2);
8521 __ movz(dst, src, AT);
8522 break;
8524 case 0x05: //below
8525 __ sltu(AT, op1, op2);
8526 __ movn(dst, src, AT);
8527 break;
8529 case 0x06: //below_equal
8530 __ sltu(AT, op2, op1);
8531 __ movz(dst, src, AT);
8532 break;
8534 default:
8535 Unimplemented();
8536 }
8537 %}
8539 ins_pipe( pipe_slow );
8540 %}
// Conditional move of an int selected by a SIGNED 64-bit long compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// Equality uses subtraction (overflow irrelevant for a zero test); ordering
// correctly uses slt, a true signed 64-bit comparison.
8542 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8543 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8544 ins_cost(80);
8545 format %{
8546 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8547 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8548 %}
8549 ins_encode %{
8550 Register opr1 = as_Register($tmp1$$reg);
8551 Register opr2 = as_Register($tmp2$$reg);
8552 Register dst = $dst$$Register;
8553 Register src = $src$$Register;
8554 int flag = $cop$$cmpcode;
8556 switch(flag)
8557 {
8558 case 0x01: //equal
8559 __ subu(AT, opr1, opr2);
8560 __ movz(dst, src, AT);
8561 break;
8563 case 0x02: //not_equal
8564 __ subu(AT, opr1, opr2);
8565 __ movn(dst, src, AT);
8566 break;
8568 case 0x03: //greater
8569 __ slt(AT, opr2, opr1);
8570 __ movn(dst, src, AT);
8571 break;
8573 case 0x04: //greater_equal
8574 __ slt(AT, opr1, opr2);
8575 __ movz(dst, src, AT);
8576 break;
8578 case 0x05: //less
8579 __ slt(AT, opr1, opr2);
8580 __ movn(dst, src, AT);
8581 break;
8583 case 0x06: //less_equal
8584 __ slt(AT, opr2, opr1);
8585 __ movz(dst, src, AT);
8586 break;
8588 default:
8589 Unimplemented();
8590 }
8591 %}
8593 ins_pipe( pipe_slow );
8594 %}
// Conditional move of a pointer selected by a SIGNED 64-bit long compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// Same emission pattern as cmovI_cmpL_reg_reg (subtraction for the zero test,
// slt for signed ordering, movz/movn to perform the conditional copy).
8596 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8597 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8598 ins_cost(80);
8599 format %{
8600 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8601 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8602 %}
8603 ins_encode %{
8604 Register opr1 = as_Register($tmp1$$reg);
8605 Register opr2 = as_Register($tmp2$$reg);
8606 Register dst = $dst$$Register;
8607 Register src = $src$$Register;
8608 int flag = $cop$$cmpcode;
8610 switch(flag)
8611 {
8612 case 0x01: //equal
8613 __ subu(AT, opr1, opr2);
8614 __ movz(dst, src, AT);
8615 break;
8617 case 0x02: //not_equal
8618 __ subu(AT, opr1, opr2);
8619 __ movn(dst, src, AT);
8620 break;
8622 case 0x03: //greater
8623 __ slt(AT, opr2, opr1);
8624 __ movn(dst, src, AT);
8625 break;
8627 case 0x04: //greater_equal
8628 __ slt(AT, opr1, opr2);
8629 __ movz(dst, src, AT);
8630 break;
8632 case 0x05: //less
8633 __ slt(AT, opr1, opr2);
8634 __ movn(dst, src, AT);
8635 break;
8637 case 0x06: //less_equal
8638 __ slt(AT, opr2, opr1);
8639 __ movz(dst, src, AT);
8640 break;
8642 default:
8643 Unimplemented();
8644 }
8645 %}
8647 ins_pipe( pipe_slow );
8648 %}
// Conditional move of an int selected by a double compare: dst = src when
// "$tmp1 <cop> $tmp2" holds, else dst is unchanged. The c_*_d instruction sets
// the FPU condition flag; movt copies src when the flag is set, movf when it
// is clear. Ordering is expressed via the complement (e.g. "greater" == NOT
// ole), which also routes unordered (NaN) operands to the "compare false"
// flavors — presumably matching branchConD's NaN convention (confirm there).
8650 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8651 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8652 ins_cost(80);
8653 format %{
8654 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8655 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8656 %}
8657 ins_encode %{
8658 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8659 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8660 Register dst = as_Register($dst$$reg);
8661 Register src = as_Register($src$$reg);
8663 int flag = $cop$$cmpcode;
8665 switch(flag)
8666 {
8667 case 0x01: //equal
8668 __ c_eq_d(reg_op1, reg_op2);
8669 __ movt(dst, src);
8670 break;
8671 case 0x02: //not_equal
8672 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8673 __ c_eq_d(reg_op1, reg_op2);
8674 __ movf(dst, src);
8675 break;
8676 case 0x03: //greater
8677 __ c_ole_d(reg_op1, reg_op2);
8678 __ movf(dst, src);
8679 break;
8680 case 0x04: //greater_equal
8681 __ c_olt_d(reg_op1, reg_op2);
8682 __ movf(dst, src);
8683 break;
8684 case 0x05: //less
8685 __ c_ult_d(reg_op1, reg_op2);
8686 __ movt(dst, src);
8687 break;
8688 case 0x06: //less_equal
8689 __ c_ule_d(reg_op1, reg_op2);
8690 __ movt(dst, src);
8691 break;
8692 default:
8693 Unimplemented();
8694 }
8695 %}
8697 ins_pipe( pipe_slow );
8698 %}
// Conditional move of a pointer selected by an UNSIGNED pointer compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// 64-bit subu for the equality zero-test, sltu for unsigned ordering,
// movz/movn (copy when AT == 0 / AT != 0) for the conditional copy.
8701 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8702 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8703 ins_cost(80);
8704 format %{
8705 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8706 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8707 %}
8708 ins_encode %{
8709 Register op1 = $tmp1$$Register;
8710 Register op2 = $tmp2$$Register;
8711 Register dst = $dst$$Register;
8712 Register src = $src$$Register;
8713 int flag = $cop$$cmpcode;
8715 switch(flag)
8716 {
8717 case 0x01: //equal
8718 __ subu(AT, op1, op2);
8719 __ movz(dst, src, AT);
8720 break;
8722 case 0x02: //not_equal
8723 __ subu(AT, op1, op2);
8724 __ movn(dst, src, AT);
8725 break;
8727 case 0x03: //above
8728 __ sltu(AT, op2, op1);
8729 __ movn(dst, src, AT);
8730 break;
8732 case 0x04: //above_equal
8733 __ sltu(AT, op1, op2);
8734 __ movz(dst, src, AT);
8735 break;
8737 case 0x05: //below
8738 __ sltu(AT, op1, op2);
8739 __ movn(dst, src, AT);
8740 break;
8742 case 0x06: //below_equal
8743 __ sltu(AT, op2, op1);
8744 __ movz(dst, src, AT);
8745 break;
8747 default:
8748 Unimplemented();
8749 }
8750 %}
8752 ins_pipe( pipe_slow );
8753 %}
// Conditional move of a pointer selected by a SIGNED int compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// (The case comments previously said above/below, but cop is the signed cmpOp
// and slt is a signed comparison — relabeled greater/less accordingly.)
8755 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8756 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8757 ins_cost(80);
8758 format %{
8759 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8760 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8761 %}
8762 ins_encode %{
8763 Register op1 = $tmp1$$Register;
8764 Register op2 = $tmp2$$Register;
8765 Register dst = $dst$$Register;
8766 Register src = $src$$Register;
8767 int flag = $cop$$cmpcode;
8769 switch(flag)
8770 {
8771 case 0x01: //equal
8772 __ subu32(AT, op1, op2);
8773 __ movz(dst, src, AT);
8774 break;
8776 case 0x02: //not_equal
8777 __ subu32(AT, op1, op2);
8778 __ movn(dst, src, AT);
8779 break;
8781 case 0x03: //greater
8782 __ slt(AT, op2, op1);
8783 __ movn(dst, src, AT);
8784 break;
8786 case 0x04: //greater_equal
8787 __ slt(AT, op1, op2);
8788 __ movz(dst, src, AT);
8789 break;
8791 case 0x05: //less
8792 __ slt(AT, op1, op2);
8793 __ movn(dst, src, AT);
8794 break;
8796 case 0x06: //less_equal
8797 __ slt(AT, op2, op1);
8798 __ movz(dst, src, AT);
8799 break;
8801 default:
8802 Unimplemented();
8803 }
8804 %}
8806 ins_pipe( pipe_slow );
8807 %}
// Conditional move of a narrow oop selected by a SIGNED int compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// (Case comments previously said above/below; cop is the signed cmpOp and slt
// is a signed comparison — relabeled greater/less accordingly.)
8809 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8810 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8811 ins_cost(80);
8812 format %{
8813 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
8814 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
8815 %}
8816 ins_encode %{
8817 Register op1 = $tmp1$$Register;
8818 Register op2 = $tmp2$$Register;
8819 Register dst = $dst$$Register;
8820 Register src = $src$$Register;
8821 int flag = $cop$$cmpcode;
8823 switch(flag)
8824 {
8825 case 0x01: //equal
8826 __ subu32(AT, op1, op2);
8827 __ movz(dst, src, AT);
8828 break;
8830 case 0x02: //not_equal
8831 __ subu32(AT, op1, op2);
8832 __ movn(dst, src, AT);
8833 break;
8835 case 0x03: //greater
8836 __ slt(AT, op2, op1);
8837 __ movn(dst, src, AT);
8838 break;
8840 case 0x04: //greater_equal
8841 __ slt(AT, op1, op2);
8842 __ movz(dst, src, AT);
8843 break;
8845 case 0x05: //less
8846 __ slt(AT, op1, op2);
8847 __ movn(dst, src, AT);
8848 break;
8850 case 0x06: //less_equal
8851 __ slt(AT, op2, op1);
8852 __ movz(dst, src, AT);
8853 break;
8855 default:
8856 Unimplemented();
8857 }
8858 %}
8860 ins_pipe( pipe_slow );
8861 %}
// Conditional move of a long selected by a SIGNED int compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// subu32 produces a zero/non-zero equality witness; slt (signed) orders the
// operands — correct provided mRegI values are kept sign-extended to 64 bits
// (the port's convention — confirm).
8864 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8865 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8866 ins_cost(80);
8867 format %{
8868 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
8869 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
8870 %}
8872 ins_encode %{
8873 Register op1 = $tmp1$$Register;
8874 Register op2 = $tmp2$$Register;
8875 Register dst = as_Register($dst$$reg);
8876 Register src = as_Register($src$$reg);
8877 int flag = $cop$$cmpcode;
8879 switch(flag)
8880 {
8881 case 0x01: //equal
8882 __ subu32(AT, op1, op2);
8883 __ movz(dst, src, AT);
8884 break;
8886 case 0x02: //not_equal
8887 __ subu32(AT, op1, op2);
8888 __ movn(dst, src, AT);
8889 break;
8891 case 0x03: //greater
8892 __ slt(AT, op2, op1);
8893 __ movn(dst, src, AT);
8894 break;
8896 case 0x04: //greater_equal
8897 __ slt(AT, op1, op2);
8898 __ movz(dst, src, AT);
8899 break;
8901 case 0x05: //less
8902 __ slt(AT, op1, op2);
8903 __ movn(dst, src, AT);
8904 break;
8906 case 0x06: //less_equal
8907 __ slt(AT, op2, op1);
8908 __ movz(dst, src, AT);
8909 break;
8911 default:
8912 Unimplemented();
8913 }
8914 %}
8916 ins_pipe( pipe_slow );
8917 %}
// Conditional move of a long selected by a SIGNED 64-bit long compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// Subtraction is only used for the equality zero-test (overflow irrelevant);
// ordering uses slt, a true signed 64-bit comparison.
8919 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8920 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8921 ins_cost(80);
8922 format %{
8923 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8924 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8925 %}
8926 ins_encode %{
8927 Register opr1 = as_Register($tmp1$$reg);
8928 Register opr2 = as_Register($tmp2$$reg);
8929 Register dst = as_Register($dst$$reg);
8930 Register src = as_Register($src$$reg);
8931 int flag = $cop$$cmpcode;
8933 switch(flag)
8934 {
8935 case 0x01: //equal
8936 __ subu(AT, opr1, opr2);
8937 __ movz(dst, src, AT);
8938 break;
8940 case 0x02: //not_equal
8941 __ subu(AT, opr1, opr2);
8942 __ movn(dst, src, AT);
8943 break;
8945 case 0x03: //greater
8946 __ slt(AT, opr2, opr1);
8947 __ movn(dst, src, AT);
8948 break;
8950 case 0x04: //greater_equal
8951 __ slt(AT, opr1, opr2);
8952 __ movz(dst, src, AT);
8953 break;
8955 case 0x05: //less
8956 __ slt(AT, opr1, opr2);
8957 __ movn(dst, src, AT);
8958 break;
8960 case 0x06: //less_equal
8961 __ slt(AT, opr2, opr1);
8962 __ movz(dst, src, AT);
8963 break;
8965 default:
8966 Unimplemented();
8967 }
8968 %}
8970 ins_pipe( pipe_slow );
8971 %}
// Conditional move of a long selected by an UNSIGNED narrow-oop compare:
// dst = src when "$tmp1 <cop> $tmp2" holds, else dst is unchanged.
// subu32 for the 32-bit equality witness, sltu for unsigned ordering —
// presumably narrow oops are zero-extended in registers (TODO confirm).
8973 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8974 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8975 ins_cost(80);
8976 format %{
8977 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8978 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
8979 %}
8980 ins_encode %{
8981 Register op1 = $tmp1$$Register;
8982 Register op2 = $tmp2$$Register;
8983 Register dst = $dst$$Register;
8984 Register src = $src$$Register;
8985 int flag = $cop$$cmpcode;
8987 switch(flag)
8988 {
8989 case 0x01: //equal
8990 __ subu32(AT, op1, op2);
8991 __ movz(dst, src, AT);
8992 break;
8994 case 0x02: //not_equal
8995 __ subu32(AT, op1, op2);
8996 __ movn(dst, src, AT);
8997 break;
8999 case 0x03: //above
9000 __ sltu(AT, op2, op1);
9001 __ movn(dst, src, AT);
9002 break;
9004 case 0x04: //above_equal
9005 __ sltu(AT, op1, op2);
9006 __ movz(dst, src, AT);
9007 break;
9009 case 0x05: //below
9010 __ sltu(AT, op1, op2);
9011 __ movn(dst, src, AT);
9012 break;
9014 case 0x06: //below_equal
9015 __ sltu(AT, op2, op1);
9016 __ movz(dst, src, AT);
9017 break;
9019 default:
9020 Unimplemented();
9021 }
9022 %}
9024 ins_pipe( pipe_slow );
9025 %}
// Conditional move of a long selected by a double compare: dst = src when
// "$tmp1 <cop> $tmp2" holds, else dst is unchanged. c_*_d sets the FPU
// condition flag; movt/movf copy src when the flag is set/clear. Ordering uses
// the complemented ordered compares (greater == NOT ole, etc.), same pattern
// as cmovI_cmpD_reg_reg.
9028 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
9029 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9030 ins_cost(80);
9031 format %{
9032 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
9033 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
9034 %}
9035 ins_encode %{
9036 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9037 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9038 Register dst = as_Register($dst$$reg);
9039 Register src = as_Register($src$$reg);
9041 int flag = $cop$$cmpcode;
9043 switch(flag)
9044 {
9045 case 0x01: //equal
9046 __ c_eq_d(reg_op1, reg_op2);
9047 __ movt(dst, src);
9048 break;
9049 case 0x02: //not_equal
9050 __ c_eq_d(reg_op1, reg_op2);
9051 __ movf(dst, src);
9052 break;
9053 case 0x03: //greater
9054 __ c_ole_d(reg_op1, reg_op2);
9055 __ movf(dst, src);
9056 break;
9057 case 0x04: //greater_equal
9058 __ c_olt_d(reg_op1, reg_op2);
9059 __ movf(dst, src);
9060 break;
9061 case 0x05: //less
9062 __ c_ult_d(reg_op1, reg_op2);
9063 __ movt(dst, src);
9064 break;
9065 case 0x06: //less_equal
9066 __ c_ule_d(reg_op1, reg_op2);
9067 __ movt(dst, src);
9068 break;
9069 default:
9070 Unimplemented();
9071 }
9072 %}
9074 ins_pipe( pipe_slow );
9075 %}
// Conditional move of a double selected by a double compare. There is no FP
// conditional-move-on-FP-flag path here; instead each case emits a compare,
// a branch around the copy (bc1f/bc1t tests the FPU condition flag), a delay
// slot nop, and mov_d to perform the copy when the condition holds.
9077 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
9078 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9079 ins_cost(200);
9080 format %{
9081 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
9082 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
9083 %}
9084 ins_encode %{
9085 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9086 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9087 FloatRegister dst = as_FloatRegister($dst$$reg);
9088 FloatRegister src = as_FloatRegister($src$$reg);
9090 int flag = $cop$$cmpcode;
9092 Label L;
9094 switch(flag)
9095 {
9096 case 0x01: //equal
9097 __ c_eq_d(reg_op1, reg_op2);
9098 __ bc1f(L);
9099 __ nop();
9100 __ mov_d(dst, src);
9101 __ bind(L);
9102 break;
9103 case 0x02: //not_equal
9104 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
9105 __ c_eq_d(reg_op1, reg_op2);
9106 __ bc1t(L);
9107 __ nop();
9108 __ mov_d(dst, src);
9109 __ bind(L);
9110 break;
9111 case 0x03: //greater
9112 __ c_ole_d(reg_op1, reg_op2);
9113 __ bc1t(L);
9114 __ nop();
9115 __ mov_d(dst, src);
9116 __ bind(L);
9117 break;
9118 case 0x04: //greater_equal
9119 __ c_olt_d(reg_op1, reg_op2);
9120 __ bc1t(L);
9121 __ nop();
9122 __ mov_d(dst, src);
9123 __ bind(L);
9124 break;
9125 case 0x05: //less
9126 __ c_ult_d(reg_op1, reg_op2);
9127 __ bc1f(L);
9128 __ nop();
9129 __ mov_d(dst, src);
9130 __ bind(L);
9131 break;
9132 case 0x06: //less_equal
9133 __ c_ule_d(reg_op1, reg_op2);
9134 __ bc1f(L);
9135 __ nop();
9136 __ mov_d(dst, src);
9137 __ bind(L);
9138 break;
9139 default:
9140 Unimplemented();
9141 }
9142 %}
9144 ins_pipe( pipe_slow );
9145 %}
// Conditional move of a float selected by a SIGNED int compare. Since there is
// no GPR-flag-to-FPR conditional move, each case branches AROUND the copy when
// the condition fails (note the inverted branch sense), with a nop in the
// branch delay slot, then mov_s performs the copy.
9147 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9148 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9149 ins_cost(200);
9150 format %{
9151 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
9152 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
9153 %}
9155 ins_encode %{
9156 Register op1 = $tmp1$$Register;
9157 Register op2 = $tmp2$$Register;
9158 FloatRegister dst = as_FloatRegister($dst$$reg);
9159 FloatRegister src = as_FloatRegister($src$$reg);
9160 int flag = $cop$$cmpcode;
9161 Label L;
9163 switch(flag)
9164 {
9165 case 0x01: //equal
9166 __ bne(op1, op2, L);
9167 __ nop();
9168 __ mov_s(dst, src);
9169 __ bind(L);
9170 break;
9171 case 0x02: //not_equal
9172 __ beq(op1, op2, L);
9173 __ nop();
9174 __ mov_s(dst, src);
9175 __ bind(L);
9176 break;
9177 case 0x03: //greater
9178 __ slt(AT, op2, op1);
9179 __ beq(AT, R0, L);
9180 __ nop();
9181 __ mov_s(dst, src);
9182 __ bind(L);
9183 break;
9184 case 0x04: //greater_equal
9185 __ slt(AT, op1, op2);
9186 __ bne(AT, R0, L);
9187 __ nop();
9188 __ mov_s(dst, src);
9189 __ bind(L);
9190 break;
9191 case 0x05: //less
9192 __ slt(AT, op1, op2);
9193 __ beq(AT, R0, L);
9194 __ nop();
9195 __ mov_s(dst, src);
9196 __ bind(L);
9197 break;
9198 case 0x06: //less_equal
9199 __ slt(AT, op2, op1);
9200 __ bne(AT, R0, L);
9201 __ nop();
9202 __ mov_s(dst, src);
9203 __ bind(L);
9204 break;
9205 default:
9206 Unimplemented();
9207 }
9208 %}
9210 ins_pipe( pipe_slow );
9211 %}
// Conditional move of a double selected by a SIGNED int compare. Same
// branch-around-the-copy pattern as cmovF_cmpI_reg_reg (inverted branch
// skips the mov_d when the condition fails; nop fills the delay slot).
9213 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9214 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9215 ins_cost(200);
9216 format %{
9217 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
9218 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
9219 %}
9221 ins_encode %{
9222 Register op1 = $tmp1$$Register;
9223 Register op2 = $tmp2$$Register;
9224 FloatRegister dst = as_FloatRegister($dst$$reg);
9225 FloatRegister src = as_FloatRegister($src$$reg);
9226 int flag = $cop$$cmpcode;
9227 Label L;
9229 switch(flag)
9230 {
9231 case 0x01: //equal
9232 __ bne(op1, op2, L);
9233 __ nop();
9234 __ mov_d(dst, src);
9235 __ bind(L);
9236 break;
9237 case 0x02: //not_equal
9238 __ beq(op1, op2, L);
9239 __ nop();
9240 __ mov_d(dst, src);
9241 __ bind(L);
9242 break;
9243 case 0x03: //greater
9244 __ slt(AT, op2, op1);
9245 __ beq(AT, R0, L);
9246 __ nop();
9247 __ mov_d(dst, src);
9248 __ bind(L);
9249 break;
9250 case 0x04: //greater_equal
9251 __ slt(AT, op1, op2);
9252 __ bne(AT, R0, L);
9253 __ nop();
9254 __ mov_d(dst, src);
9255 __ bind(L);
9256 break;
9257 case 0x05: //less
9258 __ slt(AT, op1, op2);
9259 __ beq(AT, R0, L);
9260 __ nop();
9261 __ mov_d(dst, src);
9262 __ bind(L);
9263 break;
9264 case 0x06: //less_equal
9265 __ slt(AT, op2, op1);
9266 __ bne(AT, R0, L);
9267 __ nop();
9268 __ mov_d(dst, src);
9269 __ bind(L);
9270 break;
9271 default:
9272 Unimplemented();
9273 }
9274 %}
9276 ins_pipe( pipe_slow );
9277 %}
// Conditional move of a double selected by a pointer compare. Same
// branch-around pattern as cmovD_cmpI_reg_reg. NOTE(review): the cop here is
// the signed cmpOp and ordering uses signed slt on 64-bit pointers; for CmoveD
// on pointers only eq/ne are presumably ever generated — confirm.
9279 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9280 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9281 ins_cost(200);
9282 format %{
9283 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9284 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9285 %}
9287 ins_encode %{
9288 Register op1 = $tmp1$$Register;
9289 Register op2 = $tmp2$$Register;
9290 FloatRegister dst = as_FloatRegister($dst$$reg);
9291 FloatRegister src = as_FloatRegister($src$$reg);
9292 int flag = $cop$$cmpcode;
9293 Label L;
9295 switch(flag)
9296 {
9297 case 0x01: //equal
9298 __ bne(op1, op2, L);
9299 __ nop();
9300 __ mov_d(dst, src);
9301 __ bind(L);
9302 break;
9303 case 0x02: //not_equal
9304 __ beq(op1, op2, L);
9305 __ nop();
9306 __ mov_d(dst, src);
9307 __ bind(L);
9308 break;
9309 case 0x03: //greater
9310 __ slt(AT, op2, op1);
9311 __ beq(AT, R0, L);
9312 __ nop();
9313 __ mov_d(dst, src);
9314 __ bind(L);
9315 break;
9316 case 0x04: //greater_equal
9317 __ slt(AT, op1, op2);
9318 __ bne(AT, R0, L);
9319 __ nop();
9320 __ mov_d(dst, src);
9321 __ bind(L);
9322 break;
9323 case 0x05: //less
9324 __ slt(AT, op1, op2);
9325 __ beq(AT, R0, L);
9326 __ nop();
9327 __ mov_d(dst, src);
9328 __ bind(L);
9329 break;
9330 case 0x06: //less_equal
9331 __ slt(AT, op2, op1);
9332 __ bne(AT, R0, L);
9333 __ nop();
9334 __ mov_d(dst, src);
9335 __ bind(L);
9336 break;
9337 default:
9338 Unimplemented();
9339 }
9340 %}
9342 ins_pipe( pipe_slow );
9343 %}
9345 //FIXME
// Conditional move of an int selected by a FLOAT compare: dst = src when
// "$tmp1 <cop> $tmp2" holds, else dst is unchanged. Single-precision analogue
// of cmovI_cmpD_reg_reg: c_*_s sets the FPU condition flag, movt/movf copy on
// flag set/clear, and ordering uses the complemented ordered compares.
9346 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9347 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9348 ins_cost(80);
9349 format %{
9350 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9351 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9352 %}
9354 ins_encode %{
9355 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9356 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9357 Register dst = $dst$$Register;
9358 Register src = $src$$Register;
9359 int flag = $cop$$cmpcode;
9361 switch(flag)
9362 {
9363 case 0x01: //equal
9364 __ c_eq_s(reg_op1, reg_op2);
9365 __ movt(dst, src);
9366 break;
9367 case 0x02: //not_equal
9368 __ c_eq_s(reg_op1, reg_op2);
9369 __ movf(dst, src);
9370 break;
9371 case 0x03: //greater
9372 __ c_ole_s(reg_op1, reg_op2);
9373 __ movf(dst, src);
9374 break;
9375 case 0x04: //greater_equal
9376 __ c_olt_s(reg_op1, reg_op2);
9377 __ movf(dst, src);
9378 break;
9379 case 0x05: //less
9380 __ c_ult_s(reg_op1, reg_op2);
9381 __ movt(dst, src);
9382 break;
9383 case 0x06: //less_equal
9384 __ c_ule_s(reg_op1, reg_op2);
9385 __ movt(dst, src);
9386 break;
9387 default:
9388 Unimplemented();
9389 }
9390 %}
9391 ins_pipe( pipe_slow );
9392 %}
// Conditional move of a float selected by a FLOAT compare. FP-to-FP copy
// cannot use movt/movf on a GPR, so each case branches around the mov_s with
// bc1f/bc1t (testing the FPU condition flag), nop in the delay slot.
9394 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9395 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9396 ins_cost(200);
9397 format %{
9398 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9399 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9400 %}
9402 ins_encode %{
9403 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9404 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9405 FloatRegister dst = $dst$$FloatRegister;
9406 FloatRegister src = $src$$FloatRegister;
9407 Label L;
9408 int flag = $cop$$cmpcode;
9410 switch(flag)
9411 {
9412 case 0x01: //equal
9413 __ c_eq_s(reg_op1, reg_op2);
9414 __ bc1f(L);
9415 __ nop();
9416 __ mov_s(dst, src);
9417 __ bind(L);
9418 break;
9419 case 0x02: //not_equal
9420 __ c_eq_s(reg_op1, reg_op2);
9421 __ bc1t(L);
9422 __ nop();
9423 __ mov_s(dst, src);
9424 __ bind(L);
9425 break;
9426 case 0x03: //greater
9427 __ c_ole_s(reg_op1, reg_op2);
9428 __ bc1t(L);
9429 __ nop();
9430 __ mov_s(dst, src);
9431 __ bind(L);
9432 break;
9433 case 0x04: //greater_equal
9434 __ c_olt_s(reg_op1, reg_op2);
9435 __ bc1t(L);
9436 __ nop();
9437 __ mov_s(dst, src);
9438 __ bind(L);
9439 break;
9440 case 0x05: //less
9441 __ c_ult_s(reg_op1, reg_op2);
9442 __ bc1f(L);
9443 __ nop();
9444 __ mov_s(dst, src);
9445 __ bind(L);
9446 break;
9447 case 0x06: //less_equal
9448 __ c_ule_s(reg_op1, reg_op2);
9449 __ bc1f(L);
9450 __ nop();
9451 __ mov_s(dst, src);
9452 __ bind(L);
9453 break;
9454 default:
9455 Unimplemented();
9456 }
9457 %}
9458 ins_pipe( pipe_slow );
9459 %}
9461 // Manifest a CmpL result in an integer register. Very painful.
9462 // This is the test to avoid.
// dst = -1 if src1 < src2, 0 if src1 == src2, 1 if src1 > src2 (signed 64-bit).
9463 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9464 match(Set dst (CmpL3 src1 src2));
9465 ins_cost(1000);
9466 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9467 ins_encode %{
9468 Register opr1 = as_Register($src1$$reg);
9469 Register opr2 = as_Register($src2$$reg);
9470 Register dst = as_Register($dst$$reg);
9472 Label Done;
// BUGFIX: the old code derived the ordering from the sign of
// subu(opr1 - opr2), which is wrong when the 64-bit subtraction
// overflows (e.g. min_jlong vs 1 produced a positive difference and
// hence dst = 1 instead of -1). Use slt, a true signed 64-bit compare,
// as the sibling cmovX_cmpL instructs in this file already do.
__ slt(AT, opr1, opr2); // AT = 1 iff opr1 < opr2
__ bne(AT, R0, Done);
__ delayed()->daddiu(dst, R0, -1); // delay slot: dst = -1, kept when less
__ slt(dst, opr2, opr1); // dst = 1 if greater, 0 if equal
9481 __ bind(Done);
9482 %}
9483 ins_pipe( pipe_slow );
9484 %}
9486 //
9487 // less_result = -1
9488 // greater_result = 1
9489 // equal_result = 0
9490 // nan_result = -1
9491 //
// Manifest a CmpF result in an integer register: dst = -1/0/1 for
// less/equal/greater; unordered (NaN) yields -1 because c_ult_s is true
// when src1 < src2 OR the operands are unordered.
9492 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9493 match(Set dst (CmpF3 src1 src2));
9494 ins_cost(1000);
9495 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9496 ins_encode %{
9497 FloatRegister src1 = as_FloatRegister($src1$$reg);
9498 FloatRegister src2 = as_FloatRegister($src2$$reg);
9499 Register dst = as_Register($dst$$reg);
9501 Label Done;
9503 __ c_ult_s(src1, src2);
9504 __ bc1t(Done);
// Delay slot: dst = -1, kept when less-or-unordered.
9505 __ delayed()->daddiu(dst, R0, -1);
// Not less, not NaN: dst = (src1 == src2) ? 0 : 1.
9507 __ c_eq_s(src1, src2);
9508 __ move(dst, 1);
9509 __ movt(dst, R0);
9511 __ bind(Done);
9512 %}
9513 ins_pipe( pipe_slow );
9514 %}
// Manifest a CmpD result in an integer register: dst = -1/0/1 for
// less/equal/greater; unordered (NaN) yields -1 (c_ult_d is true for
// less-than OR unordered). Double-precision twin of cmpF3_reg_reg.
9516 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9517 match(Set dst (CmpD3 src1 src2));
9518 ins_cost(1000);
9519 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9520 ins_encode %{
9521 FloatRegister src1 = as_FloatRegister($src1$$reg);
9522 FloatRegister src2 = as_FloatRegister($src2$$reg);
9523 Register dst = as_Register($dst$$reg);
9525 Label Done;
9527 __ c_ult_d(src1, src2);
9528 __ bc1t(Done);
// Delay slot: dst = -1, kept when less-or-unordered.
9529 __ delayed()->daddiu(dst, R0, -1);
// Not less, not NaN: dst = (src1 == src2) ? 0 : 1.
9531 __ c_eq_d(src1, src2);
9532 __ move(dst, 1);
9533 __ movt(dst, R0);
9535 __ bind(Done);
9536 %}
9537 ins_pipe( pipe_slow );
9538 %}
// Zero-fill an array: stores cnt doublewords of zero starting at base.
9540 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9541 match(Set dummy (ClearArray cnt base));
9542 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9543 ins_encode %{
// cnt is a count of DOUBLEWORDS (8 bytes each), not bytes — the loop below
// stores one sd (wordSize == 8 on MIPS64) per decrement, matching the x86
// ClearArray convention noted in the 2012 comment.
9544 //Assume cnt is the number of doublewords in an array to be cleared,
9545 //and base points to the starting address of the array.
9546 Register base = $base$$Register;
9547 Register num = $cnt$$Register;
9548 Label Loop, done;
9550 /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
9551 __ move(T9, num); /* T9 = words */
9552 __ beq(T9, R0, done);
9553 __ nop();
9554 __ move(AT, base);
9556 __ bind(Loop);
9557 __ sd(R0, Address(AT, 0));
9558 __ daddi(AT, AT, wordSize);
9559 __ daddi(T9, T9, -1);
9560 __ bne(T9, R0, Loop);
9561 __ delayed()->nop();
9562 __ bind(done);
9563 %}
9564 ins_pipe( pipe_slow );
9565 %}
// Lexicographic UTF-16 string compare. result starts as cnt1 - cnt2 (the
// length difference, which is the answer if one string is a prefix of the
// other); the loop then compares min(cnt1, cnt2) chars and, on the first
// mismatch, overwrites result with the character difference.
9567 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9568 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9569 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9571 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9572 ins_encode %{
9573 // Get the first character position in both strings
9574 // [8] char array, [12] offset, [16] count
9575 Register str1 = $str1$$Register;
9576 Register str2 = $str2$$Register;
9577 Register cnt1 = $cnt1$$Register;
9578 Register cnt2 = $cnt2$$Register;
9579 Register result = $result$$Register;
9581 Label L, Loop, haveResult, done;
9583 // compute the and difference of lengths (in result)
9584 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9586 // compute the shorter length (in cnt1)
9587 __ slt(AT, cnt2, cnt1);
9588 __ movn(cnt1, cnt2, AT);
9590 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9591 __ bind(Loop); // Loop begin
9592 __ beq(cnt1, R0, done);
// Delay slot: load next char of str1 (harmless even when the branch is taken).
9593 __ delayed()->lhu(AT, str1, 0);;
9595 // compare current character
9596 __ lhu(cnt2, str2, 0);
9597 __ bne(AT, cnt2, haveResult);
9598 __ delayed()->addi(str1, str1, 2);
9599 __ addi(str2, str2, 2);
9600 __ b(Loop);
9601 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
9603 __ bind(haveResult);
// Mismatch: result = difference of the two mismatching chars.
9604 __ subu(result, AT, cnt2);
9606 __ bind(done);
9607 %}
9609 ins_pipe( pipe_slow );
9610 %}
9612 // intrinsic optimization
// StrEquals intrinsic: result = 1 if the two cnt-char UTF-16 strings are
// equal (or share the same char[]), 0 on the first mismatch.
// NOTE(review): the instruction following each un-suffixed branch executes
// in the branch delay slot (this port writes delayed()->... or nop()
// elsewhere; here the daddiu result-setters intentionally fill the slots —
// confirm the raw beq/bne emission in the port's assembler).
9613 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9614 match(Set result (StrEquals (Binary str1 str2) cnt));
9615 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9617 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9618 ins_encode %{
9619 // Get the first character position in both strings
9620 // [8] char array, [12] offset, [16] count
9621 Register str1 = $str1$$Register;
9622 Register str2 = $str2$$Register;
9623 Register cnt = $cnt$$Register;
9624 Register tmp = $temp$$Register;
9625 Register result = $result$$Register;
9627 Label Loop, done;
9630 __ beq(str1, str2, done); // same char[] ?
9631 __ daddiu(result, R0, 1);
9633 __ bind(Loop); // Loop begin
9634 __ beq(cnt, R0, done);
9635 __ daddiu(result, R0, 1); // count == 0
9637 // compare current character
9638 __ lhu(AT, str1, 0);;
9639 __ lhu(tmp, str2, 0);
9640 __ bne(AT, tmp, done);
// Delay slot: mismatch => result = 0.
9641 __ delayed()->daddi(result, R0, 0);
9642 __ addi(str1, str1, 2);
9643 __ addi(str2, str2, 2);
9644 __ b(Loop);
9645 __ delayed()->addi(cnt, cnt, -1); // Loop end
9647 __ bind(done);
9648 %}
9650 ins_pipe( pipe_slow );
9651 %}
9653 //----------Arithmetic Instructions-------------------------------------------
9654 //----------Addition Instructions---------------------------------------------
// 32-bit integer add: dst = src1 + src2. addu32 presumably emits the 32-bit
// addu, whose result is sign-extended per the MIPS64 convention — confirm in
// the port's assembler.
9655 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9656 match(Set dst (AddI src1 src2));
9658 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9659 ins_encode %{
9660 Register dst = $dst$$Register;
9661 Register src1 = $src1$$Register;
9662 Register src2 = $src2$$Register;
9663 __ addu32(dst, src1, src2);
9664 %}
9665 ins_pipe( ialu_regI_regI );
9666 %}
// 32-bit integer add with immediate: dst = src1 + src2. Uses the single
// addiu32 when the immediate fits in a signed 16-bit field, otherwise
// materializes it in AT first.
9668 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9669 match(Set dst (AddI src1 src2));
9671 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9672 ins_encode %{
9673 Register dst = $dst$$Register;
9674 Register src1 = $src1$$Register;
9675 int imm = $src2$$constant;
9677 if(Assembler::is_simm16(imm)) {
9678 __ addiu32(dst, src1, imm);
9679 } else {
9680 __ move(AT, imm);
9681 __ addu32(dst, src1, AT);
9682 }
9683 %}
9684 ins_pipe( ialu_regI_regI );
9685 %}
// Pointer add: dst = src1 + src2, full 64-bit daddu.
9687 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9688 match(Set dst (AddP src1 src2));
9690 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9692 ins_encode %{
9693 Register dst = $dst$$Register;
9694 Register src1 = $src1$$Register;
9695 Register src2 = $src2$$Register;
9696 __ daddu(dst, src1, src2);
9697 %}
9699 ins_pipe( ialu_regI_regI );
9700 %}
// Pointer add with an int offset widened to long: dst = src1 + (long)src2.
// The ConvI2L is free because mRegI values are presumably kept sign-extended
// in 64-bit registers — confirm the port's register-width invariant.
9702 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9703 match(Set dst (AddP src1 (ConvI2L src2)));
9705 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9707 ins_encode %{
9708 Register dst = $dst$$Register;
9709 Register src1 = $src1$$Register;
9710 Register src2 = $src2$$Register;
9711 __ daddu(dst, src1, src2);
9712 %}
9714 ins_pipe( ialu_regI_regI );
9715 %}
// Pointer add with a long immediate: dst = src1 + src2. Single daddiu for a
// signed-16-bit immediate, otherwise set64 materializes it in AT.
9717 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9718 match(Set dst (AddP src1 src2));
9720 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9721 ins_encode %{
9722 Register src1 = $src1$$Register;
9723 long src2 = $src2$$constant;
9724 Register dst = $dst$$Register;
9726 if(Assembler::is_simm16(src2)) {
9727 __ daddiu(dst, src1, src2);
9728 } else {
9729 __ set64(AT, src2);
9730 __ daddu(dst, src1, AT);
9731 }
9732 %}
9733 ins_pipe( ialu_regI_imm16 );
9734 %}
9736 // Add Long Register with Register
// 64-bit long add: dst = src1 + src2 via daddu.
9737 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9738 match(Set dst (AddL src1 src2));
9739 ins_cost(200);
9740 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9742 ins_encode %{
9743 Register dst_reg = as_Register($dst$$reg);
9744 Register src1_reg = as_Register($src1$$reg);
9745 Register src2_reg = as_Register($src2$$reg);
9747 __ daddu(dst_reg, src1_reg, src2_reg);
9748 %}
9750 ins_pipe( ialu_regL_regL );
9751 %}
// 64-bit long add with a 16-bit immediate (immL16 guarantees simm16 fit,
// so a single daddiu suffices).
9753 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9754 %{
9755 match(Set dst (AddL src1 src2));
9757 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9758 ins_encode %{
9759 Register dst_reg = as_Register($dst$$reg);
9760 Register src1_reg = as_Register($src1$$reg);
9761 int src2_imm = $src2$$constant;
9763 __ daddiu(dst_reg, src1_reg, src2_imm);
9764 %}
9766 ins_pipe( ialu_regL_regL );
9767 %}
// Long add of a widened int and a 16-bit immediate: dst = (long)src1 + src2.
// ConvI2L is free assuming mRegI values are kept sign-extended — confirm.
9769 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9770 %{
9771 match(Set dst (AddL (ConvI2L src1) src2));
9773 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9774 ins_encode %{
9775 Register dst_reg = as_Register($dst$$reg);
9776 Register src1_reg = as_Register($src1$$reg);
9777 int src2_imm = $src2$$constant;
9779 __ daddiu(dst_reg, src1_reg, src2_imm);
9780 %}
9782 ins_pipe( ialu_regL_regL );
9783 %}
// Long add of a widened int and a long: dst = (long)src1 + src2.
9785 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9786 match(Set dst (AddL (ConvI2L src1) src2));
9787 ins_cost(200);
9788 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9790 ins_encode %{
9791 Register dst_reg = as_Register($dst$$reg);
9792 Register src1_reg = as_Register($src1$$reg);
9793 Register src2_reg = as_Register($src2$$reg);
9795 __ daddu(dst_reg, src1_reg, src2_reg);
9796 %}
9798 ins_pipe( ialu_regL_regL );
9799 %}
// Long add of two widened ints: dst = (long)src1 + (long)src2.
9801 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9802 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9803 ins_cost(200);
9804 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9806 ins_encode %{
9807 Register dst_reg = as_Register($dst$$reg);
9808 Register src1_reg = as_Register($src1$$reg);
9809 Register src2_reg = as_Register($src2$$reg);
9811 __ daddu(dst_reg, src1_reg, src2_reg);
9812 %}
9814 ins_pipe( ialu_regL_regL );
9815 %}
9817 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9818 match(Set dst (AddL src1 (ConvI2L src2)));
9819 ins_cost(200);
9820 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9822 ins_encode %{
9823 Register dst_reg = as_Register($dst$$reg);
9824 Register src1_reg = as_Register($src1$$reg);
9825 Register src2_reg = as_Register($src2$$reg);
9827 __ daddu(dst_reg, src1_reg, src2_reg);
9828 %}
9830 ins_pipe( ialu_regL_regL );
9831 %}
9833 //----------Subtraction Instructions-------------------------------------------
9834 // Integer Subtraction Instructions
9835 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9836 match(Set dst (SubI src1 src2));
9837 ins_cost(100);
9839 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9840 ins_encode %{
9841 Register dst = $dst$$Register;
9842 Register src1 = $src1$$Register;
9843 Register src2 = $src2$$Register;
9844 __ subu32(dst, src1, src2);
9845 %}
9846 ins_pipe( ialu_regI_regI );
9847 %}
9849 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9850 match(Set dst (SubI src1 src2));
9851 ins_cost(80);
9853 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9854 ins_encode %{
9855 Register dst = $dst$$Register;
9856 Register src1 = $src1$$Register;
9857 __ addiu32(dst, src1, -1 * $src2$$constant);
9858 %}
9859 ins_pipe( ialu_regI_regI );
9860 %}
9862 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9863 match(Set dst (SubI zero src));
9864 ins_cost(80);
9866 format %{ "neg $dst, $src #@negI_Reg" %}
9867 ins_encode %{
9868 Register dst = $dst$$Register;
9869 Register src = $src$$Register;
9870 __ subu32(dst, R0, src);
9871 %}
9872 ins_pipe( ialu_regI_regI );
9873 %}
9875 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9876 match(Set dst (SubL zero src));
9877 ins_cost(80);
9879 format %{ "neg $dst, $src #@negL_Reg" %}
9880 ins_encode %{
9881 Register dst = $dst$$Register;
9882 Register src = $src$$Register;
9883 __ subu(dst, R0, src);
9884 %}
9885 ins_pipe( ialu_regI_regI );
9886 %}
9888 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9889 match(Set dst (SubL src1 src2));
9890 ins_cost(80);
9892 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9893 ins_encode %{
9894 Register dst = $dst$$Register;
9895 Register src1 = $src1$$Register;
9896 __ daddiu(dst, src1, -1 * $src2$$constant);
9897 %}
9898 ins_pipe( ialu_regI_regI );
9899 %}
9901 // Subtract Long Register with Register.
9902 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9903 match(Set dst (SubL src1 src2));
9904 ins_cost(100);
9905 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9906 ins_encode %{
9907 Register dst = as_Register($dst$$reg);
9908 Register src1 = as_Register($src1$$reg);
9909 Register src2 = as_Register($src2$$reg);
9911 __ subu(dst, src1, src2);
9912 %}
9913 ins_pipe( ialu_regL_regL );
9914 %}
9916 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9917 match(Set dst (SubL src1 (ConvI2L src2)));
9918 ins_cost(100);
9919 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9920 ins_encode %{
9921 Register dst = as_Register($dst$$reg);
9922 Register src1 = as_Register($src1$$reg);
9923 Register src2 = as_Register($src2$$reg);
9925 __ subu(dst, src1, src2);
9926 %}
9927 ins_pipe( ialu_regL_regL );
9928 %}
9930 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9931 match(Set dst (SubL (ConvI2L src1) src2));
9932 ins_cost(200);
9933 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9934 ins_encode %{
9935 Register dst = as_Register($dst$$reg);
9936 Register src1 = as_Register($src1$$reg);
9937 Register src2 = as_Register($src2$$reg);
9939 __ subu(dst, src1, src2);
9940 %}
9941 ins_pipe( ialu_regL_regL );
9942 %}
9944 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9945 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9946 ins_cost(200);
9947 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9948 ins_encode %{
9949 Register dst = as_Register($dst$$reg);
9950 Register src1 = as_Register($src1$$reg);
9951 Register src2 = as_Register($src2$$reg);
9953 __ subu(dst, src1, src2);
9954 %}
9955 ins_pipe( ialu_regL_regL );
9956 %}
9958 // Integer MOD with Register
9959 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9960 match(Set dst (ModI src1 src2));
9961 ins_cost(300);
9962 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9963 ins_encode %{
9964 Register dst = $dst$$Register;
9965 Register src1 = $src1$$Register;
9966 Register src2 = $src2$$Register;
9968 //if (UseLoongsonISA) {
9969 if (0) {
9970 // 2016.08.10
9971 // Experiments show that gsmod is slower that div+mfhi.
9972 // So I just disable it here.
9973 __ gsmod(dst, src1, src2);
9974 } else {
9975 __ div(src1, src2);
9976 __ mfhi(dst);
9977 }
9978 %}
9980 //ins_pipe( ialu_mod );
9981 ins_pipe( ialu_regI_regI );
9982 %}
9984 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9985 match(Set dst (ModL src1 src2));
9986 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
9988 ins_encode %{
9989 Register dst = as_Register($dst$$reg);
9990 Register op1 = as_Register($src1$$reg);
9991 Register op2 = as_Register($src2$$reg);
9993 if (UseLoongsonISA) {
9994 __ gsdmod(dst, op1, op2);
9995 } else {
9996 __ ddiv(op1, op2);
9997 __ mfhi(dst);
9998 }
9999 %}
10000 ins_pipe( pipe_slow );
10001 %}
10003 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10004 match(Set dst (MulI src1 src2));
10006 ins_cost(300);
10007 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10008 ins_encode %{
10009 Register src1 = $src1$$Register;
10010 Register src2 = $src2$$Register;
10011 Register dst = $dst$$Register;
10013 __ mul(dst, src1, src2);
10014 %}
10015 ins_pipe( ialu_mult );
10016 %}
10018 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
10019 match(Set dst (AddI (MulI src1 src2) src3));
10021 ins_cost(999);
10022 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
10023 ins_encode %{
10024 Register src1 = $src1$$Register;
10025 Register src2 = $src2$$Register;
10026 Register src3 = $src3$$Register;
10027 Register dst = $dst$$Register;
10029 __ mtlo(src3);
10030 __ madd(src1, src2);
10031 __ mflo(dst);
10032 %}
10033 ins_pipe( ialu_mult );
10034 %}
10036 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10037 match(Set dst (DivI src1 src2));
10039 ins_cost(300);
10040 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10041 ins_encode %{
10042 Register src1 = $src1$$Register;
10043 Register src2 = $src2$$Register;
10044 Register dst = $dst$$Register;
10046 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10047 We must trap an exception manually. */
10048 __ teq(R0, src2, 0x7);
10050 if (UseLoongsonISA) {
10051 __ gsdiv(dst, src1, src2);
10052 } else {
10053 __ div(src1, src2);
10055 __ nop();
10056 __ nop();
10057 __ mflo(dst);
10058 }
10059 %}
10060 ins_pipe( ialu_mod );
10061 %}
10063 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10064 match(Set dst (DivF src1 src2));
10066 ins_cost(300);
10067 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10068 ins_encode %{
10069 FloatRegister src1 = $src1$$FloatRegister;
10070 FloatRegister src2 = $src2$$FloatRegister;
10071 FloatRegister dst = $dst$$FloatRegister;
10073 /* Here do we need to trap an exception manually ? */
10074 __ div_s(dst, src1, src2);
10075 %}
10076 ins_pipe( pipe_slow );
10077 %}
10079 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10080 match(Set dst (DivD src1 src2));
10082 ins_cost(300);
10083 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10084 ins_encode %{
10085 FloatRegister src1 = $src1$$FloatRegister;
10086 FloatRegister src2 = $src2$$FloatRegister;
10087 FloatRegister dst = $dst$$FloatRegister;
10089 /* Here do we need to trap an exception manually ? */
10090 __ div_d(dst, src1, src2);
10091 %}
10092 ins_pipe( pipe_slow );
10093 %}
10095 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10096 match(Set dst (MulL src1 src2));
10097 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10098 ins_encode %{
10099 Register dst = as_Register($dst$$reg);
10100 Register op1 = as_Register($src1$$reg);
10101 Register op2 = as_Register($src2$$reg);
10103 if (UseLoongsonISA) {
10104 __ gsdmult(dst, op1, op2);
10105 } else {
10106 __ dmult(op1, op2);
10107 __ mflo(dst);
10108 }
10109 %}
10110 ins_pipe( pipe_slow );
10111 %}
10113 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10114 match(Set dst (MulL src1 (ConvI2L src2)));
10115 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10116 ins_encode %{
10117 Register dst = as_Register($dst$$reg);
10118 Register op1 = as_Register($src1$$reg);
10119 Register op2 = as_Register($src2$$reg);
10121 if (UseLoongsonISA) {
10122 __ gsdmult(dst, op1, op2);
10123 } else {
10124 __ dmult(op1, op2);
10125 __ mflo(dst);
10126 }
10127 %}
10128 ins_pipe( pipe_slow );
10129 %}
10131 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10132 match(Set dst (DivL src1 src2));
10133 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10135 ins_encode %{
10136 Register dst = as_Register($dst$$reg);
10137 Register op1 = as_Register($src1$$reg);
10138 Register op2 = as_Register($src2$$reg);
10140 if (UseLoongsonISA) {
10141 __ gsddiv(dst, op1, op2);
10142 } else {
10143 __ ddiv(op1, op2);
10144 __ mflo(dst);
10145 }
10146 %}
10147 ins_pipe( pipe_slow );
10148 %}
10150 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10151 match(Set dst (AddF src1 src2));
10152 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10153 ins_encode %{
10154 FloatRegister src1 = as_FloatRegister($src1$$reg);
10155 FloatRegister src2 = as_FloatRegister($src2$$reg);
10156 FloatRegister dst = as_FloatRegister($dst$$reg);
10158 __ add_s(dst, src1, src2);
10159 %}
10160 ins_pipe( fpu_regF_regF );
10161 %}
10163 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10164 match(Set dst (SubF src1 src2));
10165 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10166 ins_encode %{
10167 FloatRegister src1 = as_FloatRegister($src1$$reg);
10168 FloatRegister src2 = as_FloatRegister($src2$$reg);
10169 FloatRegister dst = as_FloatRegister($dst$$reg);
10171 __ sub_s(dst, src1, src2);
10172 %}
10173 ins_pipe( fpu_regF_regF );
10174 %}
10175 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10176 match(Set dst (AddD src1 src2));
10177 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10178 ins_encode %{
10179 FloatRegister src1 = as_FloatRegister($src1$$reg);
10180 FloatRegister src2 = as_FloatRegister($src2$$reg);
10181 FloatRegister dst = as_FloatRegister($dst$$reg);
10183 __ add_d(dst, src1, src2);
10184 %}
10185 ins_pipe( fpu_regF_regF );
10186 %}
10188 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10189 match(Set dst (SubD src1 src2));
10190 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10191 ins_encode %{
10192 FloatRegister src1 = as_FloatRegister($src1$$reg);
10193 FloatRegister src2 = as_FloatRegister($src2$$reg);
10194 FloatRegister dst = as_FloatRegister($dst$$reg);
10196 __ sub_d(dst, src1, src2);
10197 %}
10198 ins_pipe( fpu_regF_regF );
10199 %}
10201 instruct negF_reg(regF dst, regF src) %{
10202 match(Set dst (NegF src));
10203 format %{ "negF $dst, $src @negF_reg" %}
10204 ins_encode %{
10205 FloatRegister src = as_FloatRegister($src$$reg);
10206 FloatRegister dst = as_FloatRegister($dst$$reg);
10208 __ neg_s(dst, src);
10209 %}
10210 ins_pipe( fpu_regF_regF );
10211 %}
10213 instruct negD_reg(regD dst, regD src) %{
10214 match(Set dst (NegD src));
10215 format %{ "negD $dst, $src @negD_reg" %}
10216 ins_encode %{
10217 FloatRegister src = as_FloatRegister($src$$reg);
10218 FloatRegister dst = as_FloatRegister($dst$$reg);
10220 __ neg_d(dst, src);
10221 %}
10222 ins_pipe( fpu_regF_regF );
10223 %}
10226 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10227 match(Set dst (MulF src1 src2));
10228 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10229 ins_encode %{
10230 FloatRegister src1 = $src1$$FloatRegister;
10231 FloatRegister src2 = $src2$$FloatRegister;
10232 FloatRegister dst = $dst$$FloatRegister;
10234 __ mul_s(dst, src1, src2);
10235 %}
10236 ins_pipe( fpu_regF_regF );
10237 %}
10239 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10240 match(Set dst (AddF (MulF src1 src2) src3));
10241 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10242 ins_cost(44444);
10243 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10244 ins_encode %{
10245 FloatRegister src1 = $src1$$FloatRegister;
10246 FloatRegister src2 = $src2$$FloatRegister;
10247 FloatRegister src3 = $src3$$FloatRegister;
10248 FloatRegister dst = $dst$$FloatRegister;
10250 __ madd_s(dst, src1, src2, src3);
10251 %}
10252 ins_pipe( fpu_regF_regF );
10253 %}
10255 // Mul two double precision floating piont number
10256 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10257 match(Set dst (MulD src1 src2));
10258 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10259 ins_encode %{
10260 FloatRegister src1 = $src1$$FloatRegister;
10261 FloatRegister src2 = $src2$$FloatRegister;
10262 FloatRegister dst = $dst$$FloatRegister;
10264 __ mul_d(dst, src1, src2);
10265 %}
10266 ins_pipe( fpu_regF_regF );
10267 %}
10269 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10270 match(Set dst (AddD (MulD src1 src2) src3));
10271 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10272 ins_cost(44444);
10273 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10274 ins_encode %{
10275 FloatRegister src1 = $src1$$FloatRegister;
10276 FloatRegister src2 = $src2$$FloatRegister;
10277 FloatRegister src3 = $src3$$FloatRegister;
10278 FloatRegister dst = $dst$$FloatRegister;
10280 __ madd_d(dst, src1, src2, src3);
10281 %}
10282 ins_pipe( fpu_regF_regF );
10283 %}
10285 instruct absF_reg(regF dst, regF src) %{
10286 match(Set dst (AbsF src));
10287 ins_cost(100);
10288 format %{ "absF $dst, $src @absF_reg" %}
10289 ins_encode %{
10290 FloatRegister src = as_FloatRegister($src$$reg);
10291 FloatRegister dst = as_FloatRegister($dst$$reg);
10293 __ abs_s(dst, src);
10294 %}
10295 ins_pipe( fpu_regF_regF );
10296 %}
10299 // intrinsics for math_native.
10300 // AbsD SqrtD CosD SinD TanD LogD Log10D
10302 instruct absD_reg(regD dst, regD src) %{
10303 match(Set dst (AbsD src));
10304 ins_cost(100);
10305 format %{ "absD $dst, $src @absD_reg" %}
10306 ins_encode %{
10307 FloatRegister src = as_FloatRegister($src$$reg);
10308 FloatRegister dst = as_FloatRegister($dst$$reg);
10310 __ abs_d(dst, src);
10311 %}
10312 ins_pipe( fpu_regF_regF );
10313 %}
10315 instruct sqrtD_reg(regD dst, regD src) %{
10316 match(Set dst (SqrtD src));
10317 ins_cost(100);
10318 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10319 ins_encode %{
10320 FloatRegister src = as_FloatRegister($src$$reg);
10321 FloatRegister dst = as_FloatRegister($dst$$reg);
10323 __ sqrt_d(dst, src);
10324 %}
10325 ins_pipe( fpu_regF_regF );
10326 %}
10328 instruct sqrtF_reg(regF dst, regF src) %{
10329 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10330 ins_cost(100);
10331 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10332 ins_encode %{
10333 FloatRegister src = as_FloatRegister($src$$reg);
10334 FloatRegister dst = as_FloatRegister($dst$$reg);
10336 __ sqrt_s(dst, src);
10337 %}
10338 ins_pipe( fpu_regF_regF );
10339 %}
10340 //----------------------------------Logical Instructions----------------------
10341 //__________________________________Integer Logical Instructions-------------
10343 //And Instuctions
10344 // And Register with Immediate
10345 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10346 match(Set dst (AndI src1 src2));
10348 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10349 ins_encode %{
10350 Register dst = $dst$$Register;
10351 Register src = $src1$$Register;
10352 int val = $src2$$constant;
10354 __ move(AT, val);
10355 __ andr(dst, src, AT);
10356 %}
10357 ins_pipe( ialu_regI_regI );
10358 %}
10360 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10361 match(Set dst (AndI src1 src2));
10362 ins_cost(60);
10364 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10365 ins_encode %{
10366 Register dst = $dst$$Register;
10367 Register src = $src1$$Register;
10368 int val = $src2$$constant;
10370 __ andi(dst, src, val);
10371 %}
10372 ins_pipe( ialu_regI_regI );
10373 %}
10375 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10376 match(Set dst (AndI src1 mask));
10377 ins_cost(60);
10379 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10380 ins_encode %{
10381 Register dst = $dst$$Register;
10382 Register src = $src1$$Register;
10383 int size = Assembler::is_int_mask($mask$$constant);
10385 __ ext(dst, src, 0, size);
10386 %}
10387 ins_pipe( ialu_regI_regI );
10388 %}
10390 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10391 match(Set dst (AndL src1 mask));
10392 ins_cost(60);
10394 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10395 ins_encode %{
10396 Register dst = $dst$$Register;
10397 Register src = $src1$$Register;
10398 int size = Assembler::is_jlong_mask($mask$$constant);
10400 __ dext(dst, src, 0, size);
10401 %}
10402 ins_pipe( ialu_regI_regI );
10403 %}
10405 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10406 match(Set dst (XorI src1 src2));
10407 ins_cost(60);
10409 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10410 ins_encode %{
10411 Register dst = $dst$$Register;
10412 Register src = $src1$$Register;
10413 int val = $src2$$constant;
10415 __ xori(dst, src, val);
10416 %}
10417 ins_pipe( ialu_regI_regI );
10418 %}
10420 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10421 match(Set dst (XorI src1 M1));
10422 predicate(UseLoongsonISA);
10423 ins_cost(60);
10425 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10426 ins_encode %{
10427 Register dst = $dst$$Register;
10428 Register src = $src1$$Register;
10430 __ gsorn(dst, R0, src);
10431 %}
10432 ins_pipe( ialu_regI_regI );
10433 %}
10435 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10436 match(Set dst (XorL src1 src2));
10437 ins_cost(60);
10439 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10440 ins_encode %{
10441 Register dst = $dst$$Register;
10442 Register src = $src1$$Register;
10443 int val = $src2$$constant;
10445 __ xori(dst, src, val);
10446 %}
10447 ins_pipe( ialu_regI_regI );
10448 %}
10450 /*
10451 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10452 match(Set dst (XorL src1 M1));
10453 predicate(UseLoongsonISA);
10454 ins_cost(60);
10456 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10457 ins_encode %{
10458 Register dst = $dst$$Register;
10459 Register src = $src1$$Register;
10461 __ gsorn(dst, R0, src);
10462 %}
10463 ins_pipe( ialu_regI_regI );
10464 %}
10465 */
10467 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10468 match(Set dst (AndI mask (LoadB mem)));
10469 ins_cost(60);
10471 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10472 ins_encode(load_UB_enc(dst, mem));
10473 ins_pipe( ialu_loadI );
10474 %}
10476 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10477 match(Set dst (AndI (LoadB mem) mask));
10478 ins_cost(60);
10480 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10481 ins_encode(load_UB_enc(dst, mem));
10482 ins_pipe( ialu_loadI );
10483 %}
10485 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10486 match(Set dst (AndI src1 src2));
10488 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10489 ins_encode %{
10490 Register dst = $dst$$Register;
10491 Register src1 = $src1$$Register;
10492 Register src2 = $src2$$Register;
10493 __ andr(dst, src1, src2);
10494 %}
10495 ins_pipe( ialu_regI_regI );
10496 %}
10498 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10499 match(Set dst (AndI src1 (XorI src2 M1)));
10500 predicate(UseLoongsonISA);
10502 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10503 ins_encode %{
10504 Register dst = $dst$$Register;
10505 Register src1 = $src1$$Register;
10506 Register src2 = $src2$$Register;
10508 __ gsandn(dst, src1, src2);
10509 %}
10510 ins_pipe( ialu_regI_regI );
10511 %}
10513 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10514 match(Set dst (OrI src1 (XorI src2 M1)));
10515 predicate(UseLoongsonISA);
10517 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10518 ins_encode %{
10519 Register dst = $dst$$Register;
10520 Register src1 = $src1$$Register;
10521 Register src2 = $src2$$Register;
10523 __ gsorn(dst, src1, src2);
10524 %}
10525 ins_pipe( ialu_regI_regI );
10526 %}
10528 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10529 match(Set dst (AndI (XorI src1 M1) src2));
10530 predicate(UseLoongsonISA);
10532 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10533 ins_encode %{
10534 Register dst = $dst$$Register;
10535 Register src1 = $src1$$Register;
10536 Register src2 = $src2$$Register;
10538 __ gsandn(dst, src2, src1);
10539 %}
10540 ins_pipe( ialu_regI_regI );
10541 %}
10543 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10544 match(Set dst (OrI (XorI src1 M1) src2));
10545 predicate(UseLoongsonISA);
10547 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10548 ins_encode %{
10549 Register dst = $dst$$Register;
10550 Register src1 = $src1$$Register;
10551 Register src2 = $src2$$Register;
10553 __ gsorn(dst, src2, src1);
10554 %}
10555 ins_pipe( ialu_regI_regI );
10556 %}
10558 // And Long Register with Register
10559 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10560 match(Set dst (AndL src1 src2));
10561 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10562 ins_encode %{
10563 Register dst_reg = as_Register($dst$$reg);
10564 Register src1_reg = as_Register($src1$$reg);
10565 Register src2_reg = as_Register($src2$$reg);
10567 __ andr(dst_reg, src1_reg, src2_reg);
10568 %}
10569 ins_pipe( ialu_regL_regL );
10570 %}
10572 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10573 match(Set dst (AndL src1 (ConvI2L src2)));
10574 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10575 ins_encode %{
10576 Register dst_reg = as_Register($dst$$reg);
10577 Register src1_reg = as_Register($src1$$reg);
10578 Register src2_reg = as_Register($src2$$reg);
10580 __ andr(dst_reg, src1_reg, src2_reg);
10581 %}
10582 ins_pipe( ialu_regL_regL );
10583 %}
10585 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10586 match(Set dst (AndL src1 src2));
10587 ins_cost(60);
10589 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10590 ins_encode %{
10591 Register dst = $dst$$Register;
10592 Register src = $src1$$Register;
10593 long val = $src2$$constant;
10595 __ andi(dst, src, val);
10596 %}
10597 ins_pipe( ialu_regI_regI );
10598 %}
10600 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10601 match(Set dst (ConvL2I (AndL src1 src2)));
10602 ins_cost(60);
10604 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10605 ins_encode %{
10606 Register dst = $dst$$Register;
10607 Register src = $src1$$Register;
10608 long val = $src2$$constant;
10610 __ andi(dst, src, val);
10611 %}
10612 ins_pipe( ialu_regI_regI );
10613 %}
10615 /*
10616 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10617 match(Set dst (AndL src1 (XorL src2 M1)));
10618 predicate(UseLoongsonISA);
10620 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10621 ins_encode %{
10622 Register dst = $dst$$Register;
10623 Register src1 = $src1$$Register;
10624 Register src2 = $src2$$Register;
10626 __ gsandn(dst, src1, src2);
10627 %}
10628 ins_pipe( ialu_regI_regI );
10629 %}
10630 */
10632 /*
10633 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10634 match(Set dst (OrL src1 (XorL src2 M1)));
10635 predicate(UseLoongsonISA);
10637 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10638 ins_encode %{
10639 Register dst = $dst$$Register;
10640 Register src1 = $src1$$Register;
10641 Register src2 = $src2$$Register;
10643 __ gsorn(dst, src1, src2);
10644 %}
10645 ins_pipe( ialu_regI_regI );
10646 %}
10647 */
10649 /*
10650 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10651 match(Set dst (AndL (XorL src1 M1) src2));
10652 predicate(UseLoongsonISA);
10654 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10655 ins_encode %{
10656 Register dst = $dst$$Register;
10657 Register src1 = $src1$$Register;
10658 Register src2 = $src2$$Register;
10660 __ gsandn(dst, src2, src1);
10661 %}
10662 ins_pipe( ialu_regI_regI );
10663 %}
10664 */
10666 /*
10667 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10668 match(Set dst (OrL (XorL src1 M1) src2));
10669 predicate(UseLoongsonISA);
10671 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10672 ins_encode %{
10673 Register dst = $dst$$Register;
10674 Register src1 = $src1$$Register;
10675 Register src2 = $src2$$Register;
10677 __ gsorn(dst, src2, src1);
10678 %}
10679 ins_pipe( ialu_regI_regI );
10680 %}
10681 */
10683 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10684 match(Set dst (AndL dst M8));
10685 ins_cost(60);
10687 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10688 ins_encode %{
10689 Register dst = $dst$$Register;
10691 __ dins(dst, R0, 0, 3);
10692 %}
10693 ins_pipe( ialu_regI_regI );
10694 %}
10696 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10697 match(Set dst (AndL dst M5));
10698 ins_cost(60);
10700 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10701 ins_encode %{
10702 Register dst = $dst$$Register;
10704 __ dins(dst, R0, 2, 1);
10705 %}
10706 ins_pipe( ialu_regI_regI );
10707 %}
10709 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10710 match(Set dst (AndL dst M7));
10711 ins_cost(60);
10713 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10714 ins_encode %{
10715 Register dst = $dst$$Register;
10717 __ dins(dst, R0, 1, 2);
10718 %}
10719 ins_pipe( ialu_regI_regI );
10720 %}
10722 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10723 match(Set dst (AndL dst M4));
10724 ins_cost(60);
10726 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10727 ins_encode %{
10728 Register dst = $dst$$Register;
10730 __ dins(dst, R0, 0, 2);
10731 %}
10732 ins_pipe( ialu_regI_regI );
10733 %}
10735 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10736 match(Set dst (AndL dst M121));
10737 ins_cost(60);
10739 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10740 ins_encode %{
10741 Register dst = $dst$$Register;
10743 __ dins(dst, R0, 3, 4);
10744 %}
10745 ins_pipe( ialu_regI_regI );
10746 %}
10748 // Or Long Register with Register
10749 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10750 match(Set dst (OrL src1 src2));
10751 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10752 ins_encode %{
10753 Register dst_reg = $dst$$Register;
10754 Register src1_reg = $src1$$Register;
10755 Register src2_reg = $src2$$Register;
10757 __ orr(dst_reg, src1_reg, src2_reg);
10758 %}
10759 ins_pipe( ialu_regL_regL );
10760 %}
10762 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10763 match(Set dst (OrL (CastP2X src1) src2));
10764 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10765 ins_encode %{
10766 Register dst_reg = $dst$$Register;
10767 Register src1_reg = $src1$$Register;
10768 Register src2_reg = $src2$$Register;
10770 __ orr(dst_reg, src1_reg, src2_reg);
10771 %}
10772 ins_pipe( ialu_regL_regL );
10773 %}
10775 // Xor Long Register with Register
10776 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10777 match(Set dst (XorL src1 src2));
10778 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10779 ins_encode %{
10780 Register dst_reg = as_Register($dst$$reg);
10781 Register src1_reg = as_Register($src1$$reg);
10782 Register src2_reg = as_Register($src2$$reg);
10784 __ xorr(dst_reg, src1_reg, src2_reg);
10785 %}
10786 ins_pipe( ialu_regL_regL );
10787 %}
10789 // Shift Left by 8-bit immediate
10790 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10791 match(Set dst (LShiftI src shift));
10793 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10794 ins_encode %{
10795 Register src = $src$$Register;
10796 Register dst = $dst$$Register;
10797 int shamt = $shift$$constant;
10799 __ sll(dst, src, shamt);
10800 %}
10801 ins_pipe( ialu_regI_regI );
10802 %}
10804 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10805 match(Set dst (AndI (LShiftI src shift) mask));
10807 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10808 ins_encode %{
10809 Register src = $src$$Register;
10810 Register dst = $dst$$Register;
10812 __ sll(dst, src, 16);
10813 %}
10814 ins_pipe( ialu_regI_regI );
10815 %}
10817 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10818 %{
10819 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10821 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10822 ins_encode %{
10823 Register src = $src$$Register;
10824 Register dst = $dst$$Register;
10826 __ andi(dst, src, 7);
10827 %}
10828 ins_pipe(ialu_regI_regI);
10829 %}
10831 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10832 %{
10833 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10835 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10836 ins_encode %{
10837 Register src = $src1$$Register;
10838 int val = $src2$$constant;
10839 Register dst = $dst$$Register;
10841 __ ori(dst, src, val);
10842 %}
10843 ins_pipe(ialu_regI_regI);
10844 %}
10846 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
10847 // This idiom is used by the compiler the i2s bytecode.
10848 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10849 %{
10850 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10852 format %{ "i2s $dst, $src\t# @i2s" %}
10853 ins_encode %{
10854 Register src = $src$$Register;
10855 Register dst = $dst$$Register;
10857 __ seh(dst, src);
10858 %}
10859 ins_pipe(ialu_regI_regI);
10860 %}
10862 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
10863 // This idiom is used by the compiler for the i2b bytecode.
10864 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10865 %{
10866 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10868 format %{ "i2b $dst, $src\t# @i2b" %}
10869 ins_encode %{
10870 Register src = $src$$Register;
10871 Register dst = $dst$$Register;
10873 __ seb(dst, src);
10874 %}
10875 ins_pipe(ialu_regI_regI);
10876 %}
10879 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10880 match(Set dst (LShiftI (ConvL2I src) shift));
10882 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10883 ins_encode %{
10884 Register src = $src$$Register;
10885 Register dst = $dst$$Register;
10886 int shamt = $shift$$constant;
10888 __ sll(dst, src, shamt);
10889 %}
10890 ins_pipe( ialu_regI_regI );
10891 %}
10893 // Shift Left by 8-bit immediate
10894 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10895 match(Set dst (LShiftI src shift));
10897 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10898 ins_encode %{
10899 Register src = $src$$Register;
10900 Register dst = $dst$$Register;
10901 Register shamt = $shift$$Register;
10902 __ sllv(dst, src, shamt);
10903 %}
10904 ins_pipe( ialu_regI_regI );
10905 %}
// Shift Left Long by immediate.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dsll encodes only a 5-bit shift amount.  Small counts go straight
    // through; otherwise the count is reduced mod 64 and split between
    // dsll (0..31) and dsll32 (32..63, encoded as sa - 32).
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a widened int, by immediate.
// NOTE(review): salL_convI2L_Reg_imm below has the identical match rule
// and cost — one of the two appears redundant; confirm before removing.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // No explicit sign-extension for ConvI2L: relies on ints being kept
    // sign-extended in 64-bit registers on this port (TODO confirm).
    // Shift-amount splitting as in salL_Reg_imm.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long, count in a register.
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    __ dsllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a widened int, by immediate.
// NOTE(review): duplicates salL_RegI2L_imm above (same match rule, same
// cost) — the matcher will only ever use one of them; confirm intent.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Shift-amount splitting as in salL_Reg_imm: dsll for counts that
    // fit 5 bits, otherwise mask to 6 bits and use dsll/dsll32.
    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long (arithmetic) by immediate.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Long shift counts are defined mod 64.
    int shamt = ($shift$$constant & 0x3f);
    // dsra takes a 5-bit amount; counts 32..63 use dsra32 (sa - 32).
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Narrow a long arithmetic right shift (count 32..63) directly to int.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    // For counts >= 32 the shift and the ConvL2I narrowing fold into a
    // single dsra32; the result is already properly sign-extended.
    __ dsra32($dst$$Register, $src$$Register, $shift$$constant - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically, count in a register.
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    __ dsrav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically, count in a register.
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    __ dsrlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by a small immediate (0..31).
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    __ dsrl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >>> shift) & Integer.MAX_VALUE folds to a 31-bit field
// extract starting at bit `shift`.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    __ dext($dst$$Register, $src$$Register, $shift$$constant, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer reinterpreted as bits (0..31).
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    // CastP2X is a no-op at the register level; just shift.
    __ dsrl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by a large immediate (32..63).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    // dsrl32 encodes the amount as (count - 32).
    __ dsrl32($dst$$Register, $src$$Register, $shift$$constant - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >>> shift) for shift strictly greater than 32.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  // Strictly > 32: with shift == 32, bit 31 of the 32-bit result could
  // be set and dsrl32 alone would not leave dst in the sign-extended
  // form expected of ints — presumably why 32 itself is excluded here.
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer reinterpreted as bits (32..63).
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    // CastP2X is a no-op at the register level; dsrl32 encodes the
    // amount as (count - 32).
    __ dsrl32($dst$$Register, $src$$Register, $shift$$constant - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register d = $dst$$Register;
    __ xorr(d, $src1$$Register, $src2$$Register);
    // sll by 0 re-establishes the sign-extended 32-bit form in the
    // 64-bit register.
    __ sll(d, d, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (src >>> rshift) | ((src & 1) << lshift) where rshift + lshift == 32:
// rotate the low bit to the top, then shift the rest into place.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  // Only applies when the two shift counts sum to exactly 32.
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    // rshift == 1 needs no further shift.
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or a long register with pointer bits.
// NOTE(review): operands are mRegL/mRegP although the matched node is
// OrI — looks inherited from an earlier port; confirm the typing.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    __ srl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask with a contiguous low-bit mask becomes a
// single bit-field extract.
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    // is_int_mask() yields the mask's width in bits.
    int width = Assembler::is_int_mask($mask$$constant);

    __ ext($dst$$Register, $src$$Register, $shift$$constant, width);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate int left: (dst << lshift) | (dst >>> rshift), legal only when
// the counts sum to 0 mod 32; emitted as rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    __ rotr($dst$$Register, $dst$$Register, $rshift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long left with a small right count (0..31); counts must sum
// to 0 mod 64.  Emitted as 64-bit rotate-right.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    __ drotr($dst$$Register, $dst$$Register, $rshift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long left with a large right count (32..63); counts must sum
// to 0 mod 64.  drotr32 encodes the amount as (count - 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    __ drotr32($dst$$Register, $dst$$Register, $rshift$$constant - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate int right: (dst >>> rshift) | (dst << lshift), legal only
// when the counts sum to 0 mod 32.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    __ rotr($dst$$Register, $dst$$Register, $rshift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long right with a small count (0..31); counts must sum to 0
// mod 64.
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    __ drotr($dst$$Register, $dst$$Register, $rshift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long right with a large count (32..63); counts must sum to 0
// mod 64.  drotr32 encodes the amount as (count - 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    __ drotr32($dst$$Register, $dst$$Register, $rshift$$constant - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right, count in a register.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    __ srlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic Shift Right by immediate.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    __ sra($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic Shift Right, count in a register.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    __ srav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// Conv2B: dst = (src != 0) ? 1 : 0.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      // Set dst = 1, then movz conditionally clears it when src == 0.
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash the source in AT first, since writing
      // the constant 1 into dst would destroy it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Widen int to long.  Ints are held sign-extended in 64-bit registers,
// so only an aliasing check and (possibly) a re-extension are needed.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;

    // sll with shamt 0 sign-extends the low 32 bits.
    if (d != s) __ sll(d, s, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Narrow long to int: sll by 0 truncates to 32 bits and sign-extends.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    __ sll($dst$$Register, $src$$Register, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (long)(int)src: a single truncate-and-sign-extend.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    __ sll($dst$$Register, $src$$Register, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Convert long to double: move bits into the FPU, then convert.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    FloatRegister fdst = $dst$$FloatRegister;

    __ dmtc1($src$$Register, fdst);
    __ cvt_d_l(fdst, fdst);
  %}

  ins_pipe( pipe_slow );
%}
// Convert double to long, fast path.  The truncated result is trusted
// unless it equals max_long, in which case overflow/NaN are sorted out
// inline (Java requires max_long on positive overflow, min_long on
// negative overflow, and 0 for NaN).
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Common case: result is not the max_long sentinel — keep it.
    // The delay slot pre-loads F30 with 0 for the slow path below.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    // src < 0.0 (unordered when NaN)?  If not, the input really was
    // max_long-or-larger and dst already holds max_long.
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // NOTE(review): 32-bit subu here, while the analogous convD2I code
    // uses subu32 — confirm this produces min_long as intended.
    __ subu(dst, T9, AT);
    __ movt(dst, R0);   // NaN converts to 0.

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// Convert double to long, slow path: use the hardware truncate when it
// raises no invalid-operation flag, otherwise call SharedRuntime::d2l.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    // NaN converts to 0 (Java semantics): branch out with dst = 0
    // placed in the delay slot.
    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    // Truncate, then test the FCSR flag at bit 16 (0x10000); when the
    // conversion was valid, keep the truncated result (delay slot).
    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Out-of-range input: defer to the runtime helper.
    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to int, fast path: truncate toward zero, then force
// the Java-mandated 0 for NaN (c_un_s sets the FP condition flag iff
// fval is NaN; movt then overwrites the result with zero).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to int, slow path: hardware truncate when valid,
// otherwise call SharedRuntime::f2i (preserving V0 around the call).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // NaN converts to 0 (Java semantics); the move sits in the branch
    // delay slot.
    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    // Test the FCSR flag at bit 16; if clear, the truncated value in
    // F30 is valid and is taken from the delay slot.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to long, fast path: truncate toward zero, then force
// the Java-mandated 0 for NaN via the FP condition flag and movt.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to long, slow path: hardware truncate when valid,
// otherwise call SharedRuntime::f2l.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // NaN converts to 0 (Java semantics); the move sits in the branch
    // delay slot.
    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    // Truncate, then test the FCSR flag at bit 16; when clear, take
    // the truncated result from the delay slot.
    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Out-of-range input: defer to the runtime helper.
    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert long to float: move bits into the FPU, then convert.
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);
    // (Removed a never-used "Label L;" local left over from the
    // convF2L-style slow-path template.)

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Convert int to float: move bits into the FPU, then convert.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    FloatRegister fdst = $dst$$FloatRegister;

    __ mtc1($src$$Register, fdst);
    __ cvt_s_w(fdst, fdst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: all-ones when p < 0, else 0 — exactly the
// sign bit smeared across the word.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    __ sra($dst$$Register, $p$$Register, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register d = $dst$$Register;

    // slt leaves 0/1; negating turns 1 into the all-ones mask.
    __ slt(d, $p$$Register, $q$$Register);
    __ subu(d, R0, d);
  %}
  ins_pipe( pipe_slow );
%}
// Conv2B on a pointer: dst = (src != NULL) ? 1 : 0.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      // Set dst = 1, then movz conditionally clears it when src == 0.
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash the source in AT first, since writing
      // the constant 1 into dst would destroy it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Convert int to double: move bits into the FPU, then convert.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    FloatRegister fdst = $dst$$FloatRegister;
    __ mtc1($src$$Register, fdst);
    __ cvt_d_w(fdst, fdst);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Widen float to double (always exact).
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    __ cvt_d_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Narrow double to float (rounds per the current FP rounding mode).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    __ cvt_s_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trust the truncated result unless it equals max_int, then
// sort out overflow (max_int / min_int) and NaN (0) inline.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Common case: result is not the max_int sentinel — keep it.  The
    // delay slot pre-loads F30 with 0 for the checks below.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);
    // src < 0.0 (unordered when NaN)?  If not, the input really was
    // max_int-or-larger and dst already holds max_int.
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);  // -1 - max_int == min_int
    __ movt(dst, R0);        // NaN converts to 0.

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Convert double to int, slow path: hardware truncate when valid,
// otherwise call SharedRuntime::d2i.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    // Truncate, then test the FCSR flag at bit 16 (0x10000); when
    // clear, the truncated value is taken from the delay slot.
    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    // Out-of-range or NaN input: defer to the runtime helper.
    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form (may-be-null variant).
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // encode_heap_oop works in place, so copy first when needed.
    if (s != d) {
      __ move(d, s);
    }
    __ encode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop known to be non-null (no null check needed).
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;
    __ encode_heap_oop_not_null(d, s);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop that may be null.
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register narrow = $src$$Register;
    Register wide = $dst$$Register;
    // decode_heap_oop works in place, so copy first when needed.
    if (narrow != wide) {
      __ move(wide, narrow);
    }
    __ decode_heap_oop(wide);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop known to be non-null.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register narrow = $src$$Register;
    Register wide = $dst$$Register;
    // Two-register form when they differ, in-place form when aliased.
    if (narrow != wide) {
      __ decode_heap_oop_not_null(wide, narrow);
    } else {
      __ decode_heap_oop_not_null(wide);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (klass pointers are never null).
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    Register d = $dst$$Register;
    Register s = $src$$Register;
    __ encode_klass_not_null(d, s);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow klass pointer (never null).
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register narrow = $src$$Register;
    Register wide = $dst$$Register;
    // Two-register form when they differ, in-place form when aliased.
    if (narrow != wide) {
      __ decode_klass_not_null(wide, narrow);
    } else {
      __ decode_klass_not_null(wide);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Load the current thread pointer (ThreadLocal node).
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    // OPT_THREAD builds pin the thread pointer in TREG.
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP only narrows the type the compiler knows for dst;
// no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a compiler-only type assertion; emits no code.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a compiler-only type assertion; emits no code.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    // Jump through the return address; nop fills the branch delay slot.
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
11965 /*
11966 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11967 instruct jumpXtnd(mRegL switch_val) %{
11968 match(Jump switch_val);
11970 ins_cost(350);
11972 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11973 "jr T9\n\t"
11974 "nop" %}
11975 ins_encode %{
11976 Register table_base = $constanttablebase;
11977 int con_offset = $constantoffset;
11978 Register switch_reg = $switch_val$$Register;
11980 if (UseLoongsonISA) {
11981 if (Assembler::is_simm(con_offset, 8)) {
11982 __ gsldx(T9, table_base, switch_reg, con_offset);
11983 } else if (Assembler::is_simm16(con_offset)) {
11984 __ daddu(T9, table_base, switch_reg);
11985 __ ld(T9, T9, con_offset);
11986 } else {
11987 __ move(T9, con_offset);
11988 __ daddu(AT, table_base, switch_reg);
11989 __ gsldx(T9, AT, T9, 0);
11990 }
11991 } else {
11992 if (Assembler::is_simm16(con_offset)) {
11993 __ daddu(T9, table_base, switch_reg);
11994 __ ld(T9, T9, con_offset);
11995 } else {
11996 __ move(T9, con_offset);
11997 __ daddu(AT, table_base, switch_reg);
11998 __ daddu(AT, T9, AT);
11999 __ ld(T9, AT, 0);
12000 }
12001 }
12003 __ jr(T9);
12004 __ nop();
12006 %}
12007 ins_pipe(pipe_jump);
12008 %}
12009 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    // Test the label pointer itself.  The previous code formed a
    // reference via "Label &L = *($labl$$label);" and then tested
    // "&L" — that dereferences a possibly-null pointer before the
    // check (undefined behavior, and the address of a reference is
    // never null as far as the compiler is concerned).
    Label* L = $labl$$label;
    if (L != NULL)
      __ b(*L);
    else
      __ b(int(0));
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     * [stubGenerator_mips.cpp] generate_forward_exception()
     * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand the exception oop and pc to the stub in the registers it
    // expects, then jump; nop fills the branch delay slot.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  // 16-byte alignment of the call site; presumably required so the call
  // can be patched safely — confirm against ret_addr_offset()/compute_padding().
  ins_alignment(16);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}

// Call to a runtime leaf routine that takes no FP arguments; no safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12110 // Prefetch instructions.
// Prefetch for read: compute the effective address of $mem into AT and
// issue a MIPS pref with hint 0 (load).
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT <- base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT <- AT + disp.
    // BUGFIX: the old code re-read 'base' here (discarding the index
    // contribution computed above) and, in the simm16 case, added 'disp'
    // twice (daddiu(AT, base, disp) followed by daddiu(AT, AT, disp)),
    // so the prefetch targeted the wrong address. Prefetch is only a
    // hint, so this was a silent performance bug, not a crash.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: compute the effective address of $mem into AT and
// issue a MIPS pref with hint 1 (store).
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT <- base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT <- AT + disp.
    // BUGFIX: same defect as prefetchrNTA — the old code re-read 'base'
    // (dropping the index part) and added 'disp' twice in the simm16
    // case, prefetching the wrong address.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// Prefetch-for-allocation implemented as a dummy byte load into R0
// (the zero register), which touches the cache line without changing
// architectural state. Uses gslbx (indexed load byte) on Loongson.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    Register dst = R0;  // load target is the hard-wired zero register

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // disp fits in the load's 16-bit immediate (8-bit for gslbx).
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          // NOTE(review): 32-bit addu is used for address arithmetic here,
          // while the other prefetch rules use daddu — verify this is safe
          // for the address ranges PrefetchAllocation can see.
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9 first.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  // BUGFIX: the format string previously tagged this rule "@ loadC"
  // (copy-paste), mislabeling it in -XX:+PrintOptoAssembly output.
  format %{ "loadUS $dst,$mem @ loadUS" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Char (16bit unsigned) and widen to long in one matched pattern.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src,$mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  // NOTE(review): pipe class is ialu_loadI for a store; other store rules
  // in this file use ialu_storeI — verify this is intentional.
  ins_pipe( ialu_loadI );
%}
// Load float constant +0.0f directly from the integer zero register.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit lwc1's 16-bit immediate: build it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Load double constant +0.0 directly from the integer zero register
// (dmtc1 moves all 64 zero bits).
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load double constant from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit ldc1's 16-bit immediate: build it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}

// Store float +0.0f: the zero bit pattern is stored with an integer sw
// of R0, avoiding the FPU entirely.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT <- base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }

    } else {
      // No index: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
// Currently emitted identically to loadD; see FIXME below.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double +0.0: materialize zero in the scratch FP register F30,
// then sdc1 it to the effective address of $mem.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // A single dmtc1 writes all 64 zero bits (the encoding of +0.0),
    // matching loadConD0. The previous two-instruction sequence
    // (mtc1 + cvt.d.w of integer 0) produced the identical bit pattern.
    __ dmtc1(R0, F30);

    if( index != 0 ) {
      // AT <- base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sdc1(F30, AT, 0);
      }

    } else {
      // No index: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sdc1(F30, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// ----- Stack-slot moves -----
// All stack-slot operands are SP-relative; the displacement must fit a
// 16-bit signed immediate, which each rule asserts with guarantee().

// Load int from stack slot.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store int to stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load long from stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store long to stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load pointer from stack slot.
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store pointer to stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load float from stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store float to stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store double to stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; the heavy lifting is in MacroAssembler::fast_lock.
// $box is USE_KILL: fast_lock clobbers it.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path monitor exit; see MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Uses the _sync variant of the byte-store encoding (card-table writes
// need ordering for concurrent collectors).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // BUGFIX: corrected the typo in the stop message ("ShoudNotReachHere").
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe( pipe_jump );
%}
// Load effective address: base + disp8, for narrow-oop addressing with
// zero shift (predicate guards this).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// Load effective address: base + (index << scale) + disp8.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// Load effective address: base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on an int compare. cmpOp (signed) cmpcodes:
// 0x01 eq, 0x02 ne, 0x03 gt, 0x04 ge, 0x05 lt, 0x06 le (the case labels
// say above/below; slt is the signed compare, matching cmpOp rather than
// cmpOpU).
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // Unbound label (&L == null): emit offset 0 to be patched later.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a small immediate: compute
// AT = src1 - src2 (immI16_sub guarantees -op2 fits addiu32), then
// branch on AT's sign/zero, which encodes the signed comparison result.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    __ addiu32(AT, op1, -1 * op2);

    // Unbound label (&L == null): emit offset 0 to be patched later.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12857 /*
12858 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12859 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12860 match(CountedLoopEnd cop cmp);
12861 effect(USE labl);
12863 ins_cost(300);
12864 format %{ "J$cop,u $labl\t# Loop end" %}
12865 size(6);
12866 opcode(0x0F, 0x80);
12867 ins_encode( Jcc( cop, labl) );
12868 ins_pipe( pipe_jump );
12869 ins_pc_relative(1);
12870 %}
12872 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12873 match(CountedLoopEnd cop cmp);
12874 effect(USE labl);
12876 ins_cost(200);
12877 format %{ "J$cop,u $labl\t# Loop end" %}
12878 opcode(0x0F, 0x80);
12879 ins_encode( Jcc( cop, labl) );
12880 ins_pipe( pipe_jump );
12881 ins_pc_relative(1);
12882 %}
12883 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// NOTE(review): the branch sense looks inverted relative to the cmpcode
// (equal -> bne, not_equal -> beq). This appears deliberate: AT holds the
// boolean success flag left by the preceding conditional store (nonzero on
// success, zero on failure) — confirm against storeIConditional.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100); // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS implementation: LL/SC retry loop. On exit AT holds the result flag
// read by jmpCon_flags: 0xFF on success, 0 on failure (set in the branch
// delay slots).
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
 // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

//    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Indexed addressing is not supported by LL/SC here.
    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      __ sync();
      __ ll(AT, addr);                       // load-linked current value
      __ bne(AT, oldval, failure);           // mismatch -> fail
      __ delayed()->addu(AT, R0, R0);        // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                       // store-conditional; AT = 1 if it stuck
      __ beq(AT, R0, again);                 // lost reservation -> retry
      __ delayed()->addiu(AT, R0, 0xFF);     // delay slot: AT = 0xFF (success flag)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Delegates to MacroAssembler::cmpxchg (64-bit LLD/SCD loop), which
// clobbers $oldval (hence KILL) and leaves the result flag in AT.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Indexed addressing is not supported here.
    if( index != 0 ) {
      // BUGFIX: stop message previously said "storeIConditional"
      // (copy-paste from the int variant), which would mislead debugging.
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwap int: cmpxchg32 leaves success flag in AT, copied to $res.
// $oldval is clobbered by the cmpxchg helper (hence KILL).
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}

//FIXME:
// CompareAndSwap pointer: 64-bit cmpxchg.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}

// CompareAndSwap narrow oop: 32-bit cmpxchg with explicit sign extension
// of the (zero-extended) compressed-oop oldval, see comment below.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13072 //----------Max and Min--------------------------------------------------------
13073 // Min Instructions
13074 ////
13075 // *** Min and Max using the conditional move are slower than the
13076 // *** branch version on a Pentium III.
13077 // // Conditional move for min
13078 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13079 // effect( USE_DEF op2, USE op1, USE cr );
13080 // format %{ "CMOVlt $op2,$op1\t! min" %}
13081 // opcode(0x4C,0x0F);
13082 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13083 // ins_pipe( pipe_cmov_reg );
13084 //%}
13085 //
13086 //// Min Register with Register (P6 version)
13087 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13088 // predicate(VM_Version::supports_cmov() );
13089 // match(Set op2 (MinI op1 op2));
13090 // ins_cost(200);
13091 // expand %{
13092 // eFlagsReg cr;
13093 // compI_eReg(cr,op1,op2);
13094 // cmovI_reg_lt(op2,op1,cr);
13095 // %}
13096 //%}
// Min Register with Register (generic version)
// dst = min(dst, src): slt sets AT when src < dst (signed); movn then
// conditionally moves src into dst.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13117 // Max Register with Register
13118 // *** Min and Max using the conditional move are slower than the
13119 // *** branch version on a Pentium III.
13120 // // Conditional move for max
13121 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13122 // effect( USE_DEF op2, USE op1, USE cr );
13123 // format %{ "CMOVgt $op2,$op1\t! max" %}
13124 // opcode(0x4F,0x0F);
13125 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13126 // ins_pipe( pipe_cmov_reg );
13127 //%}
13128 //
13129 // // Max Register with Register (P6 version)
13130 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13131 // predicate(VM_Version::supports_cmov() );
13132 // match(Set op2 (MaxI op1 op2));
13133 // ins_cost(200);
13134 // expand %{
13135 // eFlagsReg cr;
13136 // compI_eReg(cr,op1,op2);
13137 // cmovI_reg_gt(op2,op1,cr);
13138 // %}
13139 //%}
// Max Register with Register (generic version)
// dst = max(dst, src): slt sets AT when dst < src (signed); movn then
// conditionally moves src into dst.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero via slt/movn against R0.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// AndL with 0xFFFFFFFF mask == zero-extend the low 32 bits; dext extracts
// bits [0,32) into dst with zero fill.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Pack two ints into one long: src1 in the low 32 bits, src2 in the high
// 32 bits. The three cases below handle register aliasing between dst and
// the sources using the MIPS64 insert/extract instructions.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place: insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift src2 up to the high half, then insert src1 into the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13214 // Zero-extend convert int to long
// Zero-extend convert int to long: dst = (long)src & 0xFFFFFFFF.
13215 instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
13216 %{
13217 match(Set dst (AndL (ConvI2L src) mask));
13219 format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
13220 ins_encode %{
13221 Register dst = $dst$$Register;
13222 Register src = $src$$Register;
// One dext performs the whole zero-extension.
13224 __ dext(dst, src, 0, 32);
13225 %}
13226 ins_pipe(ialu_regI_regI);
13227 %}
// Fold the L2I-then-I2L round trip with a 32-bit mask into a single
// zero-extension: dst = src & 0xFFFFFFFF.
13229 instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
13230 %{
13231 match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));
13233 format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
13234 ins_encode %{
13235 Register dst = $dst$$Register;
13236 Register src = $src$$Register;
// Truncation plus zero-extension collapses to extracting bits [0,32).
13238 __ dext(dst, src, 0, 32);
13239 %}
13240 ins_pipe(ialu_regI_regI);
13241 %}
13243 // Match loading integer and casting it to unsigned int in long register.
13244 // LoadI + ConvI2L + AndL 0xffffffff.
// LoadI + ConvI2L + AndL 0xffffffff (mask on the right) folded into one
// zero-extending 32-bit load (lwu, per the format).
// NOTE(review): reuses load_N_enc, the narrow-oop load encoding -- presumably
// it emits lwu; confirm against the enc_class definition.
13245 instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
13246 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
13248 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
13249 ins_encode (load_N_enc(dst, mem));
13250 ins_pipe(ialu_loadI);
13251 %}
// Same as loadUI2L_rmask but with the mask as the left AndL operand,
// since ideal-graph operand order is not canonicalized for this shape.
13253 instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
13254 match(Set dst (AndL mask (ConvI2L (LoadI mem))));
13256 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
13257 ins_encode (load_N_enc(dst, mem));
13258 ins_pipe(ialu_loadI);
13259 %}
13262 // ============================================================================
13263 // Safepoint Instruction
// Safepoint poll: read a word from the polling page. When the VM arms a
// safepoint it protects that page, so this load faults and traps into the
// safepoint handler.
13264 instruct safePoint_poll(mRegP poll) %{
13265 match(SafePoint poll);
13266 effect(USE poll);
13268 ins_cost(125);
13269 format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}
13271 ins_encode %{
13272 Register poll_reg = $poll$$Register;
13274 __ block_comment("Safepoint:");
// Record a poll relocation so the VM can recognize this PC as a poll site.
13275 __ relocate(relocInfo::poll_type);
// The loaded value is discarded (AT); only the access matters.
13276 __ lw(AT, poll_reg, 0);
13277 %}
13279 ins_pipe( ialu_storeI );
13280 %}
13282 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op here: zero cost, empty encoding, empty pipe.
13284 instruct roundFloat_nop(regF dst)
13285 %{
13286 match(Set dst (RoundFloat dst));
13288 ins_cost(0);
13289 ins_encode();
13290 ins_pipe(empty);
13291 %}
// RoundDouble is a no-op here: zero cost, empty encoding, empty pipe.
13293 instruct roundDouble_nop(regD dst)
13294 %{
13295 match(Set dst (RoundDouble dst));
13297 ins_cost(0);
13298 ins_encode();
13299 ins_pipe(empty);
13300 %}
13302 //---------- Zeros Count Instructions ------------------------------------------
13303 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the MIPS clz instruction.
// Guarded by the UseCountLeadingZerosInstruction flag.
13304 instruct countLeadingZerosI(mRegI dst, mRegI src) %{
13305 predicate(UseCountLeadingZerosInstruction);
13306 match(Set dst (CountLeadingZerosI src));
13308 format %{ "clz $dst, $src\t# count leading zeros (int)" %}
13309 ins_encode %{
13310 __ clz($dst$$Register, $src$$Register);
13311 %}
13312 ins_pipe( ialu_regL_regL );
13313 %}
// Count leading zeros of a long with the MIPS64 dclz instruction.
13315 instruct countLeadingZerosL(mRegI dst, mRegL src) %{
13316 predicate(UseCountLeadingZerosInstruction);
13317 match(Set dst (CountLeadingZerosL src));
13319 format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
13320 ins_encode %{
13321 __ dclz($dst$$Register, $src$$Register);
13322 %}
13323 ins_pipe( ialu_regL_regL );
13324 %}
// Count trailing zeros of an int.
13326 instruct countTrailingZerosI(mRegI dst, mRegI src) %{
13327 predicate(UseCountTrailingZerosInstruction);
13328 match(Set dst (CountTrailingZerosI src));
13330 format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
13331 ins_encode %{
// ctz and dctz are GS (Godson/Loongson-specific) instructions, not base
// MIPS -- hence the UseCountTrailingZerosInstruction predicate above.
13333 __ ctz($dst$$Register, $src$$Register);
13334 %}
13335 ins_pipe( ialu_regL_regL );
13336 %}
// Count trailing zeros of a long with the GS (Godson/Loongson-specific)
// dctz instruction; guarded by UseCountTrailingZerosInstruction.
13338 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13339 predicate(UseCountTrailingZerosInstruction);
13340 match(Set dst (CountTrailingZerosL src));
// Fix: format previously said "dcto", but the encoding emits dctz.
13342 format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
13343 ins_encode %{
13344 __ dctz($dst$$Register, $src$$Register);
13345 %}
13346 ins_pipe( ialu_regL_regL );
13347 %}
13349 // ====================VECTOR INSTRUCTIONS=====================================
13351 // Load vectors (8 bytes long)
// Load an 8-byte vector into a vecD (double FP) register, reusing the
// double-precision load encoding.
13352 instruct loadV8(vecD dst, memory mem) %{
13353 predicate(n->as_LoadVector()->memory_size() == 8);
13354 match(Set dst (LoadVector mem));
13355 ins_cost(125);
13356 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
13357 ins_encode(load_D_enc(dst, mem));
13358 ins_pipe( fpu_loadF );
13359 %}
13361 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD register, reusing the
// double-precision store encoding.
13362 instruct storeV8(memory mem, vecD src) %{
13363 predicate(n->as_StoreVector()->memory_size() == 8);
13364 match(Set mem (StoreVector mem src));
13365 ins_cost(145);
13366 format %{ "store $mem, $src\t! store vector (8 bytes)" %}
13367 ins_encode(store_D_reg_enc(mem, src));
13368 ins_pipe( fpu_storeF );
13369 %}
// Replicate a byte scalar into all 8 lanes of a vecD register.
// replv_ob is a Loongson multimedia instruction; presumably it replicates
// the low byte of the GPR across all 8 byte lanes -- confirm against the
// Loongson GS ISA manual.
13371 instruct Repl8B(vecD dst, mRegI src) %{
13372 predicate(n->as_Vector()->length() == 8);
13373 match(Set dst (ReplicateB src));
13374 format %{ "replv_ob AT, $src\n\t"
13375 "dmtc1 AT, $dst\t! replicate8B" %}
13376 ins_encode %{
13377 __ replv_ob(AT, $src$$Register);
// Move the packed 64-bit value from AT into the FP/vector register.
13378 __ dmtc1(AT, $dst$$FloatRegister);
13379 %}
13380 ins_pipe( pipe_mtc1 );
13381 %}
// Replicate a byte immediate into all 8 lanes of a vecD register.
// NOTE(review): unlike Repl4S_imm below, no range check is done on $con;
// assumes byte constants always fit repl_ob's immediate field -- confirm.
13383 instruct Repl8B_imm(vecD dst, immI con) %{
13384 predicate(n->as_Vector()->length() == 8);
13385 match(Set dst (ReplicateB con));
13386 format %{ "repl_ob AT, [$con]\n\t"
13387 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
13388 ins_encode %{
13389 int val = $con$$constant;
13390 __ repl_ob(AT, val);
13391 __ dmtc1(AT, $dst$$FloatRegister);
13392 %}
13393 ins_pipe( pipe_mtc1 );
13394 %}
// Replicate byte zero: a single move of R0 zeroes all 64 bits of the vector.
13396 instruct Repl8B_zero(vecD dst, immI0 zero) %{
13397 predicate(n->as_Vector()->length() == 8);
13398 match(Set dst (ReplicateB zero));
13399 format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
13400 ins_encode %{
13401 __ dmtc1(R0, $dst$$FloatRegister);
13402 %}
13403 ins_pipe( pipe_mtc1 );
13404 %}
// Replicate byte -1: nor(AT, R0, R0) produces all-ones, which is every
// byte lane set to 0xFF.
13406 instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
13407 predicate(n->as_Vector()->length() == 8);
13408 match(Set dst (ReplicateB M1));
13409 format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
13410 ins_encode %{
13411 __ nor(AT, R0, R0);
13412 __ dmtc1(AT, $dst$$FloatRegister);
13413 %}
13414 ins_pipe( pipe_mtc1 );
13415 %}
// Replicate a short scalar into all 4 halfword lanes of a vecD register.
// replv_qh is a Loongson multimedia instruction (quad-halfword replicate).
13417 instruct Repl4S(vecD dst, mRegI src) %{
13418 predicate(n->as_Vector()->length() == 4);
13419 match(Set dst (ReplicateS src));
13420 format %{ "replv_qh AT, $src\n\t"
13421 "dmtc1 AT, $dst\t! replicate4S" %}
13422 ins_encode %{
13423 __ replv_qh(AT, $src$$Register);
13424 __ dmtc1(AT, $dst$$FloatRegister);
13425 %}
13426 ins_pipe( pipe_mtc1 );
13427 %}
// Replicate a short immediate into all 4 halfword lanes of a vecD register.
// Small constants use the immediate form; larger ones are materialized in
// AT first and replicated from the register.
13429 instruct Repl4S_imm(vecD dst, immI con) %{
13430 predicate(n->as_Vector()->length() == 4);
13431 match(Set dst (ReplicateS con));
13432 format %{ "replv_qh AT, [$con]\n\t"
13433 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13434 ins_encode %{
13435 int val = $con$$constant;
13436 if ( Assembler::is_simm(val, 10)) {
//repl_qh supports 10 bits immediate
13438 __ repl_qh(AT, val);
13439 } else {
// Out of immediate range: load the 32-bit constant, then replicate.
13440 __ li32(AT, val);
13441 __ replv_qh(AT, AT);
13442 }
13443 __ dmtc1(AT, $dst$$FloatRegister);
13444 %}
13445 ins_pipe( pipe_mtc1 );
13446 %}
// Replicate short zero: a single move of R0 zeroes all 64 bits.
13448 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13449 predicate(n->as_Vector()->length() == 4);
13450 match(Set dst (ReplicateS zero));
13451 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13452 ins_encode %{
13453 __ dmtc1(R0, $dst$$FloatRegister);
13454 %}
13455 ins_pipe( pipe_mtc1 );
13456 %}
// Replicate short -1: all-ones pattern covers every halfword lane.
13458 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13459 predicate(n->as_Vector()->length() == 4);
13460 match(Set dst (ReplicateS M1));
13461 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13462 ins_encode %{
13463 __ nor(AT, R0, R0);
13464 __ dmtc1(AT, $dst$$FloatRegister);
13465 %}
13466 ins_pipe( pipe_mtc1 );
13467 %}
13469 // Replicate integer (4 byte) scalar to be vector
// Replicate integer (4 byte) scalar to be vector
// Builds the pattern in AT: src in bits [0,32) and again in [32,64);
// together the two inserts write all 64 bits of AT.
13470 instruct Repl2I(vecD dst, mRegI src) %{
13471 predicate(n->as_Vector()->length() == 2);
13472 match(Set dst (ReplicateI src));
13473 format %{ "dins AT, $src, 0, 32\n\t"
13474 "dinsu AT, $src, 32, 32\n\t"
13475 "dmtc1 AT, $dst\t! replicate2I" %}
13476 ins_encode %{
13477 __ dins(AT, $src$$Register, 0, 32);
13478 __ dinsu(AT, $src$$Register, 32, 32);
13479 __ dmtc1(AT, $dst$$FloatRegister);
13480 %}
13481 ins_pipe( pipe_mtc1 );
13482 %}
13484 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// NOTE(review): the encoding only uses AT, yet tmp (A7) is declared and
// KILLed -- presumably a leftover or a constraint for li32; confirm.
13485 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13486 predicate(n->as_Vector()->length() == 2);
13487 match(Set dst (ReplicateI con));
13488 effect(KILL tmp);
13489 format %{ "li32 AT, [$con], 32\n\t"
13490 "replv_pw AT, AT\n\t"
13491 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13492 ins_encode %{
13493 int val = $con$$constant;
// Materialize the 32-bit constant, replicate it into both word lanes.
13494 __ li32(AT, val);
13495 __ replv_pw(AT, AT);
13496 __ dmtc1(AT, $dst$$FloatRegister);
13497 %}
13498 ins_pipe( pipe_mtc1 );
13499 %}
13501 // Replicate integer (4 byte) scalar zero to be vector
// Replicate integer (4 byte) scalar zero to be vector
13502 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13503 predicate(n->as_Vector()->length() == 2);
13504 match(Set dst (ReplicateI zero));
13505 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13506 ins_encode %{
13507 __ dmtc1(R0, $dst$$FloatRegister);
13508 %}
13509 ins_pipe( pipe_mtc1 );
13510 %}
13512 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate integer (4 byte) scalar -1 to be vector
// nor(AT, R0, R0) yields all-ones, i.e. -1 in both 32-bit lanes.
13513 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13514 predicate(n->as_Vector()->length() == 2);
13515 match(Set dst (ReplicateI M1));
13516 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13517 ins_encode %{
13518 __ nor(AT, R0, R0);
13519 __ dmtc1(AT, $dst$$FloatRegister);
13520 %}
13521 ins_pipe( pipe_mtc1 );
13522 %}
13524 // Replicate float (4 byte) scalar to be vector
// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs two singles into one paired-single register; passing the
// same source twice duplicates it into both lanes.
13525 instruct Repl2F(vecD dst, regF src) %{
13526 predicate(n->as_Vector()->length() == 2);
13527 match(Set dst (ReplicateF src));
13528 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13529 ins_encode %{
13530 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13531 %}
13532 ins_pipe( pipe_slow );
13533 %}
13535 // Replicate float (4 byte) scalar zero to be vector
// Replicate float (4 byte) scalar zero to be vector
// Moving R0 into the FP register zeroes both single-precision lanes
// (+0.0f is all-zero bits).
13536 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13537 predicate(n->as_Vector()->length() == 2);
13538 match(Set dst (ReplicateF zero));
13539 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13540 ins_encode %{
13541 __ dmtc1(R0, $dst$$FloatRegister);
13542 %}
13543 ins_pipe( pipe_mtc1 );
13544 %}
13547 // ====================VECTOR ARITHMETIC=======================================
13549 // --------------------------------- ADD --------------------------------------
13551 // Floats vector add
// Packed 2-float add (two-operand form): dst += src, using the MIPS
// paired-single add.ps instruction.
13552 instruct vadd2F(vecD dst, vecD src) %{
13553 predicate(n->as_Vector()->length() == 2);
13554 match(Set dst (AddVF dst src));
13555 format %{ "add.ps $dst,$src\t! add packed2F" %}
13556 ins_encode %{
13557 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13558 %}
13559 ins_pipe( pipe_slow );
13560 %}
// Packed 2-float add (three-operand form): dst = src1 + src2.
13562 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13563 predicate(n->as_Vector()->length() == 2);
13564 match(Set dst (AddVF src1 src2));
13565 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13566 ins_encode %{
13567 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13568 %}
13569 ins_pipe( fpu_regF_regF );
13570 %}
13572 // --------------------------------- SUB --------------------------------------
13574 // Floats vector sub
// Packed 2-float subtract: dst -= src, via paired-single sub.ps.
13575 instruct vsub2F(vecD dst, vecD src) %{
13576 predicate(n->as_Vector()->length() == 2);
13577 match(Set dst (SubVF dst src));
13578 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13579 ins_encode %{
13580 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13581 %}
13582 ins_pipe( fpu_regF_regF );
13583 %}
13585 // --------------------------------- MUL --------------------------------------
13587 // Floats vector mul
// Packed 2-float multiply (two-operand form): dst *= src, via mul.ps.
13588 instruct vmul2F(vecD dst, vecD src) %{
13589 predicate(n->as_Vector()->length() == 2);
13590 match(Set dst (MulVF dst src));
13591 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13592 ins_encode %{
13593 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13594 %}
13595 ins_pipe( fpu_regF_regF );
13596 %}
// Packed 2-float multiply (three-operand form): dst = src1 * src2.
13598 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13599 predicate(n->as_Vector()->length() == 2);
13600 match(Set dst (MulVF src1 src2));
13601 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13602 ins_encode %{
13603 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13604 %}
13605 ins_pipe( fpu_regF_regF );
13606 %}
13608 // --------------------------------- DIV --------------------------------------
13609 // MIPS does not provide a div.ps instruction, so no packed-float divide is defined.
13612 //----------PEEPHOLE RULES-----------------------------------------------------
13613 // These must follow all instruction definitions as they use the names
13614 // defined in the instructions definitions.
13615 //
13616 // peepmatch ( root_instr_name [preceding_instruction]* );
13617 //
13618 // peepconstraint %{
13619 // (instruction_number.operand_name relational_op instruction_number.operand_name
13620 // [, ...] );
13621 // // instruction numbers are zero-based using left to right order in peepmatch
13622 //
13623 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13624 // // provide an instruction_number.operand_name for each operand that appears
13625 // // in the replacement instruction's match rule
13626 //
13627 // ---------VM FLAGS---------------------------------------------------------
13628 //
13629 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13630 //
13631 // Each peephole rule is given an identifying number starting with zero and
13632 // increasing by one in the order seen by the parser. An individual peephole
13633 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13634 // on the command-line.
13635 //
13636 // ---------CURRENT LIMITATIONS----------------------------------------------
13637 //
13638 // Only match adjacent instructions in same basic block
13639 // Only equality constraints
13640 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13641 // Only one replacement instruction
13642 //
13643 // ---------EXAMPLE----------------------------------------------------------
13644 //
13645 // // pertinent parts of existing instructions in architecture description
13646 // instruct movI(eRegI dst, eRegI src) %{
13647 // match(Set dst (CopyI src));
13648 // %}
13649 //
13650 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13651 // match(Set dst (AddI dst src));
13652 // effect(KILL cr);
13653 // %}
13654 //
13655 // // Change (inc mov) to lea
13656 // peephole %{
13657 // // increment preceded by register-register move
13658 // peepmatch ( incI_eReg movI );
13659 // // require that the destination register of the increment
13660 // // match the destination register of the move
13661 // peepconstraint ( 0.dst == 1.dst );
13662 // // construct a replacement instruction that sets
13663 // // the destination to ( move's source register + one )
13664 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13665 // %}
13666 //
13667 // Implementation no longer uses movX instructions since
13668 // machine-independent system no longer uses CopyX nodes.
13669 //
13670 // peephole %{
13671 // peepmatch ( incI_eReg movI );
13672 // peepconstraint ( 0.dst == 1.dst );
13673 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13674 // %}
13675 //
13676 // peephole %{
13677 // peepmatch ( decI_eReg movI );
13678 // peepconstraint ( 0.dst == 1.dst );
13679 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13680 // %}
13681 //
13682 // peephole %{
13683 // peepmatch ( addI_eReg_imm movI );
13684 // peepconstraint ( 0.dst == 1.dst );
13685 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13686 // %}
13687 //
13688 // peephole %{
13689 // peepmatch ( addP_eReg_imm movP );
13690 // peepconstraint ( 0.dst == 1.dst );
13691 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13692 // %}
13694 // // Change load of spilled value to only a spill
13695 // instruct storeI(memory mem, eRegI src) %{
13696 // match(Set mem (StoreI mem src));
13697 // %}
13698 //
13699 // instruct loadI(eRegI dst, memory mem) %{
13700 // match(Set dst (LoadI mem));
13701 // %}
13702 //
13703 //peephole %{
13704 // peepmatch ( loadI storeI );
13705 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13706 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13707 //%}
13709 //----------SMARTSPILL RULES---------------------------------------------------
13710 // These must follow all instruction definitions as they use the names
13711 // defined in the instructions definitions.