Wed, 22 Mar 2017 07:13:07 -0400
[C2] Redefine memory operands for MIPS.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
// S6 is reserved for get_thread(S6)
// S5 is used as the heap-base register for compressed oops
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
definitions %{
  // Baseline cost of a plain ALU instruction; all other costs scale from it.
  int_def DEFAULT_COST ( 100, 100);
  // Effectively-infinite cost, used to make a match rule unattractive.
  int_def HUGE_COST (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
  // we use jr instruction to construct call, so more expensive
  // by yjl 2/28/2006
  int_def CALL_COST ( 500, DEFAULT_COST * 5);
  // Condition-code constants below are kept commented out (unused).
  /*
  int_def EQUAL ( 1, 1 );
  int_def NOT_EQUAL ( 2, 2 );
  int_def GREATER ( 3, 3 );
  int_def GREATER_EQUAL ( 4, 4 );
  int_def LESS ( 5, 5 );
  int_def LESS_EQUAL ( 6, 6 );
  */
%}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Call-trampoline accounting used by Compile::shorten_branches.
// This platform emits no call trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---< Used for optimization in Compile::shorten_branches >---
  //--------------------------------------------------------------

 public:
  // Size (in bytes) of a call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Exception / deopt handler stubs: emission entry points (defined in the
// source %{ %} block below) plus size queries used by the code buffer.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16); // padded to a 16-byte boundary
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
// Emits a stub that loads the exception blob's entry point into T9 with a
// patchable 48-bit constant and jumps through it (jr, no link).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
   * However, this will trigger an assert after the 40th method:
   *
   *   39    b  java.lang.Throwable::<init> (25 bytes)
   *   --- ns  java.lang.Throwable::fillInStackTrace
   *   40   !b  java.net.URLClassLoader::findClass (29 bytes)
   *   /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
   *   40   made not entrant  (2)  java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert will disappear, but WebClient will
   * fail after the 403th method with unknown reason.
   */
  // Mark and relocate before emitting so the relocation covers the
  // patchable constant-load sequence.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop(); // branch delay slot
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Loads the deopt blob's unpack entry into T9 via a patchable 48-bit
// constant and calls it with jalr (linked, unlike the exception handler).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);          // linked jump: deopt machinery needs the return address
  __ delayed()->nop();  // branch delay slot
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
//FIXME
// emit call stub, compiled java to interpreter
// The stub materializes the methodOop placeholder in S3 and jumps through AT
// to a target that is patched in later; both constants use patchable 48-bit
// load sequences so relocs/nativeInst/ic code can rewrite them.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark(); // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */

  // static stub relocation also tags the methodOop in the code-stream.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1; // placeholder target, patched later
  __ patchable_set48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
677 // size of call stub, compiled java to interpretor
678 uint size_java_to_interp() {
679 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
680 return round_to(size, 16);
681 }
// relocation entries for call stub, compiled java to interpreter
// Fixed budget covering the relocations emitted in emit_java_to_interp plus
// the Java_Static_Call site itself.
uint reloc_java_to_interp() {
  return 16; // in emit_java_to_interp + in Java_Static_Call
}
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else {
691 assert(false, "Not implemented yet !" );
692 Unimplemented();
693 }
694 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers? True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Narrow-oop decoding is never folded into a complex addressing mode here.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same policy for compressed class pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Max vector size in bytes. 0 if not supported.
// Only 8-byte (VecD) vectors are supported on this port.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg
// Maps a vector byte-size to its ideal register class; only VecD exists here.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are not supported on this port; always fatals.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}

// Register for DIVI projection of divmodI
// The fused divmod projections below should never be requested on this
// port — each mask query traps via ShouldNotReachHere.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer argument registers: T0 and A0-A7 (plus their high halves).
  if ( reg == T0_num || reg == T0_H_num
    || reg == A0_num || reg == A0_H_num
    || reg == A1_num || reg == A1_H_num
    || reg == A2_num || reg == A2_H_num
    || reg == A3_num || reg == A3_H_num
    || reg == A4_num || reg == A4_H_num
    || reg == A5_num || reg == A5_H_num
    || reg == A6_num || reg == A6_H_num
    || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point argument registers: F12-F19 (plus their high halves).
  if ( reg == F12_num || reg == F12_H_num
    || reg == F13_num || reg == F13_H_num
    || reg == F14_num || reg == F14_H_num
    || reg == F15_num || reg == F15_H_num
    || reg == F16_num || reg == F16_H_num
    || reg == F17_num || reg == F17_H_num
    || reg == F18_num || reg == F18_H_num
    || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}

// A register is spillable as an argument exactly when it is a Java arg reg.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use an asm fast path for long division by a constant on this port.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}
// Register for MODL projection of divmodL
// Never requested on this port (no fused divmodL) — traps if called.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Use FP as the register that preserves SP across a method-handle invoke.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// Padding (in bytes) needed to bring the call site up to its required
// alignment. The commented mnemonics sketch the emitted call sequence
// (lui/ori/dsll/ori to build the target, then jalr + delay-slot nop).
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for leaf direct calls.
int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for runtime direct calls.
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// The ecx parameter to rep stos for the ClearArray node is in dwords.
const bool Matcher::init_array_count_is_in_bytes = false;

// Indicate if the safepoint node needs the polling page as an input.
// Since MIPS doesn't have absolute addressing, it needs.
// NOTE(review): the comment above says the polling page IS needed, yet the
// code returns false — confirm whether the comment or the return value is
// the intended behavior.
bool SafePointNode::needs_polling_address_input() {
  return false;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
// Static call: six 4-byte instructions (lui/ori/nop/nop/jalr/nop) = 24 bytes.
int MachCallStaticJavaNode::ret_addr_offset() {
  //lui
  //ori
  //nop
  //nop
  //jalr
  //nop
  return 24;
}

// Dynamic call: four instructions to materialize the IC klass plus six for
// the T9 call sequence = 4 * 4 + 4 * 6 = 40 bytes.
int MachCallDynamicJavaNode::ret_addr_offset() {
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass

  //lui T9
  //ori T9
  //nop
  //nop
  //jalr T9
  //nop
  return 4 * 4 + 4 * 6;
}
946 //=============================================================================
948 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
949 enum RC { rc_bad, rc_int, rc_float, rc_stack };
950 static enum RC rc_class( OptoReg::Name reg ) {
951 if( !OptoReg::is_valid(reg) ) return rc_bad;
952 if (OptoReg::is_stack(reg)) return rc_stack;
953 VMReg r = OptoReg::as_VMReg(reg);
954 if (r->is_Register()) return rc_int;
955 assert(r->is_FloatRegister(), "must be");
956 return rc_float;
957 }
// Emit, size, or pretty-print the machine code for a register-allocator
// spill copy.  Source and destination are each classified as gpr (rc_int),
// fpr (rc_float) or stack slot (rc_stack), giving the 3x3 matrix of move
// flavors handled below; each cell distinguishes a 64-bit (aligned
// even/odd register pair) from a 32-bit move.
//   cbuf != NULL : emit real code into cbuf
//   cbuf == NULL : print assembly to st (unless do_size is set)
// Returns the number of code bytes the copy occupies (0 for a self copy).
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem : bounce through the AT scratch register.
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: one doubleword load + store via AT.
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;  // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;  // two 4-byte instructions
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw (sign-extends) for ints, lwu (zero-extends) otherwise.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm  (fpr, in x86-legacy terminology)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 for ints; otherwise a full-width copy via
        // daddu dst, src, R0 (preserves all 64 bits).
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm (fpr)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the printed operand order (dst, src) is the
            // reverse of the emitted dmtc1(src_gpr, dst_fpr) -- confirm
            // the intended disassembly convention.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm (fpr) ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm (fpr) -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm (fpr -> fpr)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Every legal combination returns above; reaching here is a bug.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Debug-only: print the spill copy's assembly without emitting code
// (cbuf == NULL, do_size == false).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy's machine code into cbuf.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy (do_size == true: no code, no printing).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1390 //=============================================================================
1391 #
#ifndef PRODUCT
// Debug-only printer for the breakpoint node ("INT3" is x86-legacy
// wording; on MIPS it corresponds to the break emitted by int3()).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node: fall back to the generic emit-and-measure
// implementation.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1409 //=============================================================================
1410 #ifndef PRODUCT
1411 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1412 Compile *C = ra_->C;
1413 int framesize = C->frame_size_in_bytes();
1415 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1417 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1418 st->cr(); st->print("\t");
1419 if (UseLoongsonISA) {
1420 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1421 } else {
1422 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1423 st->cr(); st->print("\t");
1424 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1425 }
1427 if( do_polling() && C->is_method_compilation() ) {
1428 st->print("Poll Safepoint # MachEpilogNode");
1429 }
1430 }
1431 #endif
// Emit the method epilogue: pop the frame, restore RA/FP from just above
// the new SP, and (for method compilations that poll) read the polling
// page so a safepoint can trap the return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP then sit at negative offsets off SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson load-pair: restore RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Load from the polling page; a safepoint protects the page so this
    // load traps when a safepoint is requested.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size: too many variables; just compute it the hard way via the
// generic emit-and-measure implementation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on the number of relocations the epilogue may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the default pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilogue sequence.
int MachEpilogNode::safepoint_offset() const { return 0; }
1470 //=============================================================================
#ifndef PRODUCT
// Debug-only printer: the BoxLock materializes the address of its stack
// slot (SP + offset) into its assigned register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// BoxLock emits exactly one 4-byte instruction (see emit() below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the address of this BoxLock's stack slot into its assigned
// register: reg = SP + offset (the monitor/box location in the frame).
// (Stale commented-out x86 LEA encoding removed.)
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi takes only a simm16 immediate and traps on 32-bit
  // signed overflow -- this assumes the box offset always fits; confirm
  // for very large frames.
  __ addi(as_Register(reg), SP, offset);
}
1508 //static int sizeof_FFree_Float_Stack_All = -1;
// Byte offset from the start of a runtime-call sequence to the return
// address: four instructions build the target in a register (lui/ori/
// dsll/ori), then jalr plus its delay-slot nop -- six 4-byte instructions,
// which NativeCall::instruction_size must match.
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1526 //=============================================================================
#ifndef PRODUCT
// Debug-only printer: _count nops = 4 * _count bytes of padding.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1533 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1534 MacroAssembler _masm(&cbuf);
1535 int i = 0;
1536 for(i = 0; i < _count; i++)
1537 __ nop();
1538 }
// Size of the pad: one 4-byte nop per count.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the default pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1547 //=============================================================================
1549 //=============================================================================
#ifndef PRODUCT
// Debug-only printer for the Unverified Entry Point: mirrors the
// inline-cache check emitted by MachUEPNode::emit() below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
1560 #endif
// Unverified Entry Point: verify the receiver's klass against the inline
// cache; on mismatch, tail-jump to SharedRuntime's IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;              // calling convention: receiver in T0
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);               // klass matches cache: fall through
  __ nop();                            // branch delay slot

  // Mismatch: jump to the inline-cache miss stub via T9.
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();                            // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);                          // verified entry point starts here
}
// UEP size: variable due to alignment padding; use the generic
// emit-and-measure implementation.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1593 //=============================================================================
// The constant-table base is allocated from the P (pointer) register class.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

// The table base is materialized as an absolute address (see
// MachConstantBaseNode::emit), so the base offset is always zero.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
// No post-register-allocation expansion is needed on this platform.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }

void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  // Never called: requires_postalloc_expand() returns false above.
  ShouldNotReachHere();
}
// Materialize the absolute address of the constant table base into this
// node's assigned register with a patchable 48-bit set, relocated as an
// internal pc so the address survives code relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    // table_base_offset() is 0 here (absolute addressing), so baseaddr is
    // simply the start of the constants section.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// patchable_set48 always emits a fixed 4-instruction sequence.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug-only printer mirroring MachConstantBaseNode::emit().
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1638 //=============================================================================
#ifndef PRODUCT
// Debug-only pretty printer for the method prologue: optional stack bang,
// save RA/FP below SP (gssq store-pair on Loongson), set up FP, then
// allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson store-pair saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prologue: optional stack-overflow bang, save RA/FP below
// the incoming SP, establish FP, allocate the frame, and pad with two nops
// so the verified entry can be patched later.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // NOTE(review): the bang decision and check use framesize here, while
  // format() above (and the commented-out verified_entry call) use
  // bangsize -- confirm which is intended.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (UseLoongsonISA) {
    // Loongson store-pair saves RA and FP in one instruction.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
// Prologue size: too many variables; just compute it the hard way via the
// generic emit-and-measure implementation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on the number of relocations the prologue may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1713 %}
1715 //----------ENCODING BLOCK-----------------------------------------------------
1716 // This block specifies the encoding classes used by the compiler to output
1717 // byte streams. Encoding classes generate functions which are called by
1718 // Machine Instruction Nodes in order to generate the bit encoding of the
1719 // instruction. Operands specify their base encoding interface with the
1720 // interface keyword. There are currently supported four interfaces,
1721 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1722 // operand to generate a function which returns its register number when
1723 // queried. CONST_INTER causes an operand to generate a function which
1724 // returns the value of the constant when queried. MEMORY_INTER causes an
1725 // operand to generate four functions which return the Base Register, the
1726 // Index Register, the Scale Value, and the Offset Value of the operand when
1727 // queried. COND_INTER causes an operand to generate six functions which
1728 // return the encoding code (ie - encoding bits for the instruction)
1729 // associated with each basic boolean condition for a conditional instruction.
1730 // Instructions specify two basic values for encoding. They use the
1731 // ins_encode keyword to specify their encoding class (which must be one of
1732 // the class names specified in the encoding block), and they use the
1733 // opcode keyword to specify, in order, their primary, secondary, and
1734 // tertiary opcode. Only the opcode sections which a particular instruction
1735 // needs for encoding need to be specified.
1736 encode %{
1737 /*
1738 Alias:
1739 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1740 118 B14: # B19 B15 <- B13 Freq: 0.899955
1741 118 add S1, S2, V0 #@addP_reg_reg
1742 11c lb S0, [S1 + #-8257524] #@loadB
1743 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1744 */
1745 //Load byte signed
  // Load byte, signed (lb): dst = sign_extend(mem[base + (index << scale) + disp]).
  // Uses the Loongson gslbx indexed-load where available; otherwise forms
  // the effective address in AT (and T9 for a non-simm16 displacement).
  // NOTE(review): address arithmetic here uses 32-bit addu, while the
  // unsigned variant below uses daddu -- confirm addu is safe for 64-bit
  // addresses on this port.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // Indexed address: base + (index << scale) + disp.
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit simm16: build it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
1802 //Load byte unsigned
  // Load byte, unsigned (lbu): dst = zero_extend(mem[base + (index << scale) + disp]).
  // Unlike load_B_enc above, this variant does not use the Loongson indexed
  // load and always forms the address with 64-bit daddu.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // Indexed: AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register (sb): mem[base + (index << scale) + disp] = src.
  // Uses the Loongson gssbx indexed store when the displacement fits its
  // 8-bit immediate; otherwise forms the address in AT (and T9).
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src = $src$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        // Unscaled index.
        if( Assembler::is_simm(disp, 8) ) {
          // gssbx only takes an 8-bit signed displacement.
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store immediate byte (sb): mem[base + (index << scale) + disp] = value.
  // A zero immediate is stored straight from R0; otherwise the value is
  // materialized in T9 (or AT).  The Loongson path prefers the gssbx
  // indexed store when the displacement fits its 8-bit immediate.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: form the full address in AT, value in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: use gssbx where the displacement permits.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp, then index supplies the gssbx offset.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; base supplies the gssbx base.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store an 8-bit immediate to memory, followed by a SYNC memory barrier
  // (volatile byte-store semantics): *(int8_t*)(base + (index << scale) + disp) := src.
  // Addressing strategy mirrors store_B_immI_enc; AT/T9 are scratch and a
  // zero immediate is stored directly from R0.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp,8) ) {
          // disp fits gssbx's 8-bit offset field.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // disp does not fit 16 bits: fold it into the index operand and
          // use gssbx with a zero offset.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Memory barrier after the store (volatile semantics).
    __ sync();
  %}
  // Load Short (16-bit signed):
  //   dst := sign_extend(*(int16_t*)(base + (index << scale) + disp))
  // Uses lh, or the Loongson gslhx indexed form when the displacement fits
  // its 8-bit offset field.  AT and T9 are scratch.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gslhx's 8-bit offset field: single indexed load.
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large disp: fold it into the index operand of gslhx.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2250 // Load Char (16bit unsigned)
2251 enc_class load_C_enc (mRegI dst, memory mem) %{
2252 MacroAssembler _masm(&cbuf);
2253 int dst = $dst$$reg;
2254 int base = $mem$$base;
2255 int index = $mem$$index;
2256 int scale = $mem$$scale;
2257 int disp = $mem$$disp;
2259 if( index != 0 ) {
2260 if (scale == 0) {
2261 __ daddu(AT, as_Register(base), as_Register(index));
2262 } else {
2263 __ dsll(AT, as_Register(index), scale);
2264 __ daddu(AT, as_Register(base), AT);
2265 }
2266 if( Assembler::is_simm16(disp) ) {
2267 __ lhu(as_Register(dst), AT, disp);
2268 } else {
2269 __ move(T9, disp);
2270 __ addu(AT, AT, T9);
2271 __ lhu(as_Register(dst), AT, 0);
2272 }
2273 } else {
2274 if( Assembler::is_simm16(disp) ) {
2275 __ lhu(as_Register(dst), as_Register(base), disp);
2276 } else {
2277 __ move(T9, disp);
2278 __ daddu(AT, as_Register(base), T9);
2279 __ lhu(as_Register(dst), AT, 0);
2280 }
2281 }
2282 %}
2284 // Store Char (16bit unsigned)
2285 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2286 MacroAssembler _masm(&cbuf);
2287 int src = $src$$reg;
2288 int base = $mem$$base;
2289 int index = $mem$$index;
2290 int scale = $mem$$scale;
2291 int disp = $mem$$disp;
2293 if( index != 0 ) {
2294 if( Assembler::is_simm16(disp) ) {
2295 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2296 if (scale == 0) {
2297 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2298 } else {
2299 __ dsll(AT, as_Register(index), scale);
2300 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2301 }
2302 } else {
2303 if (scale == 0) {
2304 __ addu(AT, as_Register(base), as_Register(index));
2305 } else {
2306 __ dsll(AT, as_Register(index), scale);
2307 __ addu(AT, as_Register(base), AT);
2308 }
2309 __ sh(as_Register(src), AT, disp);
2310 }
2311 } else {
2312 if (scale == 0) {
2313 __ addu(AT, as_Register(base), as_Register(index));
2314 } else {
2315 __ dsll(AT, as_Register(index), scale);
2316 __ addu(AT, as_Register(base), AT);
2317 }
2318 __ move(T9, disp);
2319 if( UseLoongsonISA ) {
2320 __ gsshx(as_Register(src), AT, T9, 0);
2321 } else {
2322 __ addu(AT, AT, T9);
2323 __ sh(as_Register(src), AT, 0);
2324 }
2325 }
2326 } else {
2327 if( Assembler::is_simm16(disp) ) {
2328 __ sh(as_Register(src), as_Register(base), disp);
2329 } else {
2330 __ move(T9, disp);
2331 if( UseLoongsonISA ) {
2332 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2333 } else {
2334 __ addu(AT, as_Register(base), T9);
2335 __ sh(as_Register(src), AT, 0);
2336 }
2337 }
2338 }
2339 %}
2341 enc_class store_C0_enc (memory mem) %{
2342 MacroAssembler _masm(&cbuf);
2343 int base = $mem$$base;
2344 int index = $mem$$index;
2345 int scale = $mem$$scale;
2346 int disp = $mem$$disp;
2348 if( index != 0 ) {
2349 if( Assembler::is_simm16(disp) ) {
2350 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2351 if (scale == 0) {
2352 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2353 } else {
2354 __ dsll(AT, as_Register(index), scale);
2355 __ gsshx(R0, as_Register(base), AT, disp);
2356 }
2357 } else {
2358 if (scale == 0) {
2359 __ addu(AT, as_Register(base), as_Register(index));
2360 } else {
2361 __ dsll(AT, as_Register(index), scale);
2362 __ addu(AT, as_Register(base), AT);
2363 }
2364 __ sh(R0, AT, disp);
2365 }
2366 } else {
2367 if (scale == 0) {
2368 __ addu(AT, as_Register(base), as_Register(index));
2369 } else {
2370 __ dsll(AT, as_Register(index), scale);
2371 __ addu(AT, as_Register(base), AT);
2372 }
2373 __ move(T9, disp);
2374 if( UseLoongsonISA ) {
2375 __ gsshx(R0, AT, T9, 0);
2376 } else {
2377 __ addu(AT, AT, T9);
2378 __ sh(R0, AT, 0);
2379 }
2380 }
2381 } else {
2382 if( Assembler::is_simm16(disp) ) {
2383 __ sh(R0, as_Register(base), disp);
2384 } else {
2385 __ move(T9, disp);
2386 if( UseLoongsonISA ) {
2387 __ gsshx(R0, as_Register(base), T9, 0);
2388 } else {
2389 __ addu(AT, as_Register(base), T9);
2390 __ sh(R0, AT, 0);
2391 }
2392 }
2393 }
2394 %}
2396 enc_class load_I_enc (mRegI dst, memory mem) %{
2397 MacroAssembler _masm(&cbuf);
2398 int dst = $dst$$reg;
2399 int base = $mem$$base;
2400 int index = $mem$$index;
2401 int scale = $mem$$scale;
2402 int disp = $mem$$disp;
2404 if( index != 0 ) {
2405 if( Assembler::is_simm16(disp) ) {
2406 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2407 if (scale == 0) {
2408 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2409 } else {
2410 __ dsll(AT, as_Register(index), scale);
2411 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2412 }
2413 } else {
2414 if (scale == 0) {
2415 __ addu(AT, as_Register(base), as_Register(index));
2416 } else {
2417 __ dsll(AT, as_Register(index), scale);
2418 __ addu(AT, as_Register(base), AT);
2419 }
2420 __ lw(as_Register(dst), AT, disp);
2421 }
2422 } else {
2423 if (scale == 0) {
2424 __ addu(AT, as_Register(base), as_Register(index));
2425 } else {
2426 __ dsll(AT, as_Register(index), scale);
2427 __ addu(AT, as_Register(base), AT);
2428 }
2429 __ move(T9, disp);
2430 if( UseLoongsonISA ) {
2431 __ gslwx(as_Register(dst), AT, T9, 0);
2432 } else {
2433 __ addu(AT, AT, T9);
2434 __ lw(as_Register(dst), AT, 0);
2435 }
2436 }
2437 } else {
2438 if( Assembler::is_simm16(disp) ) {
2439 __ lw(as_Register(dst), as_Register(base), disp);
2440 } else {
2441 __ move(T9, disp);
2442 if( UseLoongsonISA ) {
2443 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2444 } else {
2445 __ addu(AT, as_Register(base), T9);
2446 __ lw(as_Register(dst), AT, 0);
2447 }
2448 }
2449 }
2450 %}
2452 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2453 MacroAssembler _masm(&cbuf);
2454 int src = $src$$reg;
2455 int base = $mem$$base;
2456 int index = $mem$$index;
2457 int scale = $mem$$scale;
2458 int disp = $mem$$disp;
2460 if( index != 0 ) {
2461 if( Assembler::is_simm16(disp) ) {
2462 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2463 if (scale == 0) {
2464 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2465 } else {
2466 __ dsll(AT, as_Register(index), scale);
2467 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2468 }
2469 } else {
2470 if (scale == 0) {
2471 __ addu(AT, as_Register(base), as_Register(index));
2472 } else {
2473 __ dsll(AT, as_Register(index), scale);
2474 __ addu(AT, as_Register(base), AT);
2475 }
2476 __ sw(as_Register(src), AT, disp);
2477 }
2478 } else {
2479 if (scale == 0) {
2480 __ addu(AT, as_Register(base), as_Register(index));
2481 } else {
2482 __ dsll(AT, as_Register(index), scale);
2483 __ addu(AT, as_Register(base), AT);
2484 }
2485 __ move(T9, disp);
2486 if( UseLoongsonISA ) {
2487 __ gsswx(as_Register(src), AT, T9, 0);
2488 } else {
2489 __ addu(AT, AT, T9);
2490 __ sw(as_Register(src), AT, 0);
2491 }
2492 }
2493 } else {
2494 if( Assembler::is_simm16(disp) ) {
2495 __ sw(as_Register(src), as_Register(base), disp);
2496 } else {
2497 __ move(T9, disp);
2498 if( UseLoongsonISA ) {
2499 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2500 } else {
2501 __ addu(AT, as_Register(base), T9);
2502 __ sw(as_Register(src), AT, 0);
2503 }
2504 }
2505 }
2506 %}
  // Store a 32-bit immediate to memory: *(int32_t*)(base + (index << scale) + disp) := src.
  // AT and T9 are scratch; a zero immediate is stored directly from R0.
  // When UseLoongsonISA is enabled, gsswx (store word, register-indexed with
  // an 8-bit offset) folds base+index addressing into one instruction.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit offset field.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // disp does not fit 16 bits: fold it into the index operand and
          // use gsswx with a zero offset.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp only.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load narrow oop (compressed pointer, 32-bit zero-extended via lwu):
  //   dst := zero_extend(*(uint32_t*)(base + (index << scale) + disp))
  // The memory operand must not carry a relocation on its displacement.
  // set64 (rather than move) materializes the large displacement so the
  // full 64-bit value is loaded into T9.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load Pointer (64-bit): dst := *(intptr_t*)(base + (index << scale) + disp).
  // Uses ld, or the Loongson gsldx indexed form when the displacement fits
  // its 8-bit offset field.  The memory operand must not carry a relocation
  // on its displacement.  AT and T9 are scratch.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsldx's 8-bit offset field: single indexed load.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large disp: fold it into the index operand of gsldx.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit): *(intptr_t*)(base + (index << scale) + disp) := src.
  // Uses sd, or the Loongson gssdx indexed form when the displacement fits
  // its 8-bit offset field.  AT and T9 are scratch.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gssdx's 8-bit offset field: single indexed store.
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large disp: fold it into the index operand of gssdx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store narrow oop (compressed pointer, 32-bit):
  //   *(uint32_t*)(base + (index << scale) + disp) := src
  // Uses sw, or the Loongson gsswx indexed form when the displacement fits
  // its 8-bit offset field.  AT and T9 are scratch.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit offset field: single indexed store.
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large disp: fold it into the index operand of gsswx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store null Pointer (64-bit zero): *(intptr_t*)(base + (index << scale) + disp) := 0,
  // stored directly from the hard-wired zero register R0.  Uses sd, or the
  // Loongson gssdx indexed form when available and the displacement fits
  // its 8-bit offset field.  AT and T9 are scratch.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store zero narrow oop (compressed null, 32-bit):
  //   *(uint32_t*)(base + (index << scale) + disp) := 0
  // stored directly from the hard-wired zero register R0.
  // NOTE(review): the operand class is spelled "ImmN0" here while the rest
  // of the file uses lower-case operand names (immI8, immL0) — confirm the
  // operand declaration matches, since ADLC names are case-sensitive.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit): dst := *(int64_t*)(base + (index << scale) + disp).
  // AT and T9 are scratch.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // History note (2013/03/27, Jin): $base may contain a null object, and
    // the server JIT forces the implicit-exception offset to the position
    // of the FIRST instruction of the node, so an explicit "null check"
    // load from [base + 0] is emitted up front to fault at that offset.
    __ lw(AT, as_Register(base), 0);

    // History note (2012/10/04): a failure in SortTest
    // (java.util.Arrays::sort1) was traced to the original 32-bit pair
    //   __ lw(dst_lo, as_Register(base), disp);
    //   __ lw(dst_hi, as_Register(base), disp + 4);
    // which the single 64-bit ld sequences below replace.

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): this move looks redundant (ld could address off
        // base directly); possibly kept to pad the emitted size — confirm
        // against the matching instruct's size declaration before removing.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit): *(int64_t*)(base + (index << scale) + disp) := src.
  // AT and T9 are scratch.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): this move looks redundant (sd could address off
        // base directly); possibly kept to pad the emitted size — confirm
        // against the matching instruct's size declaration before removing.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
3083 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3084 MacroAssembler _masm(&cbuf);
3085 int base = $mem$$base;
3086 int index = $mem$$index;
3087 int scale = $mem$$scale;
3088 int disp = $mem$$disp;
3090 if( index != 0 ) {
3091 if (scale == 0) {
3092 __ daddu(AT, as_Register(base), as_Register(index));
3093 } else {
3094 __ dsll(AT, as_Register(index), scale);
3095 __ daddu(AT, as_Register(base), AT);
3096 }
3097 if( Assembler::is_simm16(disp) ) {
3098 __ sd(R0, AT, disp);
3099 } else {
3100 __ move(T9, disp);
3101 __ addu(AT, AT, T9);
3102 __ sd(R0, AT, 0);
3103 }
3104 } else {
3105 if( Assembler::is_simm16(disp) ) {
3106 __ move(AT, as_Register(base));
3107 __ sd(R0, AT, disp);
3108 } else {
3109 __ move(T9, disp);
3110 __ addu(AT, as_Register(base), T9);
3111 __ sd(R0, AT, 0);
3112 }
3113 }
3114 %}
  // Load Float (32-bit): dst := *(float*)(base + (index << scale) + disp).
  // Uses lwc1, or the Loongson gslwxc1 indexed form when the displacement
  // fits its 8-bit offset field.  AT and T9 are scratch.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gslwxc1's 8-bit offset field: single indexed load.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3172 enc_class store_F_reg_enc (memory mem, regF src) %{
3173 MacroAssembler _masm(&cbuf);
3174 int base = $mem$$base;
3175 int index = $mem$$index;
3176 int scale = $mem$$scale;
3177 int disp = $mem$$disp;
3178 FloatRegister src = $src$$FloatRegister;
3180 if( index != 0 ) {
3181 if( Assembler::is_simm16(disp) ) {
3182 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3183 if (scale == 0) {
3184 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3185 } else {
3186 __ dsll(AT, as_Register(index), scale);
3187 __ gsswxc1(src, as_Register(base), AT, disp);
3188 }
3189 } else {
3190 if (scale == 0) {
3191 __ daddu(AT, as_Register(base), as_Register(index));
3192 } else {
3193 __ dsll(AT, as_Register(index), scale);
3194 __ daddu(AT, as_Register(base), AT);
3195 }
3196 __ swc1(src, AT, disp);
3197 }
3198 } else {
3199 if (scale == 0) {
3200 __ daddu(AT, as_Register(base), as_Register(index));
3201 } else {
3202 __ dsll(AT, as_Register(index), scale);
3203 __ daddu(AT, as_Register(base), AT);
3204 }
3205 __ move(T9, disp);
3206 if( UseLoongsonISA ) {
3207 __ gsswxc1(src, AT, T9, 0);
3208 } else {
3209 __ daddu(AT, AT, T9);
3210 __ swc1(src, AT, 0);
3211 }
3212 }
3213 } else {
3214 if( Assembler::is_simm16(disp) ) {
3215 __ swc1(src, as_Register(base), disp);
3216 } else {
3217 __ move(T9, disp);
3218 if( UseLoongsonISA ) {
3219 __ gslwxc1(src, as_Register(base), T9, 0);
3220 } else {
3221 __ daddu(AT, as_Register(base), T9);
3222 __ swc1(src, AT, 0);
3223 }
3224 }
3225 }
3226 %}
3228 enc_class load_D_enc (regD dst, memory mem) %{
3229 MacroAssembler _masm(&cbuf);
3230 int base = $mem$$base;
3231 int index = $mem$$index;
3232 int scale = $mem$$scale;
3233 int disp = $mem$$disp;
3234 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3236 if( index != 0 ) {
3237 if( Assembler::is_simm16(disp) ) {
3238 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3239 if (scale == 0) {
3240 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3241 } else {
3242 __ dsll(AT, as_Register(index), scale);
3243 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3244 }
3245 } else {
3246 if (scale == 0) {
3247 __ daddu(AT, as_Register(base), as_Register(index));
3248 } else {
3249 __ dsll(AT, as_Register(index), scale);
3250 __ daddu(AT, as_Register(base), AT);
3251 }
3252 __ ldc1(dst_reg, AT, disp);
3253 }
3254 } else {
3255 if (scale == 0) {
3256 __ daddu(AT, as_Register(base), as_Register(index));
3257 } else {
3258 __ dsll(AT, as_Register(index), scale);
3259 __ daddu(AT, as_Register(base), AT);
3260 }
3261 __ move(T9, disp);
3262 if( UseLoongsonISA ) {
3263 __ gsldxc1(dst_reg, AT, T9, 0);
3264 } else {
3265 __ addu(AT, AT, T9);
3266 __ ldc1(dst_reg, AT, 0);
3267 }
3268 }
3269 } else {
3270 if( Assembler::is_simm16(disp) ) {
3271 __ ldc1(dst_reg, as_Register(base), disp);
3272 } else {
3273 __ move(T9, disp);
3274 if( UseLoongsonISA ) {
3275 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3276 } else {
3277 __ addu(AT, as_Register(base), T9);
3278 __ ldc1(dst_reg, AT, 0);
3279 }
3280 }
3281 }
3282 %}
3284 enc_class store_D_reg_enc (memory mem, regD src) %{
3285 MacroAssembler _masm(&cbuf);
3286 int base = $mem$$base;
3287 int index = $mem$$index;
3288 int scale = $mem$$scale;
3289 int disp = $mem$$disp;
3290 FloatRegister src_reg = as_FloatRegister($src$$reg);
3292 if( index != 0 ) {
3293 if( Assembler::is_simm16(disp) ) {
3294 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3295 if (scale == 0) {
3296 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3297 } else {
3298 __ dsll(AT, as_Register(index), scale);
3299 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3300 }
3301 } else {
3302 if (scale == 0) {
3303 __ daddu(AT, as_Register(base), as_Register(index));
3304 } else {
3305 __ dsll(AT, as_Register(index), scale);
3306 __ daddu(AT, as_Register(base), AT);
3307 }
3308 __ sdc1(src_reg, AT, disp);
3309 }
3310 } else {
3311 if (scale == 0) {
3312 __ daddu(AT, as_Register(base), as_Register(index));
3313 } else {
3314 __ dsll(AT, as_Register(index), scale);
3315 __ daddu(AT, as_Register(base), AT);
3316 }
3317 __ move(T9, disp);
3318 if( UseLoongsonISA ) {
3319 __ gssdxc1(src_reg, AT, T9, 0);
3320 } else {
3321 __ addu(AT, AT, T9);
3322 __ sdc1(src_reg, AT, 0);
3323 }
3324 }
3325 } else {
3326 if( Assembler::is_simm16(disp) ) {
3327 __ sdc1(src_reg, as_Register(base), disp);
3328 } else {
3329 __ move(T9, disp);
3330 if( UseLoongsonISA ) {
3331 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3332 } else {
3333 __ addu(AT, as_Register(base), T9);
3334 __ sdc1(src_reg, AT, 0);
3335 }
3336 }
3337 }
3338 %}
  // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  // Emit a patchable call to a runtime entry point with
  // runtime_call-type relocation information.
  enc_class Java_To_Runtime (method meth) %{
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ patchable_call((address)$meth$$method);
  %}
  // JAVA STATIC CALL
  // Emit a patchable call whose relocation type depends on the call kind,
  // and, for real static calls, a java-to-interpreter stub so the call
  // site can be re-resolved later.
  enc_class Java_Static_Call (method meth) %{
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    if ( !_method ) {
      // Not a compiled Java method: plain runtime call.
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      // Statically-bound virtual call.
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * [Ref: LIR_Assembler::ic_call() ]
   */
  // JAVA DYNAMIC CALL: dispatch through the inline cache.
  enc_class Java_Dynamic_Call (method meth) %{
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a flags value after a fast lock/unlock sequence:
  // flags := 0 when AT == 0, else 0xFFFFFFFF.
  // NOTE(review): assumes AT was set by the preceding fast-lock/unlock
  // code — confirm against the instructs that use this enc_class.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);     // flags = 0
    __ beq(AT, R0, L);
    __ delayed()->nop();        // branch delay slot
    __ move(flags, 0xFFFFFFFF); // AT != 0: flags = all ones
    __ bind(L);
  %}
  // Slow-path partial subtype check:
  //   result := 0 if 'sub' is a subtype of 'super', 1 on a miss.
  // 'tmp' is clobbered as a scratch length register; T9 is also clobbered.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     * 47c B40: # B21 B41 <- B20 Freq: 0.155379
     * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc mov S2, NULL #@loadConP
     * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0);   // hit: result = 0
    __ b(done);
    __ nop();             // branch delay slot

    __ bind(miss);
    __ move(result, 1);   // miss: result = 1
    __ bind(done);
  %}
3423 %}
3426 //---------MIPS FRAME--------------------------------------------------------------
3427 // Definition of frame structure and management information.
3428 //
3429 // S T A C K L A Y O U T Allocators stack-slot number
3430 // | (to get allocators register number
3431 // G Owned by | | v add SharedInfo::stack0)
3432 // r CALLER | |
3433 // o | +--------+ pad to even-align allocators stack-slot
3434 // w V | pad0 | numbers; owned by CALLER
3435 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3436 // h ^ | in | 5
3437 // | | args | 4 Holes in incoming args owned by SELF
3438 // | | old | | 3
3439 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3440 // v | | ret | 3 return address
3441 // Owned by +--------+
3442 // Self | pad2 | 2 pad to align old SP
3443 // | +--------+ 1
3444 // | | locks | 0
3445 // | +--------+----> SharedInfo::stack0, even aligned
3446 // | | pad1 | 11 pad to align new SP
3447 // | +--------+
3448 // | | | 10
3449 // | | spills | 9 spills
3450 // V | | 8 (pad0 slot for callee)
3451 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3452 // ^ | out | 7
3453 // | | args | 6 Holes in outgoing args owned by CALLEE
3454 // Owned by new | |
3455 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3456 // | |
3457 //
3458 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3459 // known from SELF's arguments and the Java calling convention.
3460 // Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3468 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3469 // even aligned with pad0 as needed.
3470 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3471 // region 6-11 is even aligned; it may be padded out more so that
3472 // the region from SP to FP meets the minimum stack alignment.
3473 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3474 // alignment. Region 11, pad1, may be dynamically extended so that
3475 // SP meets the minimum alignment.
frame %{
  // Stack grows downward (toward lower addresses) on MIPS.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generates Compile::sync_stack_slots
  // (two 32-bit slots per lock word on LP64)
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // generates Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // generates Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // generates Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention calls this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled Java return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3593 //----------ATTRIBUTES---------------------------------------------------------
3594 //----------Operand Attributes-------------------------------------------------
3595 op_attrib op_cost(0); // Required cost attribute
3597 //----------Instruction Attributes---------------------------------------------
3598 ins_attrib ins_cost(100); // Required cost attribute
3599 ins_attrib ins_size(32); // Required size attribute (in bits)
3600 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3601 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3602 // non-matching short branch variant of some
3603 // long branch?
3604 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3605 // specifies the alignment that some part of the instruction (not
3606 // necessarily the start) requires. If > 1, a compute_padding()
3607 // function must be provided for the instruction
3609 //----------OPERANDS-----------------------------------------------------------
3610 // Operand definitions must precede instruction definitions for correct parsing
3611 // in the ADLC because operands constitute user defined types which are used in
3612 // instruction definitions.
3614 // Vectors
3615 operand vecD() %{
3616 constraint(ALLOC_IN_RC(dbl_reg));
3617 match(VecD);
3619 format %{ %}
3620 interface(REG_INTER);
3621 %}
3623 // Flags register, used as output of compare instructions
3624 operand FlagsReg() %{
3625 constraint(ALLOC_IN_RC(mips_flags));
3626 match(RegFlags);
3628 format %{ "EFLAGS" %}
3629 interface(REG_INTER);
3630 %}
3632 //----------Simple Operands----------------------------------------------------
//TODO: Do we need to define more special immediate operands?
3634 // Immediate Operands
3635 // Integer Immediate
3636 operand immI() %{
3637 match(ConI);
3638 //TODO: should not match immI8 here LEE
3639 match(immI8);
3641 op_cost(20);
3642 format %{ %}
3643 interface(CONST_INTER);
3644 %}
3646 // Long Immediate 8-bit
3647 operand immL8()
3648 %{
3649 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3650 match(ConL);
3652 op_cost(5);
3653 format %{ %}
3654 interface(CONST_INTER);
3655 %}
3657 // Constant for test vs zero
3658 operand immI0() %{
3659 predicate(n->get_int() == 0);
3660 match(ConI);
3662 op_cost(0);
3663 format %{ %}
3664 interface(CONST_INTER);
3665 %}
3667 // Constant for increment
3668 operand immI1() %{
3669 predicate(n->get_int() == 1);
3670 match(ConI);
3672 op_cost(0);
3673 format %{ %}
3674 interface(CONST_INTER);
3675 %}
3677 // Constant for decrement
3678 operand immI_M1() %{
3679 predicate(n->get_int() == -1);
3680 match(ConI);
3682 op_cost(0);
3683 format %{ %}
3684 interface(CONST_INTER);
3685 %}
3687 operand immI_MaxI() %{
3688 predicate(n->get_int() == 2147483647);
3689 match(ConI);
3691 op_cost(0);
3692 format %{ %}
3693 interface(CONST_INTER);
3694 %}
3696 // Valid scale values for addressing modes
3697 operand immI2() %{
3698 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3699 match(ConI);
3701 format %{ %}
3702 interface(CONST_INTER);
3703 %}
3705 operand immI8() %{
3706 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3707 match(ConI);
3709 op_cost(5);
3710 format %{ %}
3711 interface(CONST_INTER);
3712 %}
3714 operand immI16() %{
3715 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3716 match(ConI);
3718 op_cost(10);
3719 format %{ %}
3720 interface(CONST_INTER);
3721 %}
3723 // Constant for long shifts
3724 operand immI_32() %{
3725 predicate( n->get_int() == 32 );
3726 match(ConI);
3728 op_cost(0);
3729 format %{ %}
3730 interface(CONST_INTER);
3731 %}
3733 operand immI_63() %{
3734 predicate( n->get_int() == 63 );
3735 match(ConI);
3737 op_cost(0);
3738 format %{ %}
3739 interface(CONST_INTER);
3740 %}
3742 operand immI_0_31() %{
3743 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3744 match(ConI);
3746 op_cost(0);
3747 format %{ %}
3748 interface(CONST_INTER);
3749 %}
// Operand for non-negative integer mask
3752 operand immI_nonneg_mask() %{
3753 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3754 match(ConI);
3756 op_cost(0);
3757 format %{ %}
3758 interface(CONST_INTER);
3759 %}
3761 operand immI_32_63() %{
3762 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3763 match(ConI);
3764 op_cost(0);
3766 format %{ %}
3767 interface(CONST_INTER);
3768 %}
// Integer immediate whose negation fits a signed 16-bit field
// (range [-32767, 32768]), so a subtract of this constant can be
// emitted as an add-immediate of -value.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3779 operand immI_0_32767() %{
3780 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3781 match(ConI);
3782 op_cost(0);
3784 format %{ %}
3785 interface(CONST_INTER);
3786 %}
3788 operand immI_0_65535() %{
3789 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3790 match(ConI);
3791 op_cost(0);
3793 format %{ %}
3794 interface(CONST_INTER);
3795 %}
3797 operand immI_1() %{
3798 predicate( n->get_int() == 1 );
3799 match(ConI);
3801 op_cost(0);
3802 format %{ %}
3803 interface(CONST_INTER);
3804 %}
3806 operand immI_2() %{
3807 predicate( n->get_int() == 2 );
3808 match(ConI);
3810 op_cost(0);
3811 format %{ %}
3812 interface(CONST_INTER);
3813 %}
3815 operand immI_3() %{
3816 predicate( n->get_int() == 3 );
3817 match(ConI);
3819 op_cost(0);
3820 format %{ %}
3821 interface(CONST_INTER);
3822 %}
3824 operand immI_7() %{
3825 predicate( n->get_int() == 7 );
3826 match(ConI);
3828 format %{ %}
3829 interface(CONST_INTER);
3830 %}
3832 // Immediates for special shifts (sign extend)
3834 // Constants for increment
3835 operand immI_16() %{
3836 predicate( n->get_int() == 16 );
3837 match(ConI);
3839 format %{ %}
3840 interface(CONST_INTER);
3841 %}
3843 operand immI_24() %{
3844 predicate( n->get_int() == 24 );
3845 match(ConI);
3847 format %{ %}
3848 interface(CONST_INTER);
3849 %}
3851 // Constant for byte-wide masking
3852 operand immI_255() %{
3853 predicate( n->get_int() == 255 );
3854 match(ConI);
3856 op_cost(0);
3857 format %{ %}
3858 interface(CONST_INTER);
3859 %}
3861 operand immI_65535() %{
3862 predicate( n->get_int() == 65535 );
3863 match(ConI);
3865 op_cost(5);
3866 format %{ %}
3867 interface(CONST_INTER);
3868 %}
3870 operand immI_65536() %{
3871 predicate( n->get_int() == 65536 );
3872 match(ConI);
3874 op_cost(5);
3875 format %{ %}
3876 interface(CONST_INTER);
3877 %}
3879 operand immI_M65536() %{
3880 predicate( n->get_int() == -65536 );
3881 match(ConI);
3883 op_cost(5);
3884 format %{ %}
3885 interface(CONST_INTER);
3886 %}
3888 // Pointer Immediate
3889 operand immP() %{
3890 match(ConP);
3892 op_cost(10);
3893 format %{ %}
3894 interface(CONST_INTER);
3895 %}
3897 // NULL Pointer Immediate
3898 operand immP0() %{
3899 predicate( n->get_ptr() == 0 );
3900 match(ConP);
3901 op_cost(0);
3903 format %{ %}
3904 interface(CONST_INTER);
3905 %}
3907 // Pointer Immediate: 64-bit
3908 operand immP_set() %{
3909 match(ConP);
3911 op_cost(5);
3912 // formats are generated automatically for constants and base registers
3913 format %{ %}
3914 interface(CONST_INTER);
3915 %}
3917 // Pointer Immediate: 64-bit
3918 operand immP_load() %{
3919 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3920 match(ConP);
3922 op_cost(5);
3923 // formats are generated automatically for constants and base registers
3924 format %{ %}
3925 interface(CONST_INTER);
3926 %}
3928 // Pointer Immediate: 64-bit
3929 operand immP_no_oop_cheap() %{
3930 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3931 match(ConP);
3933 op_cost(5);
3934 // formats are generated automatically for constants and base registers
3935 format %{ %}
3936 interface(CONST_INTER);
3937 %}
3939 // Pointer for polling page
3940 operand immP_poll() %{
3941 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3942 match(ConP);
3943 op_cost(5);
3945 format %{ %}
3946 interface(CONST_INTER);
3947 %}
3949 // Pointer Immediate
3950 operand immN() %{
3951 match(ConN);
3953 op_cost(10);
3954 format %{ %}
3955 interface(CONST_INTER);
3956 %}
3958 operand immNKlass() %{
3959 match(ConNKlass);
3961 op_cost(10);
3962 format %{ %}
3963 interface(CONST_INTER);
3964 %}
3966 // NULL Pointer Immediate
3967 operand immN0() %{
3968 predicate(n->get_narrowcon() == 0);
3969 match(ConN);
3971 op_cost(5);
3972 format %{ %}
3973 interface(CONST_INTER);
3974 %}
3976 // Long Immediate
3977 operand immL() %{
3978 match(ConL);
3980 op_cost(20);
3981 format %{ %}
3982 interface(CONST_INTER);
3983 %}
3985 // Long Immediate zero
3986 operand immL0() %{
3987 predicate( n->get_long() == 0L );
3988 match(ConL);
3989 op_cost(0);
3991 format %{ %}
3992 interface(CONST_INTER);
3993 %}
3995 operand immL7() %{
3996 predicate( n->get_long() == 7L );
3997 match(ConL);
3998 op_cost(0);
4000 format %{ %}
4001 interface(CONST_INTER);
4002 %}
4004 operand immL_M1() %{
4005 predicate( n->get_long() == -1L );
4006 match(ConL);
4007 op_cost(0);
4009 format %{ %}
4010 interface(CONST_INTER);
4011 %}
4013 // bit 0..2 zero
4014 operand immL_M8() %{
4015 predicate( n->get_long() == -8L );
4016 match(ConL);
4017 op_cost(0);
4019 format %{ %}
4020 interface(CONST_INTER);
4021 %}
4023 // bit 2 zero
4024 operand immL_M5() %{
4025 predicate( n->get_long() == -5L );
4026 match(ConL);
4027 op_cost(0);
4029 format %{ %}
4030 interface(CONST_INTER);
4031 %}
4033 // bit 1..2 zero
4034 operand immL_M7() %{
4035 predicate( n->get_long() == -7L );
4036 match(ConL);
4037 op_cost(0);
4039 format %{ %}
4040 interface(CONST_INTER);
4041 %}
4043 // bit 0..1 zero
4044 operand immL_M4() %{
4045 predicate( n->get_long() == -4L );
4046 match(ConL);
4047 op_cost(0);
4049 format %{ %}
4050 interface(CONST_INTER);
4051 %}
4053 // bit 3..6 zero
4054 operand immL_M121() %{
4055 predicate( n->get_long() == -121L );
4056 match(ConL);
4057 op_cost(0);
4059 format %{ %}
4060 interface(CONST_INTER);
4061 %}
4063 // Long immediate from 0 to 127.
4064 // Used for a shorter form of long mul by 10.
4065 operand immL_127() %{
4066 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4067 match(ConL);
4068 op_cost(0);
4070 format %{ %}
4071 interface(CONST_INTER);
4072 %}
// Operand for non-negative long mask
4075 operand immL_nonneg_mask() %{
4076 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
4077 match(ConL);
4079 op_cost(0);
4080 format %{ %}
4081 interface(CONST_INTER);
4082 %}
4084 operand immL_0_65535() %{
4085 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4086 match(ConL);
4087 op_cost(0);
4089 format %{ %}
4090 interface(CONST_INTER);
4091 %}
4093 // Long Immediate: cheap (materialize in <= 3 instructions)
4094 operand immL_cheap() %{
4095 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
4096 match(ConL);
4097 op_cost(0);
4099 format %{ %}
4100 interface(CONST_INTER);
4101 %}
4103 // Long Immediate: expensive (materialize in > 3 instructions)
4104 operand immL_expensive() %{
4105 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
4106 match(ConL);
4107 op_cost(0);
4109 format %{ %}
4110 interface(CONST_INTER);
4111 %}
4113 operand immL16() %{
4114 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4115 match(ConL);
4117 op_cost(10);
4118 format %{ %}
4119 interface(CONST_INTER);
4120 %}
// Long immediate whose negation fits a signed 16-bit field
// (range [-32767, 32768]), so a subtract of this constant can be
// emitted as an add-immediate of -value.
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
4131 // Long Immediate: low 32-bit mask
4132 operand immL_32bits() %{
4133 predicate(n->get_long() == 0xFFFFFFFFL);
4134 match(ConL);
4135 op_cost(20);
4137 format %{ %}
4138 interface(CONST_INTER);
4139 %}
4141 // Long Immediate 32-bit signed
4142 operand immL32()
4143 %{
4144 predicate(n->get_long() == (int) (n->get_long()));
4145 match(ConL);
4147 op_cost(15);
4148 format %{ %}
4149 interface(CONST_INTER);
4150 %}
4153 //single-precision floating-point zero
4154 operand immF0() %{
4155 predicate(jint_cast(n->getf()) == 0);
4156 match(ConF);
4158 op_cost(5);
4159 format %{ %}
4160 interface(CONST_INTER);
4161 %}
4163 //single-precision floating-point immediate
4164 operand immF() %{
4165 match(ConF);
4167 op_cost(20);
4168 format %{ %}
4169 interface(CONST_INTER);
4170 %}
4172 //double-precision floating-point zero
4173 operand immD0() %{
4174 predicate(jlong_cast(n->getd()) == 0);
4175 match(ConD);
4177 op_cost(5);
4178 format %{ %}
4179 interface(CONST_INTER);
4180 %}
4182 //double-precision floating-point immediate
4183 operand immD() %{
4184 match(ConD);
4186 op_cost(20);
4187 format %{ %}
4188 interface(CONST_INTER);
4189 %}
4191 // Register Operands
4192 // Integer Register
4193 operand mRegI() %{
4194 constraint(ALLOC_IN_RC(int_reg));
4195 match(RegI);
4197 format %{ %}
4198 interface(REG_INTER);
4199 %}
4201 operand no_Ax_mRegI() %{
4202 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4203 match(RegI);
4204 match(mRegI);
4206 format %{ %}
4207 interface(REG_INTER);
4208 %}
4210 operand mS0RegI() %{
4211 constraint(ALLOC_IN_RC(s0_reg));
4212 match(RegI);
4213 match(mRegI);
4215 format %{ "S0" %}
4216 interface(REG_INTER);
4217 %}
4219 operand mS1RegI() %{
4220 constraint(ALLOC_IN_RC(s1_reg));
4221 match(RegI);
4222 match(mRegI);
4224 format %{ "S1" %}
4225 interface(REG_INTER);
4226 %}
4228 operand mS2RegI() %{
4229 constraint(ALLOC_IN_RC(s2_reg));
4230 match(RegI);
4231 match(mRegI);
4233 format %{ "S2" %}
4234 interface(REG_INTER);
4235 %}
4237 operand mS3RegI() %{
4238 constraint(ALLOC_IN_RC(s3_reg));
4239 match(RegI);
4240 match(mRegI);
4242 format %{ "S3" %}
4243 interface(REG_INTER);
4244 %}
4246 operand mS4RegI() %{
4247 constraint(ALLOC_IN_RC(s4_reg));
4248 match(RegI);
4249 match(mRegI);
4251 format %{ "S4" %}
4252 interface(REG_INTER);
4253 %}
4255 operand mS5RegI() %{
4256 constraint(ALLOC_IN_RC(s5_reg));
4257 match(RegI);
4258 match(mRegI);
4260 format %{ "S5" %}
4261 interface(REG_INTER);
4262 %}
4264 operand mS6RegI() %{
4265 constraint(ALLOC_IN_RC(s6_reg));
4266 match(RegI);
4267 match(mRegI);
4269 format %{ "S6" %}
4270 interface(REG_INTER);
4271 %}
4273 operand mS7RegI() %{
4274 constraint(ALLOC_IN_RC(s7_reg));
4275 match(RegI);
4276 match(mRegI);
4278 format %{ "S7" %}
4279 interface(REG_INTER);
4280 %}
4283 operand mT0RegI() %{
4284 constraint(ALLOC_IN_RC(t0_reg));
4285 match(RegI);
4286 match(mRegI);
4288 format %{ "T0" %}
4289 interface(REG_INTER);
4290 %}
4292 operand mT1RegI() %{
4293 constraint(ALLOC_IN_RC(t1_reg));
4294 match(RegI);
4295 match(mRegI);
4297 format %{ "T1" %}
4298 interface(REG_INTER);
4299 %}
4301 operand mT2RegI() %{
4302 constraint(ALLOC_IN_RC(t2_reg));
4303 match(RegI);
4304 match(mRegI);
4306 format %{ "T2" %}
4307 interface(REG_INTER);
4308 %}
// Register-pinned integer (RegI) operands. Each operand constrains the value
// to a single named register via a one-register allocation class, and also
// matches mRegI so patterns written for the general integer operand can still
// accept the pinned form.
operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// General narrow (compressed) oop operand: a RegN value that may live in any
// register of the int_reg allocation class.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}
// Register-pinned narrow-oop (RegN) operands: one operand per allocatable
// register, each constrained to its single-register class and also matching
// mRegN so general narrow-oop patterns accept the pinned form.
operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register
// General pointer operand: a RegP value allocatable anywhere in p_reg.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer operand drawn from the pointer class that excludes T8
// (no_T8_p_reg), for patterns that must not have their input allocated
// to T8 — presumably because the emitted code uses T8 as a scratch
// register; confirm against the encode classes.
operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Register-pinned pointer (RegP) operands for the saved (s*) and temporary
// (t*) registers. Each matches mRegP, and — except t8_RegP — also matches
// no_T8_mRegP, since every register here other than T8 belongs to the
// T8-excluding pointer class.
operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 is not a member of no_T8_p_reg, so t8_RegP deliberately does not
// match no_T8_mRegP.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
// Register-pinned pointer (RegP) operands for the argument (a*) and value
// (v*) registers; all of them also match no_T8_mRegP.
operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4933 /*
4934 operand mSPRegP(mRegP reg) %{
4935 constraint(ALLOC_IN_RC(sp_reg));
4936 match(reg);
4938 format %{ "SP" %}
4939 interface(REG_INTER);
4940 %}
4942 operand mFPRegP(mRegP reg) %{
4943 constraint(ALLOC_IN_RC(fp_reg));
4944 match(reg);
4946 format %{ "FP" %}
4947 interface(REG_INTER);
4948 %}
4949 */
// General long (RegL) operand: allocatable anywhere in the long_reg class.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}
// Register-pinned long (RegL) operands. Each matches mRegL so general long
// patterns accept the pinned form.
// NOTE(review): there are no s5RegL/s6RegL operands — presumably S5/S6 are
// reserved by this port; confirm against the register definitions.
operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

// NOTE(review): a0RegL is the only long operand here with a visible format
// string ("A0"); its siblings use an empty format. Harmless (format affects
// only disassembly output) but inconsistent.
operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands
// Single-precision float operand: any register of the flt_reg class.
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double Precision Floating register operands
// Double-precision float operand: any register of the dbl_reg class.
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Addressing forms recognized by the matcher. MEMORY_INTER exposes each form
// as base/index/scale/disp. The cheaper offsetted forms carry op_cost(5) and
// the offset-less forms op_cost(10), so the matcher prefers folding the
// displacement into the address when it can. The gs* forms are guarded by
// predicate(UseLoongsonISA) — presumably they map to the Loongson extended
// base+index load/store instructions; confirm against the encode classes.

// [base + 16-bit signed displacement] — displacement limited by immL16.
operand baseOffset16(mRegP reg, immL16 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  op_cost(5);
  format %{ "[$reg + $off (16-bit)] @ baseOffset16" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + 8-bit signed displacement] — Loongson-only (immL8).
operand gsBaseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  predicate(UseLoongsonISA);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP base index) off);

  op_cost(5);
  format %{ "[$base + $index + $off (8-bit)] @ gsBaseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Same as gsBaseIndexOffset8 but the index is a 32-bit int sign-extended
// to long (ConvI2L folded into the address).
operand gsBaseIndexI2LOffset8(mRegP base, mRegI index, immL8 off)
%{
  predicate(UseLoongsonISA);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP base (ConvI2L index)) off);

  op_cost(5);
  format %{ "[$base + $index + $off (8-bit)] @ gsBaseIndexI2LOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index], no displacement — Loongson-only.
operand gsBaseIndexOffset0(mRegP addr, mRegL index) %{
  predicate(UseLoongsonISA);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(10);
  format %{"[$addr + $index] @ gsBaseIndexOffset0" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base], register-indirect with no displacement.
operand baseOffset0(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(reg);

  format %{ "[$reg] @ baseOffset0" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop variants: valid only when compressed oops use a zero base and
// zero shift, so the narrow value IS the raw address and the DecodeN can be
// folded directly into the addressing mode.
operand baseOffset16Narrow(mRegN reg, immL16 off)
%{
  predicate(Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeN reg) off);

  op_cost(5);
  format %{ "[$reg + $off (16-bit)] @ baseOffset16Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand gsBaseIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(UseLoongsonISA && Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(5);
  format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand baseOffset0Narrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_base() == 0 && Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(DecodeN reg);

  op_cost(10);
  format %{ "[$reg] @ baseOffset0Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// NarrowKlass variants: same idea as the narrow-oop forms, but for
// compressed klass pointers (DecodeNKlass) with zero base and shift.
operand baseOffset16NarrowKlass(mRegN reg, immL16 off)
%{
  predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) off);

  op_cost(5);
  format %{ "[$reg + $off (16-bit)] @ baseOffset16NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand baseOffset0NarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(DecodeNKlass reg);

  op_cost(10);
  format %{ "[$reg] @ baseOffset0NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand gsBaseIndexOffset8NarrowKlass(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(5);
  format %{"[$reg + $off + $lreg] @ gsBaseIndexOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand gsBaseIndexOffset0NarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(UseLoongsonISA && Universe::narrow_klass_base() == 0 && Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ gsBaseIndexOffset0NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
//------------------------OPERAND CLASSES--------------------------------------
// The 'memory' operand class groups every addressing form above, so a single
// instruct pattern taking 'memory' matches any of them; the matcher picks the
// cheapest applicable form via op_cost.
opclass memory(
    baseOffset16,
    gsBaseIndexOffset8,
    gsBaseIndexI2LOffset8,
    gsBaseIndexOffset0,
    baseOffset0,

    baseOffset16Narrow,
    gsBaseIndexOffset8Narrow,
    baseOffset0Narrow,

    baseOffset16NarrowKlass,
    baseOffset0NarrowKlass,
    gsBaseIndexOffset8NarrowKlass,
    gsBaseIndexOffset0NarrowKlass
);
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code: maps each ideal condition to a port-specific encoding.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
// Comparison Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// Encodings are identical to cmpOp; the signed/unsigned distinction is
// carried by the instruction pattern that matches this operand (see the
// section comment above).
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory. All forms address [SP + slot
//                      offset]; base 0x1d encodes the stack pointer.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;          // Fixed size instructions
  branch_has_delay_slot;            // branches have a delay slot on gs2
  max_instructions_per_bundle = 1;  // 1 instruction per bundle
  max_bundles_per_cycle = 4;        // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;        // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;      // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine:
// 4 decoders, 2 integer ALUs, 2 FPUs, a memory unit and a branch unit.
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+1;
  DECODE : ID;
  ALU    : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+5;
  DECODE : ID;
  ALU2   : CA;
%}

pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem    : RD(read);
  dst    : WB(write)+5;
  DECODE : ID;
  MEM    : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR     : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem    : RD(read);
  dst    : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

// int -> double conversion
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// double -> int conversion
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// FPR -> GPR move
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD;
%}

// GPR -> FPR move
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  dst    : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
5798 //----------INSTRUCTIONS-------------------------------------------------------
5799 //
5800 // match -- States which machine-independent subtree may be replaced
5801 // by this instruction.
5802 // ins_cost -- The estimated cost of this instruction is used by instruction
5803 // selection to identify a minimum cost tree of machine
5804 // instructions that matches a tree of machine-independent
5805 // instructions.
5806 // format -- A string providing the disassembly for this instruction.
5807 // The value of an instruction's operand may be inserted
5808 // by referring to it with a '$' prefix.
5809 // opcode -- Three instruction opcodes may be provided. These are referred
5810 // to within an encode class as $primary, $secondary, and $tertiary
5811 // respectively. The primary opcode is commonly used to
5812 // indicate the type of machine instruction, while secondary
5813 // and tertiary are often used for prefix options or addressing
5814 // modes.
5815 // ins_encode -- A list of encode classes with parameters. The encode class
5816 // name must have been defined in an 'enc_class' specification
5817 // in the encode section of the architecture description.
5820 // Load Integer
5821 instruct loadI(mRegI dst, memory mem) %{
5822 match(Set dst (LoadI mem));
5824 ins_cost(125);
5825 format %{ "lw $dst, $mem #@loadI" %}
5826 ins_encode (load_I_enc(dst, mem));
5827 ins_pipe( ialu_loadI );
5828 %}
5830 instruct loadI_convI2L(mRegL dst, memory mem) %{
5831 match(Set dst (ConvI2L (LoadI mem)));
5833 ins_cost(125);
5834 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5835 ins_encode (load_I_enc(dst, mem));
5836 ins_pipe( ialu_loadI );
5837 %}
5839 // Load Integer (32 bit signed) to Byte (8 bit signed)
5840 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5841 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5843 ins_cost(125);
5844 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5845 ins_encode(load_B_enc(dst, mem));
5846 ins_pipe(ialu_loadI);
5847 %}
5849 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5850 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5851 match(Set dst (AndI (LoadI mem) mask));
5853 ins_cost(125);
5854 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5855 ins_encode(load_UB_enc(dst, mem));
5856 ins_pipe(ialu_loadI);
5857 %}
5859 // Load Integer (32 bit signed) to Short (16 bit signed)
// (x << 16) >> 16 is the sign-extended low halfword, so a single lh suffices.
5860 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5861 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5863 ins_cost(125);
5864 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5865 ins_encode(load_S_enc(dst, mem));
5866 ins_pipe(ialu_loadI);
5867 %}
5869 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// (LoadI & 0xFFFF) is the zero-extended low halfword, so a single lhu suffices.
5870 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5871 match(Set dst (AndI (LoadI mem) mask));
5873 ins_cost(125);
5874 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5875 ins_encode(load_C_enc(dst, mem));
5876 ins_pipe(ialu_loadI);
5877 %}
5879 // Load Long.
// 64-bit load via ld. The atomic-access predicate below is disabled —
// presumably a naturally aligned 64-bit ld is atomic on this target; verify.
5880 instruct loadL(mRegL dst, memory mem) %{
5881 // predicate(!((LoadLNode*)n)->require_atomic_access());
5882 match(Set dst (LoadL mem));
5884 ins_cost(250);
5885 format %{ "ld $dst, $mem #@loadL" %}
5886 ins_encode(load_L_enc(dst, mem));
5887 ins_pipe( ialu_loadL );
5888 %}
5890 // Load Long - UNaligned
// Currently emitted as a plain ld (same encoding as loadL) at a higher cost;
// the FIXME notes that a real ldl/ldr unaligned sequence is still missing.
5891 instruct loadL_unaligned(mRegL dst, memory mem) %{
5892 match(Set dst (LoadL_unaligned mem));
5894 // FIXME: Jin: Need more effective ldl/ldr
5895 ins_cost(450);
5896 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5897 ins_encode(load_L_enc(dst, mem));
5898 ins_pipe( ialu_loadL );
5899 %}
5901 // Store Long
// Stores a 64-bit register to memory via sd.
5902 instruct storeL_reg(memory mem, mRegL src) %{
5903 match(Set mem (StoreL mem src));
5905 ins_cost(200);
5906 format %{ "sd $mem, $src #@storeL_reg\n" %}
5907 ins_encode(store_L_reg_enc(mem, src));
5908 ins_pipe( ialu_storeL );
5909 %}
// Store of the long constant 0: cheaper than storeL_reg because the zero
// register can be stored directly, with no register to materialize.
5912 instruct storeL_immL0(memory mem, immL0 zero) %{
5913 match(Set mem (StoreL mem zero));
5915 ins_cost(180);
5916 format %{ "sd $mem, zero #@storeL_immL0" %}
5917 ins_encode(store_L_immL0_enc(mem, zero));
5918 ins_pipe( ialu_storeL );
5919 %}
5921 // Load Compressed Pointer
// Loads a 32-bit narrow oop with lwu (zero-extension, since narrow oops are
// unsigned offsets/addresses).
5922 instruct loadN(mRegN dst, memory mem)
5923 %{
5924 match(Set dst (LoadN mem));
5926 ins_cost(125); // XXX
5927 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5928 ins_encode (load_N_enc(dst, mem));
5929 ins_pipe( ialu_loadI ); // XXX
5930 %}
// Folds DecodeN(LoadN) into a single lwu: legal only when the heap is
// unscaled and unbased (narrow oop == full pointer), per the predicate.
5932 instruct loadN2P(mRegP dst, memory mem)
5933 %{
5934 match(Set dst (DecodeN (LoadN mem)));
5935 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
5937 ins_cost(125); // XXX
5938 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
5939 ins_encode (load_N_enc(dst, mem));
5940 ins_pipe( ialu_loadI ); // XXX
5941 %}
5943 // Load Pointer
// Loads a full 64-bit pointer from memory via ld.
5944 instruct loadP(mRegP dst, memory mem) %{
5945 match(Set dst (LoadP mem));
5947 ins_cost(125);
5948 format %{ "ld $dst, $mem #@loadP" %}
5949 ins_encode (load_P_enc(dst, mem));
5950 ins_pipe( ialu_loadI );
5951 %}
5953 // Load Klass Pointer
// Loads an uncompressed klass pointer; same encoding as loadP (64-bit ld).
5954 instruct loadKlass(mRegP dst, memory mem) %{
5955 match(Set dst (LoadKlass mem));
5957 ins_cost(125);
5958 format %{ "MOV $dst,$mem @ loadKlass" %}
5959 ins_encode (load_P_enc(dst, mem));
5960 ins_pipe( ialu_loadI );
5961 %}
5963 // Load narrow Klass Pointer
// Loads a 32-bit compressed klass pointer with lwu (zero-extend).
5964 instruct loadNKlass(mRegN dst, memory mem)
5965 %{
5966 match(Set dst (LoadNKlass mem));
5968 ins_cost(125); // XXX
5969 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
5970 ins_encode (load_N_enc(dst, mem));
5971 ins_pipe( ialu_loadI ); // XXX
5972 %}
// Folds DecodeNKlass(LoadNKlass) into one lwu when klass compression uses no
// base and no shift, mirroring loadN2P for ordinary oops.
5974 instruct loadN2PKlass(mRegP dst, memory mem)
5975 %{
5976 match(Set dst (DecodeNKlass (LoadNKlass mem)));
5977 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
5979 ins_cost(125); // XXX
5980 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
5981 ins_encode (load_N_enc(dst, mem));
5982 ins_pipe( ialu_loadI ); // XXX
5983 %}
5985 // Load Constant
// Materializes an arbitrary 32-bit int constant into a register via the
// macro-assembler move(reg, imm), which picks a suitable immediate sequence.
5986 instruct loadConI(mRegI dst, immI src) %{
5987 match(Set dst src);
5989 ins_cost(150);
5990 format %{ "mov $dst, $src #@loadConI" %}
5991 ins_encode %{
5992 Register dst = $dst$$Register;
5993 int value = $src$$constant;
5994 __ move(dst, value);
5995 %}
5996 ins_pipe( ialu_regI_regI );
5997 %}
// Materializes an arbitrary 64-bit long constant with the set64() macro
// (multi-instruction lui/ori-style sequence chosen by the assembler).
6000 instruct loadConL_set64(mRegL dst, immL src) %{
6001 match(Set dst src);
6002 ins_cost(120);
6003 format %{ "li $dst, $src @ loadConL_set64" %}
6004 ins_encode %{
6005 __ set64($dst$$Register, $src$$constant);
6006 %}
6007 ins_pipe(ialu_regL_regL);
6008 %}
6010 /*
6011 // Load long value from constant table (predicated by immL_expensive).
6012 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6013 match(Set dst src);
6014 ins_cost(150);
6015 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6016 ins_encode %{
6017 int con_offset = $constantoffset($src);
6019 if (Assembler::is_simm16(con_offset)) {
6020 __ ld($dst$$Register, $constanttablebase, con_offset);
6021 } else {
6022 __ set64(AT, con_offset);
6023 if (UseLoongsonISA) {
6024 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6025 } else {
6026 __ daddu(AT, $constanttablebase, AT);
6027 __ ld($dst$$Register, AT, 0);
6028 }
6029 }
6030 %}
6031 ins_pipe(ialu_loadI);
6032 %}
6033 */
// 16-bit long constant: fits the signed immediate of daddiu, so a single
// daddiu dst, zero, imm suffices (cheaper than the general set64 path).
6035 instruct loadConL16(mRegL dst, immL16 src) %{
6036 match(Set dst src);
6037 ins_cost(105);
6038 format %{ "mov $dst, $src #@loadConL16" %}
6039 ins_encode %{
6040 Register dst_reg = as_Register($dst$$reg);
6041 int value = $src$$constant;
6042 __ daddiu(dst_reg, R0, value);
6043 %}
6044 ins_pipe( ialu_regL_regL );
6045 %}
// Long constant 0: cheapest case, just copy the hardwired zero register
// (daddu dst, R0, R0).
6048 instruct loadConL0(mRegL dst, immL0 src) %{
6049 match(Set dst src);
6050 ins_cost(100);
6051 format %{ "mov $dst, zero #@loadConL0" %}
6052 ins_encode %{
6053 Register dst_reg = as_Register($dst$$reg);
6054 __ daddu(dst_reg, R0, R0);
6055 %}
6056 ins_pipe( ialu_regL_regL );
6057 %}
6059 // Load Range
// Loads an array length (LoadRange); int-sized, so it reuses load_I_enc.
6060 instruct loadRange(mRegI dst, memory mem) %{
6061 match(Set dst (LoadRange mem));
6063 ins_cost(125);
6064 format %{ "MOV $dst,$mem @ loadRange" %}
6065 ins_encode(load_I_enc(dst, mem));
6066 ins_pipe( ialu_loadI );
6067 %}
// Store a full 64-bit pointer register to memory via sd.
6070 instruct storeP(memory mem, mRegP src ) %{
6071 match(Set mem (StoreP mem src));
6073 ins_cost(125);
6074 format %{ "sd $src, $mem #@storeP" %}
6075 ins_encode(store_P_reg_enc(mem, src));
6076 ins_pipe( ialu_storeI );
6077 %}
6079 // Store NULL Pointer, mark word, or other simple pointer constant.
6080 instruct storeImmP0(memory mem, immP0 zero) %{
6081 match(Set mem (StoreP mem zero));
6083 ins_cost(125);
6084 format %{ "mov $mem, $zero #@storeImmP0" %}
6085 ins_encode(store_P_immP0_enc(mem));
6086 ins_pipe( ialu_storeI );
6087 %}
6089 // Store Byte Immediate
// Stores an 8-bit immediate to memory; the encoding materializes the
// constant into a scratch register before the sb.
6090 instruct storeImmB(memory mem, immI8 src) %{
6091 match(Set mem (StoreB mem src));
6093 ins_cost(150);
6094 format %{ "movb $mem, $src #@storeImmB" %}
6095 ins_encode(store_B_immI_enc(mem, src));
6096 ins_pipe( ialu_storeI );
6097 %}
6099 // Store Compressed Pointer
6100 instruct storeN(memory mem, mRegN src)
6101 %{
6102 match(Set mem (StoreN mem src));
6104 ins_cost(125); // XXX
6105 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6106 ins_encode(store_N_reg_enc(mem, src));
6107 ins_pipe( ialu_storeI );
6108 %}
// Folds StoreN(EncodeP p) into a plain sw of the low 32 bits: legal only
// when compressed oops use no base and no shift (narrow == full pointer).
6110 instruct storeP2N(memory mem, mRegP src)
6111 %{
6112 match(Set mem (StoreN mem (EncodeP src)));
6113 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6115 ins_cost(125); // XXX
6116 format %{ "sw $mem, $src\t# @ storeP2N" %}
6117 ins_encode(store_N_reg_enc(mem, src));
6118 ins_pipe( ialu_storeI );
6119 %}
// Stores a 32-bit compressed klass pointer register with sw.
6121 instruct storeNKlass(memory mem, mRegN src)
6122 %{
6123 match(Set mem (StoreNKlass mem src));
6125 ins_cost(125); // XXX
6126 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6127 ins_encode(store_N_reg_enc(mem, src));
6128 ins_pipe( ialu_storeI );
6129 %}
// Folds StoreNKlass(EncodePKlass p) into a plain sw when klass compression
// uses no base and no shift, mirroring storeP2N for ordinary oops.
6131 instruct storeP2NKlass(memory mem, mRegP src)
6132 %{
6133 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6134 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6136 ins_cost(125); // XXX
6137 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6138 ins_encode(store_N_reg_enc(mem, src));
6139 ins_pipe( ialu_storeI );
6140 %}
// Stores the narrow-oop constant 0 (compressed NULL). The "R12" in the
// format string presumably names a scratch register used by the encoding —
// verify against storeImmN0_enc.
6142 instruct storeImmN0(memory mem, immN0 zero)
6143 %{
6144 match(Set mem (StoreN mem zero));
6146 ins_cost(125); // XXX
6147 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6148 ins_encode(storeImmN0_enc(mem, zero));
6149 ins_pipe( ialu_storeI );
6150 %}
6152 // Store Byte
// Stores the low byte of an int register with sb.
6153 instruct storeB(memory mem, mRegI src) %{
6154 match(Set mem (StoreB mem src));
6156 ins_cost(125);
6157 format %{ "sb $src, $mem #@storeB" %}
6158 ins_encode(store_B_reg_enc(mem, src));
6159 ins_pipe( ialu_storeI );
6160 %}
// Folds StoreB(ConvL2I l): sb stores only the low byte, so the long->int
// truncation needs no separate instruction.
6162 instruct storeB_convL2I(memory mem, mRegL src) %{
6163 match(Set mem (StoreB mem (ConvL2I src)));
6165 ins_cost(125);
6166 format %{ "sb $src, $mem #@storeB_convL2I" %}
6167 ins_encode(store_B_reg_enc(mem, src));
6168 ins_pipe( ialu_storeI );
6169 %}
6171 // Load Byte (8bit signed)
// Sign-extending byte load via lb.
6172 instruct loadB(mRegI dst, memory mem) %{
6173 match(Set dst (LoadB mem));
6175 ins_cost(125);
6176 format %{ "lb $dst, $mem #@loadB" %}
6177 ins_encode(load_B_enc(dst, mem));
6178 ins_pipe( ialu_loadI );
6179 %}
// Folds ConvI2L(LoadB): lb sign-extends to the full register width, so the
// widening is free.
6181 instruct loadB_convI2L(mRegL dst, memory mem) %{
6182 match(Set dst (ConvI2L (LoadB mem)));
6184 ins_cost(125);
6185 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6186 ins_encode(load_B_enc(dst, mem));
6187 ins_pipe( ialu_loadI );
6188 %}
6190 // Load Byte (8bit UNsigned)
// Zero-extending byte load via lbu.
6191 instruct loadUB(mRegI dst, memory mem) %{
6192 match(Set dst (LoadUB mem));
6194 ins_cost(125);
6195 format %{ "lbu $dst, $mem #@loadUB" %}
6196 ins_encode(load_UB_enc(dst, mem));
6197 ins_pipe( ialu_loadI );
6198 %}
// Folds ConvI2L(LoadUB): lbu zero-extends to the full register width, so the
// widening is free.
6200 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6201 match(Set dst (ConvI2L (LoadUB mem)));
6203 ins_cost(125);
6204 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6205 ins_encode(load_UB_enc(dst, mem));
6206 ins_pipe( ialu_loadI );
6207 %}
6209 // Load Short (16bit signed)
// Sign-extending halfword load via lh.
6210 instruct loadS(mRegI dst, memory mem) %{
6211 match(Set dst (LoadS mem));
6213 ins_cost(125);
6214 format %{ "lh $dst, $mem #@loadS" %}
6215 ins_encode(load_S_enc(dst, mem));
6216 ins_pipe( ialu_loadI );
6217 %}
6219 // Load Short (16 bit signed) to Byte (8 bit signed)
// (x << 24) >> 24 of a loaded short is just the sign-extended low byte,
// so the pattern collapses to a single lb.
6220 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6221 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6223 ins_cost(125);
6224 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6225 ins_encode(load_B_enc(dst, mem));
6226 ins_pipe(ialu_loadI);
6227 %}
// Folds ConvI2L(LoadS): lh sign-extends to the full register width, so the
// widening is free.
6229 instruct loadS_convI2L(mRegL dst, memory mem) %{
6230 match(Set dst (ConvI2L (LoadS mem)));
6232 ins_cost(125);
6233 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6234 ins_encode(load_S_enc(dst, mem));
6235 ins_pipe( ialu_loadI );
6236 %}
6238 // Store Integer Immediate
// Stores an arbitrary int immediate; the encoding materializes the constant
// into a scratch register before the sw (hence higher cost than storeI).
6239 instruct storeImmI(memory mem, immI src) %{
6240 match(Set mem (StoreI mem src));
6242 ins_cost(150);
6243 format %{ "mov $mem, $src #@storeImmI" %}
6244 ins_encode(store_I_immI_enc(mem, src));
6245 ins_pipe( ialu_storeI );
6246 %}
6248 // Store Integer
// Stores the low 32 bits of an int register with sw.
6249 instruct storeI(memory mem, mRegI src) %{
6250 match(Set mem (StoreI mem src));
6252 ins_cost(125);
6253 format %{ "sw $mem, $src #@storeI" %}
6254 ins_encode(store_I_reg_enc(mem, src));
6255 ins_pipe( ialu_storeI );
6256 %}
// Folds StoreI(ConvL2I l): sw stores only the low 32 bits, so the
// long->int truncation needs no separate instruction.
6258 instruct storeI_convL2I(memory mem, mRegL src) %{
6259 match(Set mem (StoreI mem (ConvL2I src)));
6261 ins_cost(125);
6262 format %{ "sw $mem, $src #@storeI_convL2I" %}
6263 ins_encode(store_I_reg_enc(mem, src));
6264 ins_pipe( ialu_storeI );
6265 %}
6267 // Load Float
// Loads a 32-bit float into an FP register. NOTE(review): pipe class is the
// integer-load class ialu_loadI, matching the rest of this file — confirm
// intentional.
6268 instruct loadF(regF dst, memory mem) %{
6269 match(Set dst (LoadF mem));
6271 ins_cost(150);
6272 format %{ "loadF $dst, $mem #@loadF" %}
6273 ins_encode(load_F_enc(dst, mem));
6274 ins_pipe( ialu_loadI );
6275 %}
// Materializes a general pointer constant. Metadata and oop constants get a
// relocation record plus a fixed-length patchable_set48 sequence (so the GC /
// class redefinition can patch the embedded value); unrelocated constants use
// the cheaper variable-length set64.
6277 instruct loadConP_general(mRegP dst, immP src) %{
6278 match(Set dst src);
6280 ins_cost(120);
6281 format %{ "li $dst, $src #@loadConP_general" %}
6283 ins_encode %{
6284 Register dst = $dst$$Register;
6285 long* value = (long*)$src$$constant;
6287 if($src->constant_reloc() == relocInfo::metadata_type){
6288 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6289 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6291 __ relocate(rspec);
6292 __ patchable_set48(dst, (long)value);
6293 }else if($src->constant_reloc() == relocInfo::oop_type){
6294 int oop_index = __ oop_recorder()->find_index((jobject)value);
6295 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6297 __ relocate(rspec);
6298 __ patchable_set48(dst, (long)value);
6299 } else if ($src->constant_reloc() == relocInfo::none) {
6300 __ set64(dst, (long)value);
6301 }
6302 %}
6304 ins_pipe( ialu_regI_regI );
6305 %}
6307 /*
6308 instruct loadConP_load(mRegP dst, immP_load src) %{
6309 match(Set dst src);
6311 ins_cost(100);
6312 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6314 ins_encode %{
6316 int con_offset = $constantoffset($src);
6318 if (Assembler::is_simm16(con_offset)) {
6319 __ ld($dst$$Register, $constanttablebase, con_offset);
6320 } else {
6321 __ set64(AT, con_offset);
6322 if (UseLoongsonISA) {
6323 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6324 } else {
6325 __ daddu(AT, $constanttablebase, AT);
6326 __ ld($dst$$Register, AT, 0);
6327 }
6328 }
6329 %}
6331 ins_pipe(ialu_loadI);
6332 %}
6333 */
// Non-oop pointer constant that needs no relocation: a plain set64 is
// sufficient, hence the low cost relative to loadConP_general.
6335 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6336 match(Set dst src);
6338 ins_cost(80);
6339 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6341 ins_encode %{
6342 __ set64($dst$$Register, $src$$constant);
6343 %}
6345 ins_pipe(ialu_regI_regI);
6346 %}
// Materializes the safepoint polling-page address; emitted with set64 and no
// relocation.
6349 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6350 match(Set dst src);
6352 ins_cost(50);
6353 format %{ "li $dst, $src #@loadConP_poll" %}
6355 ins_encode %{
6356 Register dst = $dst$$Register;
6357 intptr_t value = (intptr_t)$src$$constant;
6359 __ set64(dst, (jlong)value);
6360 %}
6362 ins_pipe( ialu_regI_regI );
6363 %}
// NULL pointer constant: copy the hardwired zero register (daddu dst,R0,R0).
6365 instruct loadConP0(mRegP dst, immP0 src)
6366 %{
6367 match(Set dst src);
6369 ins_cost(50);
6370 format %{ "mov $dst, R0\t# ptr" %}
6371 ins_encode %{
6372 Register dst_reg = $dst$$Register;
6373 __ daddu(dst_reg, R0, R0);
6374 %}
6375 ins_pipe( ialu_regI_regI );
6376 %}
// Compressed NULL pointer constant: a plain register move from R0.
6378 instruct loadConN0(mRegN dst, immN0 src) %{
6379 match(Set dst src);
6380 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6381 ins_encode %{
6382 __ move($dst$$Register, R0);
6383 %}
6384 ins_pipe( ialu_regI_regI );
6385 %}
// Materializes a non-null narrow-oop constant via set_narrow_oop, which
// emits a patchable, relocated sequence for the GC.
6387 instruct loadConN(mRegN dst, immN src) %{
6388 match(Set dst src);
6390 ins_cost(125);
6391 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6392 ins_encode %{
6393 Register dst = $dst$$Register;
6394 __ set_narrow_oop(dst, (jobject)$src$$constant);
6395 %}
6396 ins_pipe( ialu_regI_regI ); // XXX
6397 %}
// Materializes a narrow-klass constant via set_narrow_klass (patchable,
// relocated sequence), mirroring loadConN for ordinary narrow oops.
6399 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6400 match(Set dst src);
6402 ins_cost(125);
6403 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6404 ins_encode %{
6405 Register dst = $dst$$Register;
6406 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6407 %}
6408 ins_pipe( ialu_regI_regI ); // XXX
6409 %}
6411 //FIXME
6412 // Tail Call; Jump from runtime stub to Java code.
6413 // Also known as an 'interprocedural jump'.
6414 // Target of jump will eventually return to caller.
6415 // TailJump below removes the return address.
// Emits: push RA (consumed later by generate_forward_exception), move the
// method oop into S3, then an indirect jr through the target register with
// the mandatory branch-delay-slot nop.
6416 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6417 match(TailCall jump_target method_oop );
6418 ins_cost(300);
6419 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6421 ins_encode %{
6422 Register target = $jump_target$$Register;
6423 Register oop = $method_oop$$Register;
6425 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6426 __ push(RA);
6428 __ move(S3, oop);
6429 __ jr(target);
6430 __ nop();
6431 %}
6433 ins_pipe( pipe_jump );
6434 %}
6436 // Create exception oop: created by stack-crawling runtime code.
6437 // Created exception is now available to this handler, and is setup
6438 // just prior to jumping to this handler. No code emitted.
// The register allocator pins ex_oop to A0; the encoding emits only a
// block comment, no instructions.
6439 instruct CreateException( a0_RegP ex_oop )
6440 %{
6441 match(Set ex_oop (CreateEx));
6443 // use the following format syntax
6444 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6445 ins_encode %{
6446 /* Jin: X86 leaves this function empty */
6447 __ block_comment("CreateException is empty in X86/MIPS");
6448 %}
6449 ins_pipe( empty );
6450 // ins_pipe( pipe_jump );
6451 %}
6454 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6456 - Common try/catch:
6457 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6458 |- V0, V1 are created
6459 |- T9 <= SharedRuntime::exception_handler_for_return_address
6460 `- jr T9
6461 `- the caller's exception_handler
6462 `- jr OptoRuntime::exception_blob
6463 `- here
6464 - Rethrow(e.g. 'unwind'):
6465 * The callee:
6466 |- an exception is triggered during execution
6467 `- exits the callee method through RethrowException node
6468 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6469 `- The callee jumps to OptoRuntime::rethrow_stub()
6470 * In OptoRuntime::rethrow_stub:
6471 |- The VM calls _rethrow_Java to determine the return address in the caller method
6472 `- exits the stub with tailjmpInd
6473 |- pops exception_oop(V0) and exception_pc(V1)
6474 `- jumps to the return address(usually an exception_handler)
6475 * The caller:
6476 `- continues processing the exception_blob with V0/V1
6477 */
6479 /*
6480 Disassembling OptoRuntime::rethrow_stub()
6482 ; locals
6483 0x2d3bf320: addiu sp, sp, 0xfffffff8
6484 0x2d3bf324: sw ra, 0x4(sp)
6485 0x2d3bf328: sw fp, 0x0(sp)
6486 0x2d3bf32c: addu fp, sp, zero
6487 0x2d3bf330: addiu sp, sp, 0xfffffff0
6488 0x2d3bf334: sw ra, 0x8(sp)
6489 0x2d3bf338: sw t0, 0x4(sp)
6490 0x2d3bf33c: sw sp, 0x0(sp)
6492 ; get_thread(S2)
6493 0x2d3bf340: addu s2, sp, zero
6494 0x2d3bf344: srl s2, s2, 12
6495 0x2d3bf348: sll s2, s2, 2
6496 0x2d3bf34c: lui at, 0x2c85
6497 0x2d3bf350: addu at, at, s2
6498 0x2d3bf354: lw s2, 0xffffcc80(at)
6500 0x2d3bf358: lw s0, 0x0(sp)
6501 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6502 0x2d3bf360: sw s2, 0xc(sp)
6504 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6505 0x2d3bf364: lw a0, 0x4(sp)
6506 0x2d3bf368: lw a1, 0xc(sp)
6507 0x2d3bf36c: lw a2, 0x8(sp)
6508 ;; Java_To_Runtime
6509 0x2d3bf370: lui t9, 0x2c34
6510 0x2d3bf374: addiu t9, t9, 0xffff8a48
6511 0x2d3bf378: jalr t9
6512 0x2d3bf37c: nop
6514 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6516 0x2d3bf384: lw s0, 0xc(sp)
6517 0x2d3bf388: sw zero, 0x118(s0)
6518 0x2d3bf38c: sw zero, 0x11c(s0)
6519 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6520 0x2d3bf394: addu s2, s0, zero
6521 0x2d3bf398: sw zero, 0x144(s2)
6522 0x2d3bf39c: lw s0, 0x4(s2)
6523 0x2d3bf3a0: addiu s4, zero, 0x0
6524 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6525 0x2d3bf3a8: nop
6526 0x2d3bf3ac: addiu sp, sp, 0x10
6527 0x2d3bf3b0: addiu sp, sp, 0x8
6528 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6529 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6530 0x2d3bf3bc: lui at, 0x2b48
6531 0x2d3bf3c0: lw at, 0x100(at)
6533 ; tailjmpInd: Restores exception_oop & exception_pc
6534 0x2d3bf3c4: addu v1, ra, zero
6535 0x2d3bf3c8: addu v0, s1, zero
6536 0x2d3bf3cc: jr s3
6537 0x2d3bf3d0: nop
6538 ; Exception:
6539 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6540 0x2d3bf3d8: addiu s1, s1, 0x40
6541 0x2d3bf3dc: addiu s2, zero, 0x0
6542 0x2d3bf3e0: addiu sp, sp, 0x10
6543 0x2d3bf3e4: addiu sp, sp, 0x8
6544 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6545 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6546 0x2d3bf3f0: lui at, 0x2b48
6547 0x2d3bf3f4: lw at, 0x100(at)
6548 ; TailCalljmpInd
6549 __ push(RA); ; to be used in generate_forward_exception()
6550 0x2d3bf3f8: addu t7, s2, zero
6551 0x2d3bf3fc: jr s1
6552 0x2d3bf400: nop
6553 */
6554 // Rethrow exception:
6555 // The exception oop will come in the first argument position.
6556 // Then JUMP (not call) to the rethrow stub code.
// Emits a relocated runtime-call: patchable_set48 loads the rethrow-stub
// address into T9, then jr T9 with a delay-slot nop. The relocation mark is
// set on the buffer before the address-materializing sequence.
6557 instruct RethrowException()
6558 %{
6559 match(Rethrow);
6561 // use the following format syntax
6562 format %{ "JMP rethrow_stub #@RethrowException" %}
6563 ins_encode %{
6564 __ block_comment("@ RethrowException");
6566 cbuf.set_insts_mark();
6567 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6569 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6570 __ patchable_set48(T9, (jlong)OptoRuntime::rethrow_stub());
6571 __ jr(T9);
6572 __ nop();
6573 %}
6574 ins_pipe( pipe_jump );
6575 %}
// Branch on pointer compare against NULL (unsigned compare op, but only
// eq/ne are reachable; the unsigned relational cases are commented out).
// NOTE(review): the 'if (&L)' tests follow the file-wide idiom of guarding
// against an unbound/null label pointer ($labl$$label) — confirm; formally
// dereferencing a null pointer to form L is undefined behavior.
6577 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6578 match(If cmp (CmpP op1 zero));
6579 effect(USE labl);
6581 ins_cost(180);
6582 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6584 ins_encode %{
6585 Register op1 = $op1$$Register;
6586 Register op2 = R0;
6587 Label &L = *($labl$$label);
6588 int flag = $cmp$$cmpcode;
6590 switch(flag)
6591 {
6592 case 0x01: //equal
6593 if (&L)
6594 __ beq(op1, op2, L);
6595 else
6596 __ beq(op1, op2, (int)0);
6597 break;
6598 case 0x02: //not_equal
6599 if (&L)
6600 __ bne(op1, op2, L);
6601 else
6602 __ bne(op1, op2, (int)0);
6603 break;
6604 /*
6605 case 0x03: //above
6606 __ sltu(AT, op2, op1);
6607 if(&L)
6608 __ bne(R0, AT, L);
6609 else
6610 __ bne(R0, AT, (int)0);
6611 break;
6612 case 0x04: //above_equal
6613 __ sltu(AT, op1, op2);
6614 if(&L)
6615 __ beq(AT, R0, L);
6616 else
6617 __ beq(AT, R0, (int)0);
6618 break;
6619 case 0x05: //below
6620 __ sltu(AT, op1, op2);
6621 if(&L)
6622 __ bne(R0, AT, L);
6623 else
6624 __ bne(R0, AT, (int)0);
6625 break;
6626 case 0x06: //below_equal
6627 __ sltu(AT, op2, op1);
6628 if(&L)
6629 __ beq(AT, R0, L);
6630 else
6631 __ beq(AT, R0, (int)0);
6632 break;
6633 */
6634 default:
6635 Unimplemented();
6636 }
// Fill the MIPS branch delay slot.
6637 __ nop();
6638 %}
6640 ins_pc_relative(1);
6641 ins_pipe( pipe_alu_branch );
6642 %}
// Branch on DecodeN(narrow oop) compared against NULL: when compressed oops
// are unscaled and unbased, the narrow value is zero iff the decoded pointer
// is, so the compare is done directly on the narrow register. Only eq/ne
// are implemented (sufficient for null checks).
6644 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
6645 match(If cmp (CmpP (DecodeN op1) zero));
6646 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6647 effect(USE labl);
6649 ins_cost(180);
6650 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
6652 ins_encode %{
6653 Register op1 = $op1$$Register;
6654 Register op2 = R0;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6655 Label &L = *($labl$$label);
6656 int flag = $cmp$$cmpcode;
6658 switch(flag)
6659 {
6660 case 0x01: //equal
6661 if (&L)
6662 __ beq(op1, op2, L);
6663 else
6664 __ beq(op1, op2, (int)0);
6665 break;
6666 case 0x02: //not_equal
6667 if (&L)
6668 __ bne(op1, op2, L);
6669 else
6670 __ bne(op1, op2, (int)0);
6671 break;
6672 default:
6673 Unimplemented();
6674 }
// Branch delay slot.
6675 __ nop();
6676 %}
6678 ins_pc_relative(1);
6679 ins_pipe( pipe_alu_branch );
6680 %}
// Branch on unsigned pointer compare between two registers. Relational
// cases synthesize the condition with sltu into AT, then branch on AT.
6683 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6684 match(If cmp (CmpP op1 op2));
6685 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6686 effect(USE labl);
6688 ins_cost(200);
6689 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6691 ins_encode %{
6692 Register op1 = $op1$$Register;
6693 Register op2 = $op2$$Register;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6694 Label &L = *($labl$$label);
6695 int flag = $cmp$$cmpcode;
6697 switch(flag)
6698 {
6699 case 0x01: //equal
6700 if (&L)
6701 __ beq(op1, op2, L);
6702 else
6703 __ beq(op1, op2, (int)0);
6704 break;
6705 case 0x02: //not_equal
6706 if (&L)
6707 __ bne(op1, op2, L);
6708 else
6709 __ bne(op1, op2, (int)0);
6710 break;
6711 case 0x03: //above
6712 __ sltu(AT, op2, op1);
6713 if(&L)
6714 __ bne(R0, AT, L);
6715 else
6716 __ bne(R0, AT, (int)0);
6717 break;
6718 case 0x04: //above_equal
6719 __ sltu(AT, op1, op2);
6720 if(&L)
6721 __ beq(AT, R0, L);
6722 else
6723 __ beq(AT, R0, (int)0);
6724 break;
6725 case 0x05: //below
6726 __ sltu(AT, op1, op2);
6727 if(&L)
6728 __ bne(R0, AT, L);
6729 else
6730 __ bne(R0, AT, (int)0);
6731 break;
6732 case 0x06: //below_equal
6733 __ sltu(AT, op2, op1);
6734 if(&L)
6735 __ beq(AT, R0, L);
6736 else
6737 __ beq(AT, R0, (int)0);
6738 break;
6739 default:
6740 Unimplemented();
6741 }
// Branch delay slot.
6742 __ nop();
6743 %}
6745 ins_pc_relative(1);
6746 ins_pipe( pipe_alu_branch );
6747 %}
// Branch on narrow-oop compare against compressed NULL: compares the narrow
// register directly with R0. Only eq/ne are implemented.
6749 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6750 match(If cmp (CmpN op1 null));
6751 effect(USE labl);
6753 ins_cost(180);
6754 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6755 "BP$cmp $labl @ cmpN_null_branch" %}
6756 ins_encode %{
6757 Register op1 = $op1$$Register;
6758 Register op2 = R0;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6759 Label &L = *($labl$$label);
6760 int flag = $cmp$$cmpcode;
6762 switch(flag)
6763 {
6764 case 0x01: //equal
6765 if (&L)
6766 __ beq(op1, op2, L);
6767 else
6768 __ beq(op1, op2, (int)0);
6769 break;
6770 case 0x02: //not_equal
6771 if (&L)
6772 __ bne(op1, op2, L);
6773 else
6774 __ bne(op1, op2, (int)0);
6775 break;
6776 default:
6777 Unimplemented();
6778 }
// Branch delay slot.
6779 __ nop();
6780 %}
6781 //TODO: pipe_branchP or create pipe_branchN LEE
6782 ins_pc_relative(1);
6783 ins_pipe( pipe_alu_branch );
6784 %}
// Branch on narrow-oop compare between two registers; unsigned relational
// cases synthesize the condition with sltu into AT and branch on AT.
6786 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6787 match(If cmp (CmpN op1 op2));
6788 effect(USE labl);
6790 ins_cost(180);
6791 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6792 "BP$cmp $labl" %}
6793 ins_encode %{
6794 Register op1_reg = $op1$$Register;
6795 Register op2_reg = $op2$$Register;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6796 Label &L = *($labl$$label);
6797 int flag = $cmp$$cmpcode;
6799 switch(flag)
6800 {
6801 case 0x01: //equal
6802 if (&L)
6803 __ beq(op1_reg, op2_reg, L);
6804 else
6805 __ beq(op1_reg, op2_reg, (int)0);
6806 break;
6807 case 0x02: //not_equal
6808 if (&L)
6809 __ bne(op1_reg, op2_reg, L);
6810 else
6811 __ bne(op1_reg, op2_reg, (int)0);
6812 break;
6813 case 0x03: //above
6814 __ sltu(AT, op2_reg, op1_reg);
6815 if(&L)
6816 __ bne(R0, AT, L);
6817 else
6818 __ bne(R0, AT, (int)0);
6819 break;
6820 case 0x04: //above_equal
6821 __ sltu(AT, op1_reg, op2_reg);
6822 if(&L)
6823 __ beq(AT, R0, L);
6824 else
6825 __ beq(AT, R0, (int)0);
6826 break;
6827 case 0x05: //below
6828 __ sltu(AT, op1_reg, op2_reg);
6829 if(&L)
6830 __ bne(R0, AT, L);
6831 else
6832 __ bne(R0, AT, (int)0);
6833 break;
6834 case 0x06: //below_equal
6835 __ sltu(AT, op2_reg, op1_reg);
6836 if(&L)
6837 __ beq(AT, R0, L);
6838 else
6839 __ beq(AT, R0, (int)0);
6840 break;
6841 default:
6842 Unimplemented();
6843 }
// Branch delay slot.
6844 __ nop();
6845 %}
6846 ins_pc_relative(1);
6847 ins_pipe( pipe_alu_branch );
6848 %}
// Branch on UNSIGNED int compare between two registers (CmpU). Relational
// cases synthesize the condition with sltu into AT and branch on AT.
6850 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6851 match( If cmp (CmpU src1 src2) );
6852 effect(USE labl);
6853 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6855 ins_encode %{
6856 Register op1 = $src1$$Register;
6857 Register op2 = $src2$$Register;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6858 Label &L = *($labl$$label);
6859 int flag = $cmp$$cmpcode;
6861 switch(flag)
6862 {
6863 case 0x01: //equal
6864 if (&L)
6865 __ beq(op1, op2, L);
6866 else
6867 __ beq(op1, op2, (int)0);
6868 break;
6869 case 0x02: //not_equal
6870 if (&L)
6871 __ bne(op1, op2, L);
6872 else
6873 __ bne(op1, op2, (int)0);
6874 break;
6875 case 0x03: //above
6876 __ sltu(AT, op2, op1);
6877 if(&L)
6878 __ bne(AT, R0, L);
6879 else
6880 __ bne(AT, R0, (int)0);
6881 break;
6882 case 0x04: //above_equal
6883 __ sltu(AT, op1, op2);
6884 if(&L)
6885 __ beq(AT, R0, L);
6886 else
6887 __ beq(AT, R0, (int)0);
6888 break;
6889 case 0x05: //below
6890 __ sltu(AT, op1, op2);
6891 if(&L)
6892 __ bne(AT, R0, L);
6893 else
6894 __ bne(AT, R0, (int)0);
6895 break;
6896 case 0x06: //below_equal
6897 __ sltu(AT, op2, op1);
6898 if(&L)
6899 __ beq(AT, R0, L);
6900 else
6901 __ beq(AT, R0, (int)0);
6902 break;
6903 default:
6904 Unimplemented();
6905 }
// Branch delay slot.
6906 __ nop();
6907 %}
6909 ins_pc_relative(1);
6910 ins_pipe( pipe_alu_branch );
6911 %}
// Branch on UNSIGNED int compare against an immediate: the constant is
// first materialized into AT; relational cases then overwrite AT with the
// sltu result before branching on it.
6914 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6915 match( If cmp (CmpU src1 src2) );
6916 effect(USE labl);
6917 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6919 ins_encode %{
6920 Register op1 = $src1$$Register;
6921 int val = $src2$$constant;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6922 Label &L = *($labl$$label);
6923 int flag = $cmp$$cmpcode;
6925 __ move(AT, val);
6926 switch(flag)
6927 {
6928 case 0x01: //equal
6929 if (&L)
6930 __ beq(op1, AT, L);
6931 else
6932 __ beq(op1, AT, (int)0);
6933 break;
6934 case 0x02: //not_equal
6935 if (&L)
6936 __ bne(op1, AT, L);
6937 else
6938 __ bne(op1, AT, (int)0);
6939 break;
6940 case 0x03: //above
6941 __ sltu(AT, AT, op1);
6942 if(&L)
6943 __ bne(R0, AT, L);
6944 else
6945 __ bne(R0, AT, (int)0);
6946 break;
6947 case 0x04: //above_equal
6948 __ sltu(AT, op1, AT);
6949 if(&L)
6950 __ beq(AT, R0, L);
6951 else
6952 __ beq(AT, R0, (int)0);
6953 break;
6954 case 0x05: //below
6955 __ sltu(AT, op1, AT);
6956 if(&L)
6957 __ bne(R0, AT, L);
6958 else
6959 __ bne(R0, AT, (int)0);
6960 break;
6961 case 0x06: //below_equal
6962 __ sltu(AT, AT, op1);
6963 if(&L)
6964 __ beq(AT, R0, L);
6965 else
6966 __ beq(AT, R0, (int)0);
6967 break;
6968 default:
6969 Unimplemented();
6970 }
// Branch delay slot.
6971 __ nop();
6972 %}
6974 ins_pc_relative(1);
6975 ins_pipe( pipe_alu_branch );
6976 %}
// Branch on SIGNED int compare between two registers (CmpI). Relational
// cases synthesize the condition with slt into AT and branch on AT.
6978 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6979 match( If cmp (CmpI src1 src2) );
6980 effect(USE labl);
6981 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6983 ins_encode %{
6984 Register op1 = $src1$$Register;
6985 Register op2 = $src2$$Register;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
6986 Label &L = *($labl$$label);
6987 int flag = $cmp$$cmpcode;
6989 switch(flag)
6990 {
6991 case 0x01: //equal
6992 if (&L)
6993 __ beq(op1, op2, L);
6994 else
6995 __ beq(op1, op2, (int)0);
6996 break;
6997 case 0x02: //not_equal
6998 if (&L)
6999 __ bne(op1, op2, L);
7000 else
7001 __ bne(op1, op2, (int)0);
7002 break;
7003 case 0x03: //above
7004 __ slt(AT, op2, op1);
7005 if(&L)
7006 __ bne(R0, AT, L);
7007 else
7008 __ bne(R0, AT, (int)0);
7009 break;
7010 case 0x04: //above_equal
7011 __ slt(AT, op1, op2);
7012 if(&L)
7013 __ beq(AT, R0, L);
7014 else
7015 __ beq(AT, R0, (int)0);
7016 break;
7017 case 0x05: //below
7018 __ slt(AT, op1, op2);
7019 if(&L)
7020 __ bne(R0, AT, L);
7021 else
7022 __ bne(R0, AT, (int)0);
7023 break;
7024 case 0x06: //below_equal
7025 __ slt(AT, op2, op1);
7026 if(&L)
7027 __ beq(AT, R0, L);
7028 else
7029 __ beq(AT, R0, (int)0);
7030 break;
7031 default:
7032 Unimplemented();
7033 }
// Branch delay slot.
7034 __ nop();
7035 %}
7037 ins_pc_relative(1);
7038 ins_pipe( pipe_alu_branch );
7039 %}
// Branch on SIGNED int compare against zero: uses the dedicated MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez), so no AT scratch and no
// constant materialization is needed — hence the lower cost.
7041 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7042 match( If cmp (CmpI src1 src2) );
7043 effect(USE labl);
7044 ins_cost(170);
7045 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7047 ins_encode %{
7048 Register op1 = $src1$$Register;
7049 // int val = $src2$$constant;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
7050 Label &L = *($labl$$label);
7051 int flag = $cmp$$cmpcode;
7053 //__ move(AT, val);
7054 switch(flag)
7055 {
7056 case 0x01: //equal
7057 if (&L)
7058 __ beq(op1, R0, L);
7059 else
7060 __ beq(op1, R0, (int)0);
7061 break;
7062 case 0x02: //not_equal
7063 if (&L)
7064 __ bne(op1, R0, L);
7065 else
7066 __ bne(op1, R0, (int)0);
7067 break;
7068 case 0x03: //greater
7069 if(&L)
7070 __ bgtz(op1, L);
7071 else
7072 __ bgtz(op1, (int)0);
7073 break;
7074 case 0x04: //greater_equal
7075 if(&L)
7076 __ bgez(op1, L);
7077 else
7078 __ bgez(op1, (int)0);
7079 break;
7080 case 0x05: //less
7081 if(&L)
7082 __ bltz(op1, L);
7083 else
7084 __ bltz(op1, (int)0);
7085 break;
7086 case 0x06: //less_equal
7087 if(&L)
7088 __ blez(op1, L);
7089 else
7090 __ blez(op1, (int)0);
7091 break;
7092 default:
7093 Unimplemented();
7094 }
// Branch delay slot.
7095 __ nop();
7096 %}
7098 ins_pc_relative(1);
7099 ins_pipe( pipe_alu_branch );
7100 %}
// Branch on SIGNED int compare against a general immediate: the constant is
// materialized into AT first; relational cases overwrite AT with the slt
// result before branching on it.
7103 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7104 match( If cmp (CmpI src1 src2) );
7105 effect(USE labl);
7106 ins_cost(200);
7107 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7109 ins_encode %{
7110 Register op1 = $src1$$Register;
7111 int val = $src2$$constant;
// 'if (&L)' guards an unbound/null label pointer (file-wide idiom).
7112 Label &L = *($labl$$label);
7113 int flag = $cmp$$cmpcode;
7115 __ move(AT, val);
7116 switch(flag)
7117 {
7118 case 0x01: //equal
7119 if (&L)
7120 __ beq(op1, AT, L);
7121 else
7122 __ beq(op1, AT, (int)0);
7123 break;
7124 case 0x02: //not_equal
7125 if (&L)
7126 __ bne(op1, AT, L);
7127 else
7128 __ bne(op1, AT, (int)0);
7129 break;
7130 case 0x03: //greater
7131 __ slt(AT, AT, op1);
7132 if(&L)
7133 __ bne(R0, AT, L);
7134 else
7135 __ bne(R0, AT, (int)0);
7136 break;
7137 case 0x04: //greater_equal
7138 __ slt(AT, op1, AT);
7139 if(&L)
7140 __ beq(AT, R0, L);
7141 else
7142 __ beq(AT, R0, (int)0);
7143 break;
7144 case 0x05: //less
7145 __ slt(AT, op1, AT);
7146 if(&L)
7147 __ bne(R0, AT, L);
7148 else
7149 __ bne(R0, AT, (int)0);
7150 break;
7151 case 0x06: //less_equal
7152 __ slt(AT, AT, op1);
7153 if(&L)
7154 __ beq(AT, R0, L);
7155 else
7156 __ beq(AT, R0, (int)0);
7157 break;
7158 default:
7159 Unimplemented();
7160 }
// Branch delay slot.
7161 __ nop();
7162 %}
7164 ins_pc_relative(1);
7165 ins_pipe( pipe_alu_branch );
7166 %}
// Conditional branch on an UNSIGNED int compared against zero.  Degenerate
// unsigned cases are exploited:
//   above       (x >  0)  ==  not-equal
//   above_equal (x >= 0)  ==  always true  -> unconditional beq(R0, R0, ...)
//   below       (x <  0)  ==  never true   -> emit nothing (fall through)
//   below_equal (x <= 0)  ==  equal
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp   $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // Always taken: beq with two identical registers.
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // Unsigned x < 0 can never be true: emit no branch at all.
        // (The early return also skips the delay-slot nop, which is fine
        // because no branch was emitted.)
        return;
        break;
      case 0x06: //below_equal
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on an unsigned int compared against a 16-bit immediate.
// above_equal/below use sltiu directly with the immediate; the other ordered
// cases need the constant materialized in AT first because sltiu only takes
// the immediate as its second operand.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp   $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        // AT = (val <u op1); branch if set.
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        // AT = (op1 <u val); branch if clear.
        __ sltiu(AT, op1, val);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on two signed longs held in registers.  Ordered cases
// compute a signed set-on-less-than into AT, then branch on AT vs zero.
// Unlike the int variants above, each case fills its own delay slot via
// __ delayed()->nop().
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp   $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label   &target = *($labl$$label);
    int        flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        // AT = (src2 < src1); branch if set.
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed long vs. a small constant, implemented by
// computing AT = src1 - val with a single daddiu and branching on the sign
// of the difference.
// NOTE(review): correctness of the sign-based cases (greater/less/...)
// relies on src1 - val not overflowing; presumably the immL16_sub operand
// restricts the constant range so this holds — confirm against the operand
// definition.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp   $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    // AT = op1 - val (64-bit add of the negated immediate).
    __ daddiu(AT, op1, -1 * val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Int analogue of branchConL_reg_immL16_sub: AT = src1 - val via addiu32,
// then branch on the sign of the difference.
// NOTE(review): as above, the sign-based cases rely on src1 - val not
// overflowing 32 bits; presumably the immI16_sub operand guarantees this —
// confirm against the operand definition.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp   $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    // AT = op1 - val (32-bit add of the negated immediate).
    __ addiu32(AT, op1, -1 * val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed long compared against the constant zero.
// Uses the direct compare-with-zero branches where available; "less" instead
// goes through slt vs R0.  One shared delayed()->nop() fills the delay slot.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp   $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label     &target = *($labl$$label);
    int          flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        // AT = (src1 < 0); branch if set.  (bltz would also work.)
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ delayed()->nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME: review unordered (NaN) semantics of the float/double compare-and-branch
//       instructs below; see the 2016/4/19 note in branchConD_reg_reg.
// Conditional branch on a single-precision float compare.  Uses the FPU
// condition flag: c_xx_s sets the flag, bc1t/bc1f branch on it.  The ordered
// relations are built from the UNORDERED compares c_ule_s / c_ult_s inverted
// with bc1f, so an unordered (NaN) operand makes greater/greater_equal NOT
// branch, matching Java float comparison semantics for If nodes.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp   $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label     &L  = *($labl$$label);
    int      flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        // !(src1 <= src2 || unordered)  ==  src1 > src2 (ordered)
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Conditional branch on a double-precision compare; same flag/bc1t/bc1f
// scheme as branchConF_reg_reg above.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp   $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label     &L  = *($labl$$label);
    int      flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs.  Use the ordered c_eq_d instead: unordered
        // (NaN) compares false, so bc1f correctly treats NaN != NaN as true.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7698 // Call Runtime Instruction
// Direct call into the VM runtime (leaf or non-Java call).  Encoding is
// shared via the Java_To_Runtime encode class.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  // Align the call site to a 16-byte boundary.
  ins_alignment(16);
%}
7712 //------------------------MemBar Instructions-------------------------------
7713 //Memory barrier flavors
// Acquire barrier: emits no code (size(0)).  The preceding load/CAS already
// provides the required ordering on this target, so the node is matched but
// encodes empty.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: full hardware barrier via the MIPS sync instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with lock acquisition: empty encoding, because the
// CAS in the preceding FastLock already acts as the acquire.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: empty encoding on this target (same rationale as
// membar_acquire above).
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: full hardware barrier via the MIPS sync instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with lock release: empty, because FastUnlock
// already performs the release.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Volatile (full) memory barrier: sync on multiprocessor systems, nothing on
// a single CPU where it cannot be observed.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier elided when the matcher proves a store-load barrier
// already exists after the store (post_store_load_barrier).  Zero cost so
// this form wins over membar_volatile when the predicate holds.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// Store-store barrier: empty encoding on this target.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
7816 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a machine-word long as a pointer.  Pure register move
// (elided when source and destination allocate to the same register).
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P  $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a machine-word long.  Pure register move
// (elided when source and destination allocate to the same register).
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov    $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: raw 32-bit bit copy from an FPU register to a GPR (mfc1);
// no conversion is performed.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I   $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: raw 32-bit bit copy from a GPR to an FPU register (mtc1);
// no conversion is performed.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F   $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: raw 64-bit bit copy from an FPU register to a GPR (dmfc1);
// no conversion is performed.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L   $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: raw 64-bit bit copy from a GPR to an FPU register (dmtc1);
// no conversion is performed.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D   $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
7901 //----------Conditional Move---------------------------------------------------
7902 // Conditional move
// Conditional move of an int selected by a signed int compare.  The compare
// result is computed into AT (difference or slt), then movz/movn copies src
// into dst when the condition holds; dst is unchanged otherwise.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpI_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);     // move if AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // move if AT != 0
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned pointer compare.
// Pointers are full-width, so the equality test uses the 64-bit subu and the
// ordered tests use sltu (unsigned).
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned compare of two narrow
// (compressed) oops.  Narrow oops are 32-bit, hence subu32 for equality and
// sltu for the ordered cases.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned narrow-oop compare
// (same compare scheme as cmovI_cmpN_reg_reg).
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned full-width
// pointer compare (64-bit subu for equality, sltu for ordered cases).
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a double compare.  Uses the FPU
// condition flag with movt/movf (move-on-FP-true / move-on-FP-false).
// NOTE(review): the compare mnemonics mix ordered (c_ole/c_olt) and
// unordered (c_ult/c_ule) variants across cases, so NaN operands select
// differently per relation — presumably matching C2's CMove semantics for
// float compares; confirm against cmovI_cmpD_reg_reg, which uses the same
// pattern.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpD_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned narrow-oop
// compare (32-bit subu32 for equality, sltu for ordered cases).
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an UNSIGNED int compare
// (sltu for the ordered cases).
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by a signed LONG compare.
// NOTE(review): equality is tested with `subu` — presumably this port's
// subu/slt operate on the full 64-bit register (as in the pointer variants
// above); confirm in the MacroAssembler, since a 32-bit subtract would
// mis-compare longs differing only in the high word.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpL_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int     flag  = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a signed LONG compare.
// NOTE(review): same width caveat as cmovI_cmpL_reg_reg — presumably
// subu/slt here act on the full 64-bit register; confirm in the
// MacroAssembler.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpL_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int     flag  = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by a double compare; FPU condition
// flag with movt/movf, same scheme as cmovP_cmpD_reg_reg.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpD_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg.  The change in
        // branchConD_reg_reg fixed a bug (c_ueq_d cannot distinguish NaN
        // from equal).  It seems similar here, so I made the same change:
        // ordered c_eq_d + movf.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned pointer compare
// (64-bit subu for equality, sltu for ordered cases).
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a SIGNED int compare (cmpOp,
// slt).  The original case comments said above/above_equal/below/
// below_equal, which are unsigned names — the signed slt makes these
// greater/greater_equal/less/less_equal; comments corrected accordingly.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed; was mislabeled "above")
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed; was mislabeled "above_equal")
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed; was mislabeled "below")
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed; was mislabeled "below_equal")
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop guarded by a signed int compare:
// dst = src iff (tmp1 cop tmp2).  Branch-free via SLT + MOVZ/MOVN; AT clobbered.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 32-bit subtract: AT is zero iff equal.
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long guarded by a signed int compare:
// dst = src iff (tmp1 cop tmp2).  Branch-free via SLT + MOVZ/MOVN; AT clobbered.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 32-bit subtract: AT is zero iff equal.
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long guarded by a signed long compare:
// dst = src iff (tmp1 cop tmp2).  Branch-free via 64-bit SLT + MOVZ/MOVN;
// AT is clobbered.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 64-bit subtract: AT is zero iff equal (safe for equality only;
        // ordering via subtraction would be overflow-prone, hence SLT below).
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long guarded by an unsigned narrow-oop compare:
// dst = src iff (tmp1 cop tmp2).  Branch-free via SLTU + MOVZ/MOVN; AT clobbered.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // Narrow oops are 32-bit values: 32-bit subtract tests equality.
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long guarded by a double compare.  The c.cond.d
// instruction sets the FP condition flag; MOVT/MOVF conditionally move the
// GPR on that flag, so no branch is emitted.  The unordered/ordered variants
// (ult/ole/olt/ule) are chosen so that NaN operands select the branch-not-taken
// behavior C2 expects for each condition code.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Move when c.eq.d is false.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        // greater == !(op1 <= op2 ordered); NaN makes c.ole false -> no move... 
        // NOTE(review): NaN makes c.ole false, so MOVF *does* move on NaN here;
        // confirm this matches C2's expected NaN behavior for this cmpcode.
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        // c.ult is true for unordered-or-less.
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double guarded by a double compare.  There is no
// FP-to-FP conditional move here; instead a compare sets the FP condition
// flag and a short forward branch of the *opposite* sense skips the MOV_D.
// Each branch is followed by an explicit NOP to fill the delay slot.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        // If not equal (flag false) branch over the move.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float guarded by a signed int compare.  A branch of
// the opposite sense skips the MOV_S; NOPs fill the delay slots.  AT is
// clobbered by the SLT-based ordered cases.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        // Branch over the move when op1 != op2.
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double guarded by a signed int compare.  Same scheme
// as cmovF_cmpI_reg_reg but moves with MOV_D.  AT is clobbered by the
// SLT-based ordered cases; NOPs fill the branch delay slots.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double guarded by a pointer compare.  Same scheme as
// cmovD_cmpI_reg_reg.  NOTE(review): the ordered cases use signed SLT on
// pointers while other pointer compares in this file use SLTU — in practice
// only eq/ne should be generated for CmpP here; confirm.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9136 //FIXME
// Conditional move of an int guarded by a float compare.  c.cond.s sets the
// FP condition flag; MOVT/MOVF conditionally move the GPR on it (branch-free).
// Unordered/ordered compare variants are chosen per condition code so that
// NaN operands take the "condition false" path where required.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Move when c.eq.s is false.
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        // c.ult is true for unordered-or-less.
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float guarded by a float compare.  c.cond.s sets the
// FP condition flag; a BC1T/BC1F of the *opposite* sense branches over the
// MOV_S when the condition does not hold.  NOPs fill the delay slots.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // Skip the move when not equal.
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9252 // Manifest a CmpL result in an integer register. Very painful.
9253 // This is the test to avoid.
9254 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9255 match(Set dst (CmpL3 src1 src2));
9256 ins_cost(1000);
9257 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9258 ins_encode %{
9259 Register opr1 = as_Register($src1$$reg);
9260 Register opr2 = as_Register($src2$$reg);
9261 Register dst = as_Register($dst$$reg);
9263 Label Done;
9265 __ subu(AT, opr1, opr2);
9266 __ bltz(AT, Done);
9267 __ delayed()->daddiu(dst, R0, -1);
9269 __ move(dst, 1);
9270 __ movz(dst, R0, AT);
9272 __ bind(Done);
9273 %}
9274 ins_pipe( pipe_slow );
9275 %}
9277 //
// less_result = -1
9279 // greater_result = 1
9280 // equal_result = 0
9281 // nan_result = -1
9282 //
// Three-way float compare: dst = -1 / 0 / 1, with NaN (unordered) -> -1.
// c.ult.s is true for unordered-or-less, so the NaN case takes the -1 path.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3  $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // dst = -1 in the delay slot (always executed); keep it if less/unordered.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Otherwise dst = 1, overridden with 0 if equal.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Three-way double compare: dst = -1 / 0 / 1, with NaN (unordered) -> -1.
// Same scheme as cmpF3_reg_reg using the double-precision compares.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3  $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // dst = -1 in the delay slot (always executed); keep it if less/unordered.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Otherwise dst = 1, overridden with 0 if equal.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero an array region: stores R0 to $cnt doublewords starting at $base.
// Clobbers AT (running address) and T9 (remaining count).
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt is the number of doublewords (8-byte words) to clear -- not
    // bytes -- and $base points to the start of the region (see the note
    // below; this matches how X86 computes ClearArray counts).
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);      // nothing to do for a zero count
    __ nop();
    __ move(AT, base);

    // Store one doubleword per iteration, advancing AT by wordSize.
    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Intrinsic for String.compareTo: compares $cnt1 chars at $str1 with $cnt2
// chars at $str2; $result is negative/zero/positive.  Characters are 16-bit
// (lhu).  Kills its fixed input registers; clobbers AT.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result   @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                        // Loop begin
    // The lhu in the delay slot preloads the current char from str1; it also
    // executes when the branch to done is taken.
    // NOTE(review): that delay-slot load reads str1 even when cnt1 == 0 --
    // presumably always backed by valid array storage; confirm.
    __ beq(cnt1, R0, done);
    __ delayed()->lhu(AT, str1, 0);;

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);    // advance str1 in the delay slot
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // Loop end

    // Characters differ: result = char1 - char2.
    __ bind(haveResult);
    __ subu(result, AT, cnt2);

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9403 // intrinsic optimization
// Intrinsic for String.equals / Arrays.equals on char data: $result = 1 if
// the $cnt chars at $str1 and $str2 match, else 0.  Kills its fixed inputs
// and $temp; clobbers AT.  Note: several result assignments below sit in
// branch delay slots and therefore execute whether or not the branch is taken.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt  tmp:$temp -> $result    @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt    = $cnt$$Register;
    Register tmp    = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // Identical array references: result = 1 (set in the delay slot).
    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);            // Loop begin
    // Count exhausted with no mismatch: result = 1 (delay slot).
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1); // count == 0

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    // Mismatch: result = 0 (delay slot), then exit.
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);   // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9444 //----------Arithmetic Instructions-------------------------------------------
9445 //----------Addition Instructions---------------------------------------------
9446 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9447 match(Set dst (AddI src1 src2));
9449 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9450 ins_encode %{
9451 Register dst = $dst$$Register;
9452 Register src1 = $src1$$Register;
9453 Register src2 = $src2$$Register;
9454 __ addu32(dst, src1, src2);
9455 %}
9456 ins_pipe( ialu_regI_regI );
9457 %}
9459 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9460 match(Set dst (AddI src1 src2));
9462 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9463 ins_encode %{
9464 Register dst = $dst$$Register;
9465 Register src1 = $src1$$Register;
9466 int imm = $src2$$constant;
9468 if(Assembler::is_simm16(imm)) {
9469 __ addiu32(dst, src1, imm);
9470 } else {
9471 __ move(AT, imm);
9472 __ addu32(dst, src1, AT);
9473 }
9474 %}
9475 ins_pipe( ialu_regI_regI );
9476 %}
9478 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9479 match(Set dst (AddP src1 src2));
9481 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9483 ins_encode %{
9484 Register dst = $dst$$Register;
9485 Register src1 = $src1$$Register;
9486 Register src2 = $src2$$Register;
9487 __ daddu(dst, src1, src2);
9488 %}
9490 ins_pipe( ialu_regI_regI );
9491 %}
9493 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9494 match(Set dst (AddP src1 (ConvI2L src2)));
9496 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9498 ins_encode %{
9499 Register dst = $dst$$Register;
9500 Register src1 = $src1$$Register;
9501 Register src2 = $src2$$Register;
9502 __ daddu(dst, src1, src2);
9503 %}
9505 ins_pipe( ialu_regI_regI );
9506 %}
9508 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9509 match(Set dst (AddP src1 src2));
9511 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9512 ins_encode %{
9513 Register src1 = $src1$$Register;
9514 long src2 = $src2$$constant;
9515 Register dst = $dst$$Register;
9517 if(Assembler::is_simm16(src2)) {
9518 __ daddiu(dst, src1, src2);
9519 } else {
9520 __ set64(AT, src2);
9521 __ daddu(dst, src1, AT);
9522 }
9523 %}
9524 ins_pipe( ialu_regI_imm16 );
9525 %}
9527 // Add Long Register with Register
9528 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9529 match(Set dst (AddL src1 src2));
9530 ins_cost(200);
9531 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9533 ins_encode %{
9534 Register dst_reg = as_Register($dst$$reg);
9535 Register src1_reg = as_Register($src1$$reg);
9536 Register src2_reg = as_Register($src2$$reg);
9538 __ daddu(dst_reg, src1_reg, src2_reg);
9539 %}
9541 ins_pipe( ialu_regL_regL );
9542 %}
9544 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9545 %{
9546 match(Set dst (AddL src1 src2));
9548 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9549 ins_encode %{
9550 Register dst_reg = as_Register($dst$$reg);
9551 Register src1_reg = as_Register($src1$$reg);
9552 int src2_imm = $src2$$constant;
9554 __ daddiu(dst_reg, src1_reg, src2_imm);
9555 %}
9557 ins_pipe( ialu_regL_regL );
9558 %}
9560 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9561 %{
9562 match(Set dst (AddL (ConvI2L src1) src2));
9564 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9565 ins_encode %{
9566 Register dst_reg = as_Register($dst$$reg);
9567 Register src1_reg = as_Register($src1$$reg);
9568 int src2_imm = $src2$$constant;
9570 __ daddiu(dst_reg, src1_reg, src2_imm);
9571 %}
9573 ins_pipe( ialu_regL_regL );
9574 %}
9576 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9577 match(Set dst (AddL (ConvI2L src1) src2));
9578 ins_cost(200);
9579 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9581 ins_encode %{
9582 Register dst_reg = as_Register($dst$$reg);
9583 Register src1_reg = as_Register($src1$$reg);
9584 Register src2_reg = as_Register($src2$$reg);
9586 __ daddu(dst_reg, src1_reg, src2_reg);
9587 %}
9589 ins_pipe( ialu_regL_regL );
9590 %}
9592 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9593 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9594 ins_cost(200);
9595 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9597 ins_encode %{
9598 Register dst_reg = as_Register($dst$$reg);
9599 Register src1_reg = as_Register($src1$$reg);
9600 Register src2_reg = as_Register($src2$$reg);
9602 __ daddu(dst_reg, src1_reg, src2_reg);
9603 %}
9605 ins_pipe( ialu_regL_regL );
9606 %}
9608 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9609 match(Set dst (AddL src1 (ConvI2L src2)));
9610 ins_cost(200);
9611 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9613 ins_encode %{
9614 Register dst_reg = as_Register($dst$$reg);
9615 Register src1_reg = as_Register($src1$$reg);
9616 Register src2_reg = as_Register($src2$$reg);
9618 __ daddu(dst_reg, src1_reg, src2_reg);
9619 %}
9621 ins_pipe( ialu_regL_regL );
9622 %}
9624 //----------Subtraction Instructions-------------------------------------------
9625 // Integer Subtraction Instructions
9626 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9627 match(Set dst (SubI src1 src2));
9628 ins_cost(100);
9630 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9631 ins_encode %{
9632 Register dst = $dst$$Register;
9633 Register src1 = $src1$$Register;
9634 Register src2 = $src2$$Register;
9635 __ subu32(dst, src1, src2);
9636 %}
9637 ins_pipe( ialu_regI_regI );
9638 %}
9640 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9641 match(Set dst (SubI src1 src2));
9642 ins_cost(80);
9644 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9645 ins_encode %{
9646 Register dst = $dst$$Register;
9647 Register src1 = $src1$$Register;
9648 __ addiu32(dst, src1, -1 * $src2$$constant);
9649 %}
9650 ins_pipe( ialu_regI_regI );
9651 %}
9653 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9654 match(Set dst (SubI zero src));
9655 ins_cost(80);
9657 format %{ "neg $dst, $src #@negI_Reg" %}
9658 ins_encode %{
9659 Register dst = $dst$$Register;
9660 Register src = $src$$Register;
9661 __ subu32(dst, R0, src);
9662 %}
9663 ins_pipe( ialu_regI_regI );
9664 %}
9666 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9667 match(Set dst (SubL zero src));
9668 ins_cost(80);
9670 format %{ "neg $dst, $src #@negL_Reg" %}
9671 ins_encode %{
9672 Register dst = $dst$$Register;
9673 Register src = $src$$Register;
9674 __ subu(dst, R0, src);
9675 %}
9676 ins_pipe( ialu_regI_regI );
9677 %}
9679 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9680 match(Set dst (SubL src1 src2));
9681 ins_cost(80);
9683 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9684 ins_encode %{
9685 Register dst = $dst$$Register;
9686 Register src1 = $src1$$Register;
9687 __ daddiu(dst, src1, -1 * $src2$$constant);
9688 %}
9689 ins_pipe( ialu_regI_regI );
9690 %}
9692 // Subtract Long Register with Register.
9693 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9694 match(Set dst (SubL src1 src2));
9695 ins_cost(100);
9696 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9697 ins_encode %{
9698 Register dst = as_Register($dst$$reg);
9699 Register src1 = as_Register($src1$$reg);
9700 Register src2 = as_Register($src2$$reg);
9702 __ subu(dst, src1, src2);
9703 %}
9704 ins_pipe( ialu_regL_regL );
9705 %}
9707 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9708 match(Set dst (SubL src1 (ConvI2L src2)));
9709 ins_cost(100);
9710 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9711 ins_encode %{
9712 Register dst = as_Register($dst$$reg);
9713 Register src1 = as_Register($src1$$reg);
9714 Register src2 = as_Register($src2$$reg);
9716 __ subu(dst, src1, src2);
9717 %}
9718 ins_pipe( ialu_regL_regL );
9719 %}
9721 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9722 match(Set dst (SubL (ConvI2L src1) src2));
9723 ins_cost(200);
9724 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9725 ins_encode %{
9726 Register dst = as_Register($dst$$reg);
9727 Register src1 = as_Register($src1$$reg);
9728 Register src2 = as_Register($src2$$reg);
9730 __ subu(dst, src1, src2);
9731 %}
9732 ins_pipe( ialu_regL_regL );
9733 %}
9735 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9736 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9737 ins_cost(200);
9738 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9739 ins_encode %{
9740 Register dst = as_Register($dst$$reg);
9741 Register src1 = as_Register($src1$$reg);
9742 Register src2 = as_Register($src2$$reg);
9744 __ subu(dst, src1, src2);
9745 %}
9746 ins_pipe( ialu_regL_regL );
9747 %}
9749 // Integer MOD with Register
// Integer remainder: dst = src1 % src2 via DIV + MFHI (HI holds the
// remainder).  The Loongson gsmod path is deliberately disabled (see the
// inline note).  NOTE(review): unlike divI_Reg_Reg there is no teq
// divide-by-zero trap here -- presumably an explicit zero check is emitted
// elsewhere by the compiler; confirm.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi   $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register  dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);   // remainder lives in HI
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long remainder: dst = src1 % src2.  Uses Loongson gsdmod when available,
// otherwise DDIV + MFHI (HI holds the remainder).  NOTE(review): no explicit
// divide-by-zero trap here -- presumably handled by an explicit zero check
// emitted elsewhere; confirm.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL  $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);   // remainder lives in HI
    }
  %}
  ins_pipe( pipe_slow );
%}
9794 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9795 match(Set dst (MulI src1 src2));
9797 ins_cost(300);
9798 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9799 ins_encode %{
9800 Register src1 = $src1$$Register;
9801 Register src2 = $src2$$Register;
9802 Register dst = $dst$$Register;
9804 __ mul(dst, src1, src2);
9805 %}
9806 ins_pipe( ialu_mult );
9807 %}
// Fused multiply-add for ints: dst = src1 * src2 + src3.  Seeds LO with src3,
// accumulates the product into HI:LO with MADD, and reads the low 32 bits
// back with MFLO.  NOTE(review): HI is left in an accumulator state and LO is
// overwritten -- presumably HI/LO are not live across instructs; confirm.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd   $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst  = $dst$$Register;

    __ mtlo(src3);        // LO = src3 (the addend)
    __ madd(src1, src2);  // HI:LO += src1 * src2
    __ mflo(dst);         // dst = low 32 bits of the sum
  %}
  ins_pipe( ialu_mult );
%}
// Integer divide: dst = src1 / src2.  MIPS div does not trap on a zero
// divisor, so a TEQ trap (code 0x7) is emitted first to raise the implicit
// divide-by-zero exception.  Loongson gsdiv writes dst directly; the generic
// path uses DIV + MFLO with two NOPs in between (presumably to cover the
// HI/LO read-after-write hazard -- confirm against the pipeline docs).
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div   $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
         We must trap an exception manually. */
    __ teq(R0, src2, 0x7);   // trap if src2 == 0

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      __ nop();
      __ nop();
      __ mflo(dst);          // quotient lives in LO
    }
  %}
  ins_pipe( ialu_mod );
%}
// Single-precision float divide: dst = src1 / src2.
// No manual trap is needed: IEEE float division by zero produces
// infinity/NaN rather than a Java exception.
9854 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9855 match(Set dst (DivF src1 src2));
9857 ins_cost(300);
9858 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
9859 ins_encode %{
9860 FloatRegister src1 = $src1$$FloatRegister;
9861 FloatRegister src2 = $src2$$FloatRegister;
9862 FloatRegister dst = $dst$$FloatRegister;
9864 /* Here do we need to trap an exception manually ? */
9865 __ div_s(dst, src1, src2);
9866 %}
9867 ins_pipe( pipe_slow );
9868 %}
// Double-precision float divide: dst = src1 / src2 (see divF above).
9870 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
9871 match(Set dst (DivD src1 src2));
9873 ins_cost(300);
9874 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
9875 ins_encode %{
9876 FloatRegister src1 = $src1$$FloatRegister;
9877 FloatRegister src2 = $src2$$FloatRegister;
9878 FloatRegister dst = $dst$$FloatRegister;
9880 /* Here do we need to trap an exception manually ? */
9881 __ div_d(dst, src1, src2);
9882 %}
9883 ins_pipe( pipe_slow );
9884 %}
// Long multiply: dst = src1 * src2 (low 64 bits).
// Loongson gsdmult computes directly into dst; otherwise dmult leaves the
// low 64 bits in LO, read back with mflo.
9886 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9887 match(Set dst (MulL src1 src2));
9888 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
9889 ins_encode %{
9890 Register dst = as_Register($dst$$reg);
9891 Register op1 = as_Register($src1$$reg);
9892 Register op2 = as_Register($src2$$reg);
9894 if (UseLoongsonISA) {
9895 __ gsdmult(dst, op1, op2);
9896 } else {
9897 __ dmult(op1, op2);
9898 __ mflo(dst);
9899 }
9900 %}
9901 ins_pipe( pipe_slow );
9902 %}
// Long multiply with an int operand widened by ConvI2L.
// NOTE(review): relies on the int value already being sign-extended in its
// 64-bit register (the usual MIPS convention for 32-bit values) — confirm.
9904 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
9905 match(Set dst (MulL src1 (ConvI2L src2)));
9906 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
9907 ins_encode %{
9908 Register dst = as_Register($dst$$reg);
9909 Register op1 = as_Register($src1$$reg);
9910 Register op2 = as_Register($src2$$reg);
9912 if (UseLoongsonISA) {
9913 __ gsdmult(dst, op1, op2);
9914 } else {
9915 __ dmult(op1, op2);
9916 __ mflo(dst);
9917 }
9918 %}
9919 ins_pipe( pipe_slow );
9920 %}
// Long divide: dst = src1 / src2 (quotient in LO).
// NOTE(review): unlike divI_Reg_Reg above, no teq divide-by-zero trap is
// emitted here — presumably the zero check is done elsewhere for longs;
// verify before relying on it.
9922 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9923 match(Set dst (DivL src1 src2));
9924 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
9926 ins_encode %{
9927 Register dst = as_Register($dst$$reg);
9928 Register op1 = as_Register($src1$$reg);
9929 Register op2 = as_Register($src2$$reg);
9931 if (UseLoongsonISA) {
9932 __ gsddiv(dst, op1, op2);
9933 } else {
9934 __ ddiv(op1, op2);
9935 __ mflo(dst);
9936 }
9937 %}
9938 ins_pipe( pipe_slow );
9939 %}
// Float add: dst = src1 + src2 (single precision).
9941 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
9942 match(Set dst (AddF src1 src2));
9943 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
9944 ins_encode %{
9945 FloatRegister src1 = as_FloatRegister($src1$$reg);
9946 FloatRegister src2 = as_FloatRegister($src2$$reg);
9947 FloatRegister dst = as_FloatRegister($dst$$reg);
9949 __ add_s(dst, src1, src2);
9950 %}
9951 ins_pipe( fpu_regF_regF );
9952 %}
// Float subtract: dst = src1 - src2 (single precision).
9954 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
9955 match(Set dst (SubF src1 src2));
9956 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
9957 ins_encode %{
9958 FloatRegister src1 = as_FloatRegister($src1$$reg);
9959 FloatRegister src2 = as_FloatRegister($src2$$reg);
9960 FloatRegister dst = as_FloatRegister($dst$$reg);
9962 __ sub_s(dst, src1, src2);
9963 %}
9964 ins_pipe( fpu_regF_regF );
9965 %}
// Double add: dst = src1 + src2.
9966 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
9967 match(Set dst (AddD src1 src2));
9968 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
9969 ins_encode %{
9970 FloatRegister src1 = as_FloatRegister($src1$$reg);
9971 FloatRegister src2 = as_FloatRegister($src2$$reg);
9972 FloatRegister dst = as_FloatRegister($dst$$reg);
9974 __ add_d(dst, src1, src2);
9975 %}
9976 ins_pipe( fpu_regF_regF );
9977 %}
// Double subtract: dst = src1 - src2.
9979 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
9980 match(Set dst (SubD src1 src2));
9981 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
9982 ins_encode %{
9983 FloatRegister src1 = as_FloatRegister($src1$$reg);
9984 FloatRegister src2 = as_FloatRegister($src2$$reg);
9985 FloatRegister dst = as_FloatRegister($dst$$reg);
9987 __ sub_d(dst, src1, src2);
9988 %}
9989 ins_pipe( fpu_regF_regF );
9990 %}
// Float negate: dst = -src (single precision).
9992 instruct negF_reg(regF dst, regF src) %{
9993 match(Set dst (NegF src));
9994 format %{ "negF $dst, $src @negF_reg" %}
9995 ins_encode %{
9996 FloatRegister src = as_FloatRegister($src$$reg);
9997 FloatRegister dst = as_FloatRegister($dst$$reg);
9999 __ neg_s(dst, src);
10000 %}
10001 ins_pipe( fpu_regF_regF );
10002 %}
// Double negate: dst = -src.
10004 instruct negD_reg(regD dst, regD src) %{
10005 match(Set dst (NegD src));
10006 format %{ "negD $dst, $src @negD_reg" %}
10007 ins_encode %{
10008 FloatRegister src = as_FloatRegister($src$$reg);
10009 FloatRegister dst = as_FloatRegister($dst$$reg);
10011 __ neg_d(dst, src);
10012 %}
10013 ins_pipe( fpu_regF_regF );
10014 %}
// Float multiply: dst = src1 * src2 (single precision).
10017 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10018 match(Set dst (MulF src1 src2));
10019 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10020 ins_encode %{
10021 FloatRegister src1 = $src1$$FloatRegister;
10022 FloatRegister src2 = $src2$$FloatRegister;
10023 FloatRegister dst = $dst$$FloatRegister;
10025 __ mul_s(dst, src1, src2);
10026 %}
10027 ins_pipe( fpu_regF_regF );
10028 %}
// Fused float multiply-add: dst = src1 * src2 + src3 (single precision).
// The very high cost keeps the matcher from selecting it (fused rounding
// differs from separate mul+add; see the compatibility comment below).
10030 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10031 match(Set dst (AddF (MulF src1 src2) src3));
10032 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10033 ins_cost(44444);
10034 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10035 ins_encode %{
10036 FloatRegister src1 = $src1$$FloatRegister;
10037 FloatRegister src2 = $src2$$FloatRegister;
10038 FloatRegister src3 = $src3$$FloatRegister;
10039 FloatRegister dst = $dst$$FloatRegister;
10041 __ madd_s(dst, src1, src2, src3);
10042 %}
10043 ins_pipe( fpu_regF_regF );
10044 %}
10046 // Mul two double precision floating point numbers
10047 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10048 match(Set dst (MulD src1 src2));
10049 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10050 ins_encode %{
10051 FloatRegister src1 = $src1$$FloatRegister;
10052 FloatRegister src2 = $src2$$FloatRegister;
10053 FloatRegister dst = $dst$$FloatRegister;
10055 __ mul_d(dst, src1, src2);
10056 %}
10057 ins_pipe( fpu_regF_regF );
10058 %}
// Fused double multiply-add: dst = src1 * src2 + src3 (disabled via cost,
// same reasoning as maddF above).
10060 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10061 match(Set dst (AddD (MulD src1 src2) src3));
10062 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10063 ins_cost(44444);
10064 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10065 ins_encode %{
10066 FloatRegister src1 = $src1$$FloatRegister;
10067 FloatRegister src2 = $src2$$FloatRegister;
10068 FloatRegister src3 = $src3$$FloatRegister;
10069 FloatRegister dst = $dst$$FloatRegister;
10071 __ madd_d(dst, src1, src2, src3);
10072 %}
10073 ins_pipe( fpu_regF_regF );
10074 %}
// Float absolute value: dst = |src| (single precision).
10076 instruct absF_reg(regF dst, regF src) %{
10077 match(Set dst (AbsF src));
10078 ins_cost(100);
10079 format %{ "absF $dst, $src @absF_reg" %}
10080 ins_encode %{
10081 FloatRegister src = as_FloatRegister($src$$reg);
10082 FloatRegister dst = as_FloatRegister($dst$$reg);
10084 __ abs_s(dst, src);
10085 %}
10086 ins_pipe( fpu_regF_regF );
10087 %}
10090 // intrinsics for math_native.
10091 // AbsD SqrtD CosD SinD TanD LogD Log10D
// Double absolute value: dst = |src|.
10093 instruct absD_reg(regD dst, regD src) %{
10094 match(Set dst (AbsD src));
10095 ins_cost(100);
10096 format %{ "absD $dst, $src @absD_reg" %}
10097 ins_encode %{
10098 FloatRegister src = as_FloatRegister($src$$reg);
10099 FloatRegister dst = as_FloatRegister($dst$$reg);
10101 __ abs_d(dst, src);
10102 %}
10103 ins_pipe( fpu_regF_regF );
10104 %}
// Double square root: dst = sqrt(src).
10106 instruct sqrtD_reg(regD dst, regD src) %{
10107 match(Set dst (SqrtD src));
10108 ins_cost(100);
10109 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10110 ins_encode %{
10111 FloatRegister src = as_FloatRegister($src$$reg);
10112 FloatRegister dst = as_FloatRegister($dst$$reg);
10114 __ sqrt_d(dst, src);
10115 %}
10116 ins_pipe( fpu_regF_regF );
10117 %}
// Single-precision sqrt matched from the double round-trip idiom
// (float)sqrt((double)src): the ConvD2F(SqrtD(ConvF2D)) tree collapses to
// one sqrt_s instruction.
10119 instruct sqrtF_reg(regF dst, regF src) %{
10120 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10121 ins_cost(100);
10122 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10123 ins_encode %{
10124 FloatRegister src = as_FloatRegister($src$$reg);
10125 FloatRegister dst = as_FloatRegister($dst$$reg);
10127 __ sqrt_s(dst, src);
10128 %}
10129 ins_pipe( fpu_regF_regF );
10130 %}
10131 //----------------------------------Logical Instructions----------------------
10132 //__________________________________Integer Logical Instructions-------------
10134 //And Instructions
10135 // And Register with Immediate
// And with a general 32-bit immediate: the constant is first materialized
// into the AT scratch register, then and'ed (no single-instruction form).
10136 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10137 match(Set dst (AndI src1 src2));
10139 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10140 ins_encode %{
10141 Register dst = $dst$$Register;
10142 Register src = $src1$$Register;
10143 int val = $src2$$constant;
10145 __ move(AT, val);
10146 __ andr(dst, src, AT);
10147 %}
10148 ins_pipe( ialu_regI_regI );
10149 %}
// And with an immediate that fits the 16-bit zero-extended andi field:
// cheaper single-instruction form, hence the lower cost.
10151 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10152 match(Set dst (AndI src1 src2));
10153 ins_cost(60);
10155 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10156 ins_encode %{
10157 Register dst = $dst$$Register;
10158 Register src = $src1$$Register;
10159 int val = $src2$$constant;
10161 __ andi(dst, src, val);
10162 %}
10163 ins_pipe( ialu_regI_regI );
10164 %}
// And with a contiguous low-bit mask (2^k - 1): implemented as a bit-field
// extract of the low 'size' bits (ext), where is_int_mask returns the width.
10166 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10167 match(Set dst (AndI src1 mask));
10168 ins_cost(60);
10170 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10171 ins_encode %{
10172 Register dst = $dst$$Register;
10173 Register src = $src1$$Register;
10174 int size = Assembler::is_int_mask($mask$$constant);
10176 __ ext(dst, src, 0, size);
10177 %}
10178 ins_pipe( ialu_regI_regI );
10179 %}
// Long variant of the contiguous-mask AND, using the 64-bit dext extract.
10181 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10182 match(Set dst (AndL src1 mask));
10183 ins_cost(60);
10185 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10186 ins_encode %{
10187 Register dst = $dst$$Register;
10188 Register src = $src1$$Register;
10189 int size = Assembler::is_jlong_mask($mask$$constant);
10191 __ dext(dst, src, 0, size);
10192 %}
10193 ins_pipe( ialu_regI_regI );
10194 %}
// Xor with a 16-bit zero-extended immediate: single xori instruction.
10196 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10197 match(Set dst (XorI src1 src2));
10198 ins_cost(60);
10200 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10201 ins_encode %{
10202 Register dst = $dst$$Register;
10203 Register src = $src1$$Register;
10204 int val = $src2$$constant;
10206 __ xori(dst, src, val);
10207 %}
10208 ins_pipe( ialu_regI_regI );
10209 %}
// Bitwise NOT (xor with -1): on Loongson 3A2000+ this is a single
// gsorn dst, R0, src (dst = R0 | ~src = ~src).
10211 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10212 match(Set dst (XorI src1 M1));
10213 predicate(UseLoongsonISA && Use3A2000);
10214 ins_cost(60);
10216 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10217 ins_encode %{
10218 Register dst = $dst$$Register;
10219 Register src = $src1$$Register;
10221 __ gsorn(dst, R0, src);
10222 %}
10223 ins_pipe( ialu_regI_regI );
10224 %}
// Same NOT idiom when the operand is a long narrowed by ConvL2I; the
// narrowing needs no extra instruction because only an int result is used.
10226 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10227 match(Set dst (XorI (ConvL2I src1) M1));
10228 predicate(UseLoongsonISA && Use3A2000);
10229 ins_cost(60);
10231 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10232 ins_encode %{
10233 Register dst = $dst$$Register;
10234 Register src = $src1$$Register;
10236 __ gsorn(dst, R0, src);
10237 %}
10238 ins_pipe( ialu_regI_regI );
10239 %}
// Long xor with a 16-bit zero-extended immediate: xori works on the full
// 64-bit register, so the same instruction serves the long case.
10241 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10242 match(Set dst (XorL src1 src2));
10243 ins_cost(60);
10245 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10246 ins_encode %{
10247 Register dst = $dst$$Register;
10248 Register src = $src1$$Register;
10249 int val = $src2$$constant;
10251 __ xori(dst, src, val);
10252 %}
10253 ins_pipe( ialu_regI_regI );
10254 %}
10256 /*
10257 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10258 match(Set dst (XorL src1 M1));
10259 predicate(UseLoongsonISA);
10260 ins_cost(60);
10262 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10263 ins_encode %{
10264 Register dst = $dst$$Register;
10265 Register src = $src1$$Register;
10267 __ gsorn(dst, R0, src);
10268 %}
10269 ins_pipe( ialu_regI_regI );
10270 %}
10271 */
// Byte load masked with 0xFF (mask on the left of the AndI): this is exactly
// an unsigned-byte load, so the sign-extending LoadB + AndI pair collapses
// to a single lbu via load_UB_enc.
10273 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10274 match(Set dst (AndI mask (LoadB mem)));
10275 ins_cost(60);
// Format mnemonic fixed: load_UB_enc emits lbu (load byte unsigned),
// not lhu (load halfword unsigned) as the format previously claimed.
10277 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10278 ins_encode(load_UB_enc(dst, mem));
10279 ins_pipe( ialu_loadI );
10280 %}
// Byte load masked with 0xFF (mask on the right of the AndI); mirror of
// lbu_and_lmask, emitting a single unsigned-byte load.
10282 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10283 match(Set dst (AndI (LoadB mem) mask));
10284 ins_cost(60);
// Format mnemonic fixed: load_UB_enc emits lbu (load byte unsigned),
// not lhu (load halfword unsigned) as the format previously claimed.
10286 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10287 ins_encode(load_UB_enc(dst, mem));
10288 ins_pipe( ialu_loadI );
10289 %}
// Integer and: dst = src1 & src2.
10291 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10292 match(Set dst (AndI src1 src2));
10294 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10295 ins_encode %{
10296 Register dst = $dst$$Register;
10297 Register src1 = $src1$$Register;
10298 Register src2 = $src2$$Register;
10299 __ andr(dst, src1, src2);
10300 %}
10301 ins_pipe( ialu_regI_regI );
10302 %}
// And-not: dst = src1 & ~src2, matched from the x & (y ^ -1) idiom and
// folded into a single Loongson gsandn.
10304 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10305 match(Set dst (AndI src1 (XorI src2 M1)));
10306 predicate(UseLoongsonISA && Use3A2000);
10308 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10309 ins_encode %{
10310 Register dst = $dst$$Register;
10311 Register src1 = $src1$$Register;
10312 Register src2 = $src2$$Register;
10314 __ gsandn(dst, src1, src2);
10315 %}
10316 ins_pipe( ialu_regI_regI );
10317 %}
// Or-not: dst = src1 | ~src2, matched from x | (y ^ -1), single gsorn.
10319 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10320 match(Set dst (OrI src1 (XorI src2 M1)));
10321 predicate(UseLoongsonISA && Use3A2000);
10323 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10324 ins_encode %{
10325 Register dst = $dst$$Register;
10326 Register src1 = $src1$$Register;
10327 Register src2 = $src2$$Register;
10329 __ gsorn(dst, src1, src2);
10330 %}
10331 ins_pipe( ialu_regI_regI );
10332 %}
// Commuted and-not: (~src1) & src2 == src2 & ~src1, so operands swap
// into gsandn(dst, src2, src1).
10334 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10335 match(Set dst (AndI (XorI src1 M1) src2));
10336 predicate(UseLoongsonISA && Use3A2000);
10338 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10339 ins_encode %{
10340 Register dst = $dst$$Register;
10341 Register src1 = $src1$$Register;
10342 Register src2 = $src2$$Register;
10344 __ gsandn(dst, src2, src1);
10345 %}
10346 ins_pipe( ialu_regI_regI );
10347 %}
// Commuted or-not: (~src1) | src2 == src2 | ~src1 -> gsorn(dst, src2, src1).
10349 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10350 match(Set dst (OrI (XorI src1 M1) src2));
10351 predicate(UseLoongsonISA && Use3A2000);
10353 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10354 ins_encode %{
10355 Register dst = $dst$$Register;
10356 Register src1 = $src1$$Register;
10357 Register src2 = $src2$$Register;
10359 __ gsorn(dst, src2, src1);
10360 %}
10361 ins_pipe( ialu_regI_regI );
10362 %}
10364 // And Long Register with Register
// Long and: dst = src1 & src2 (andr operates on the full 64-bit registers).
10365 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10366 match(Set dst (AndL src1 src2));
10367 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10368 ins_encode %{
10369 Register dst_reg = as_Register($dst$$reg);
10370 Register src1_reg = as_Register($src1$$reg);
10371 Register src2_reg = as_Register($src2$$reg);
10373 __ andr(dst_reg, src1_reg, src2_reg);
10374 %}
10375 ins_pipe( ialu_regL_regL );
10376 %}
// Long and with an int operand widened via ConvI2L; no explicit widening
// instruction is emitted.
// NOTE(review): assumes the int register already holds a sign-extended
// 64-bit value — confirm against the port's register conventions.
10378 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10379 match(Set dst (AndL src1 (ConvI2L src2)));
10380 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10381 ins_encode %{
10382 Register dst_reg = as_Register($dst$$reg);
10383 Register src1_reg = as_Register($src1$$reg);
10384 Register src2_reg = as_Register($src2$$reg);
10386 __ andr(dst_reg, src1_reg, src2_reg);
10387 %}
10388 ins_pipe( ialu_regL_regL );
10389 %}
// Long and with a 16-bit zero-extended immediate: single andi.
10391 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10392 match(Set dst (AndL src1 src2));
10393 ins_cost(60);
10395 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10396 ins_encode %{
10397 Register dst = $dst$$Register;
10398 Register src = $src1$$Register;
10399 long val = $src2$$constant;
10401 __ andi(dst, src, val);
10402 %}
10403 ins_pipe( ialu_regI_regI );
10404 %}
// (long & imm16) narrowed to int: the andi result already fits in 16 bits,
// so the ConvL2I costs nothing extra.
10406 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10407 match(Set dst (ConvL2I (AndL src1 src2)));
10408 ins_cost(60);
10410 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10411 ins_encode %{
10412 Register dst = $dst$$Register;
10413 Register src = $src1$$Register;
10414 long val = $src2$$constant;
10416 __ andi(dst, src, val);
10417 %}
10418 ins_pipe( ialu_regI_regI );
10419 %}
10421 /*
10422 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10423 match(Set dst (AndL src1 (XorL src2 M1)));
10424 predicate(UseLoongsonISA);
10426 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10427 ins_encode %{
10428 Register dst = $dst$$Register;
10429 Register src1 = $src1$$Register;
10430 Register src2 = $src2$$Register;
10432 __ gsandn(dst, src1, src2);
10433 %}
10434 ins_pipe( ialu_regI_regI );
10435 %}
10436 */
10438 /*
10439 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10440 match(Set dst (OrL src1 (XorL src2 M1)));
10441 predicate(UseLoongsonISA);
10443 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10444 ins_encode %{
10445 Register dst = $dst$$Register;
10446 Register src1 = $src1$$Register;
10447 Register src2 = $src2$$Register;
10449 __ gsorn(dst, src1, src2);
10450 %}
10451 ins_pipe( ialu_regI_regI );
10452 %}
10453 */
10455 /*
10456 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10457 match(Set dst (AndL (XorL src1 M1) src2));
10458 predicate(UseLoongsonISA);
10460 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10461 ins_encode %{
10462 Register dst = $dst$$Register;
10463 Register src1 = $src1$$Register;
10464 Register src2 = $src2$$Register;
10466 __ gsandn(dst, src2, src1);
10467 %}
10468 ins_pipe( ialu_regI_regI );
10469 %}
10470 */
10472 /*
10473 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10474 match(Set dst (OrL (XorL src1 M1) src2));
10475 predicate(UseLoongsonISA);
10477 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10478 ins_encode %{
10479 Register dst = $dst$$Register;
10480 Register src1 = $src1$$Register;
10481 Register src2 = $src2$$Register;
10483 __ gsorn(dst, src2, src1);
10484 %}
10485 ins_pipe( ialu_regI_regI );
10486 %}
10487 */
// The following andL_Reg_immL_* rules implement in-place AND with a constant
// whose binary form clears one contiguous bit field: dins(dst, R0, lsb, size)
// inserts 'size' zero bits starting at 'lsb', i.e. dst &= ~(((1<<size)-1)<<lsb).
// dst = dst & -8: clear bits 0..2 (-8 == ~0x7).
10489 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10490 match(Set dst (AndL dst M8));
10491 ins_cost(60);
10493 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10494 ins_encode %{
10495 Register dst = $dst$$Register;
10497 __ dins(dst, R0, 0, 3);
10498 %}
10499 ins_pipe( ialu_regI_regI );
10500 %}
// dst = dst & -5: clear bit 2 (-5 == ~0x4).
10502 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10503 match(Set dst (AndL dst M5));
10504 ins_cost(60);
10506 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10507 ins_encode %{
10508 Register dst = $dst$$Register;
10510 __ dins(dst, R0, 2, 1);
10511 %}
10512 ins_pipe( ialu_regI_regI );
10513 %}
// dst = dst & -7: clear bits 1..2 (-7 == ~0x6).
10515 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10516 match(Set dst (AndL dst M7));
10517 ins_cost(60);
10519 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10520 ins_encode %{
10521 Register dst = $dst$$Register;
10523 __ dins(dst, R0, 1, 2);
10524 %}
10525 ins_pipe( ialu_regI_regI );
10526 %}
// dst = dst & -4: clear bits 0..1 (-4 == ~0x3).
10528 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10529 match(Set dst (AndL dst M4));
10530 ins_cost(60);
10532 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10533 ins_encode %{
10534 Register dst = $dst$$Register;
10536 __ dins(dst, R0, 0, 2);
10537 %}
10538 ins_pipe( ialu_regI_regI );
10539 %}
// dst = dst & -121: clear bits 3..6 (-121 == ~0x78).
10541 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10542 match(Set dst (AndL dst M121));
10543 ins_cost(60);
10545 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10546 ins_encode %{
10547 Register dst = $dst$$Register;
10549 __ dins(dst, R0, 3, 4);
10550 %}
10551 ins_pipe( ialu_regI_regI );
10552 %}
10554 // Or Long Register with Register
// Long or: dst = src1 | src2.
10555 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10556 match(Set dst (OrL src1 src2));
10557 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10558 ins_encode %{
10559 Register dst_reg = $dst$$Register;
10560 Register src1_reg = $src1$$Register;
10561 Register src2_reg = $src2$$Register;
10563 __ orr(dst_reg, src1_reg, src2_reg);
10564 %}
10565 ins_pipe( ialu_regL_regL );
10566 %}
// Long or where the left operand is a pointer reinterpreted as bits
// (CastP2X); the cast is free, the or is the same orr.
10568 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10569 match(Set dst (OrL (CastP2X src1) src2));
10570 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10571 ins_encode %{
10572 Register dst_reg = $dst$$Register;
10573 Register src1_reg = $src1$$Register;
10574 Register src2_reg = $src2$$Register;
10576 __ orr(dst_reg, src1_reg, src2_reg);
10577 %}
10578 ins_pipe( ialu_regL_regL );
10579 %}
10581 // Xor Long Register with Register
// Long xor: dst = src1 ^ src2.
10582 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10583 match(Set dst (XorL src1 src2));
10584 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10585 ins_encode %{
10586 Register dst_reg = as_Register($dst$$reg);
10587 Register src1_reg = as_Register($src1$$reg);
10588 Register src2_reg = as_Register($src2$$reg);
10590 __ xorr(dst_reg, src1_reg, src2_reg);
10591 %}
10592 ins_pipe( ialu_regL_regL );
10593 %}
10595 // Shift Left by 8-bit immediate
// Integer shift-left by constant: dst = src << shamt (sll masks the shift
// amount to 5 bits in hardware, matching Java's (shift & 31) semantics).
10596 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10597 match(Set dst (LShiftI src shift));
10599 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10600 ins_encode %{
10601 Register src = $src$$Register;
10602 Register dst = $dst$$Register;
10603 int shamt = $shift$$constant;
10605 __ sll(dst, src, shamt);
10606 %}
10607 ins_pipe( ialu_regI_regI );
10608 %}
// Same shift when the operand is a long narrowed with ConvL2I: sll reads only
// the low 32 bits, so no explicit narrowing is needed.
10610 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10611 match(Set dst (LShiftI (ConvL2I src) shift));
10613 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10614 ins_encode %{
10615 Register src = $src$$Register;
10616 Register dst = $dst$$Register;
10617 int shamt = $shift$$constant;
10619 __ sll(dst, src, shamt);
10620 %}
10621 ins_pipe( ialu_regI_regI );
10622 %}
// (src << 16) & 0xFFFF0000: the mask is redundant after a 16-bit left shift,
// so only the sll is emitted.
10624 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10625 match(Set dst (AndI (LShiftI src shift) mask));
10627 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10628 ins_encode %{
10629 Register src = $src$$Register;
10630 Register dst = $dst$$Register;
10632 __ sll(dst, src, 16);
10633 %}
10634 ins_pipe( ialu_regI_regI );
10635 %}
// ((int)(src & 7) << 16) >> 16: the value is at most 7, so the 16-bit
// sign-extension round trip is a no-op and the whole tree is just andi 7.
10637 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10638 %{
10639 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10641 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10642 ins_encode %{
10643 Register src = $src$$Register;
10644 Register dst = $dst$$Register;
10646 __ andi(dst, src, 7);
10647 %}
10648 ins_pipe(ialu_regI_regI);
10649 %}
// ((src1 | imm) << 16) >> 16 collapsed to a single ori.
// NOTE(review): this assumes the <<16 >>16 sign-extension is a no-op on the
// or result for the inputs this pattern is generated from (i2s contexts) —
// verify the matcher only produces it there.
10651 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10652 %{
10653 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10655 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10656 ins_encode %{
10657 Register src = $src1$$Register;
10658 int val = $src2$$constant;
10659 Register dst = $dst$$Register;
10661 __ ori(dst, src, val);
10662 %}
10663 ins_pipe(ialu_regI_regI);
10664 %}
10666 // Shift Left by 16, followed by Arithmetic Shift Right by 16.
10667 // This idiom is used by the compiler for the i2s bytecode: the pattern
// (src << 16) >> 16 sign-extends the low halfword, which is exactly seh.
10668 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10669 %{
10670 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10672 format %{ "i2s $dst, $src\t# @i2s" %}
10673 ins_encode %{
10674 Register src = $src$$Register;
10675 Register dst = $dst$$Register;
10677 __ seh(dst, src);
10678 %}
10679 ins_pipe(ialu_regI_regI);
10680 %}
10682 // Shift Left by 24, followed by Arithmetic Shift Right by 24.
10683 // This idiom is used by the compiler for the i2b bytecode: the pattern
// (src << 24) >> 24 sign-extends the low byte, which is exactly seb.
10684 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10685 %{
10686 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10688 format %{ "i2b $dst, $src\t# @i2b" %}
10689 ins_encode %{
10690 Register src = $src$$Register;
10691 Register dst = $dst$$Register;
10693 __ seb(dst, src);
10694 %}
10695 ins_pipe(ialu_regI_regI);
10696 %}
// Shift-left of a ConvL2I-narrowed long by a constant (same reasoning as
// salL2I_Reg_imm above: sll only reads the low 32 bits).
10699 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10700 match(Set dst (LShiftI (ConvL2I src) shift));
10702 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10703 ins_encode %{
10704 Register src = $src$$Register;
10705 Register dst = $dst$$Register;
10706 int shamt = $shift$$constant;
10708 __ sll(dst, src, shamt);
10709 %}
10710 ins_pipe( ialu_regI_regI );
10711 %}
10713 // Shift Left by register amount
// Integer shift-left by a register: sllv uses the low 5 bits of shamt,
// matching Java's (shift & 31) semantics.
10714 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10715 match(Set dst (LShiftI src shift));
10717 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10718 ins_encode %{
10719 Register src = $src$$Register;
10720 Register dst = $dst$$Register;
10721 Register shamt = $shift$$Register;
10722 __ sllv(dst, src, shamt);
10723 %}
10724 ins_pipe( ialu_regI_regI );
10725 %}
10728 // Shift Left Long
// Long shift-left by a constant. dsll encodes shift amounts 0..31; larger
// amounts (masked to 6 bits) use dsll32, which shifts by (sa - 32).
10729 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10730 //predicate(UseNewLongLShift);
10731 match(Set dst (LShiftL src shift));
10732 ins_cost(100);
10733 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10734 ins_encode %{
10735 Register src_reg = as_Register($src$$reg);
10736 Register dst_reg = as_Register($dst$$reg);
10737 int shamt = $shift$$constant;
10739 if (__ is_simm(shamt, 5))
10740 __ dsll(dst_reg, src_reg, shamt);
10741 else
10742 {
10743 int sa = Assembler::low(shamt, 6);
10744 if (sa < 32) {
10745 __ dsll(dst_reg, src_reg, sa);
10746 } else {
10747 __ dsll32(dst_reg, src_reg, sa - 32);
10748 }
10749 }
10750 %}
10751 ins_pipe( ialu_regL_regL );
10752 %}
// Long shift-left of an int widened via ConvI2L, by a constant.
// NOTE(review): this matches the identical pattern as salL_convI2L_Reg_imm
// further below (same cost); one of the two looks redundant — confirm which
// rule ADLC actually selects before removing either.
10754 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10755 //predicate(UseNewLongLShift);
10756 match(Set dst (LShiftL (ConvI2L src) shift));
10757 ins_cost(100);
10758 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10759 ins_encode %{
10760 Register src_reg = as_Register($src$$reg);
10761 Register dst_reg = as_Register($dst$$reg);
10762 int shamt = $shift$$constant;
10764 if (__ is_simm(shamt, 5))
10765 __ dsll(dst_reg, src_reg, shamt);
10766 else
10767 {
10768 int sa = Assembler::low(shamt, 6);
10769 if (sa < 32) {
10770 __ dsll(dst_reg, src_reg, sa);
10771 } else {
10772 __ dsll32(dst_reg, src_reg, sa - 32);
10773 }
10774 }
10775 %}
10776 ins_pipe( ialu_regL_regL );
10777 %}
10779 // Shift Left Long
// Long shift-left by a register: dsllv uses the low 6 bits of the amount.
10780 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10781 //predicate(UseNewLongLShift);
10782 match(Set dst (LShiftL src shift));
10783 ins_cost(100);
10784 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10785 ins_encode %{
10786 Register src_reg = as_Register($src$$reg);
10787 Register dst_reg = as_Register($dst$$reg);
10789 __ dsllv(dst_reg, src_reg, $shift$$Register);
10790 %}
10791 ins_pipe( ialu_regL_regL );
10792 %}
// Duplicate of salL_RegI2L_imm (see the NOTE above): same match, same cost,
// same emitted code.
10794 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10795 match(Set dst (LShiftL (ConvI2L src) shift));
10796 ins_cost(100);
10797 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10798 ins_encode %{
10799 Register src_reg = as_Register($src$$reg);
10800 Register dst_reg = as_Register($dst$$reg);
10801 int shamt = $shift$$constant;
10803 if (__ is_simm(shamt, 5)) {
10804 __ dsll(dst_reg, src_reg, shamt);
10805 } else {
10806 int sa = Assembler::low(shamt, 6);
10807 if (sa < 32) {
10808 __ dsll(dst_reg, src_reg, sa);
10809 } else {
10810 __ dsll32(dst_reg, src_reg, sa - 32);
10811 }
10812 }
10813 %}
10814 ins_pipe( ialu_regL_regL );
10815 %}
10817 // Shift Right Long
// Long arithmetic shift-right by a constant; amounts >= 32 use dsra32
// with (sa - 32). The constant is masked to 6 bits, matching (shift & 63).
10818 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10819 match(Set dst (RShiftL src shift));
10820 ins_cost(100);
10821 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10822 ins_encode %{
10823 Register src_reg = as_Register($src$$reg);
10824 Register dst_reg = as_Register($dst$$reg);
10825 int shamt = ($shift$$constant & 0x3f);
10826 if (__ is_simm(shamt, 5))
10827 __ dsra(dst_reg, src_reg, shamt);
10828 else {
10829 int sa = Assembler::low(shamt, 6);
10830 if (sa < 32) {
10831 __ dsra(dst_reg, src_reg, sa);
10832 } else {
10833 __ dsra32(dst_reg, src_reg, sa - 32);
10834 }
10835 }
10836 %}
10837 ins_pipe( ialu_regL_regL );
10838 %}
// (int)(src >> shift) for shift in [32,63]: a single dsra32 both shifts and
// leaves a properly sign-extended 32-bit result, so ConvL2I is free.
10840 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
10841 match(Set dst (ConvL2I (RShiftL src shift)));
10842 ins_cost(100);
10843 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
10844 ins_encode %{
10845 Register src_reg = as_Register($src$$reg);
10846 Register dst_reg = as_Register($dst$$reg);
10847 int shamt = $shift$$constant;
10849 __ dsra32(dst_reg, src_reg, shamt - 32);
10850 %}
10851 ins_pipe( ialu_regL_regL );
10852 %}
10854 // Shift Right Long arithmetically
// Long arithmetic shift-right by a register: dsrav uses the low 6 bits.
10855 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10856 //predicate(UseNewLongLShift);
10857 match(Set dst (RShiftL src shift));
10858 ins_cost(100);
10859 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
10860 ins_encode %{
10861 Register src_reg = as_Register($src$$reg);
10862 Register dst_reg = as_Register($dst$$reg);
10864 __ dsrav(dst_reg, src_reg, $shift$$Register);
10865 %}
10866 ins_pipe( ialu_regL_regL );
10867 %}
10869 // Shift Right Long logically
10870 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10871 match(Set dst (URShiftL src shift));
10872 ins_cost(100);
10873 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
10874 ins_encode %{
10875 Register src_reg = as_Register($src$$reg);
10876 Register dst_reg = as_Register($dst$$reg);
10878 __ dsrlv(dst_reg, src_reg, $shift$$Register);
10879 %}
10880 ins_pipe( ialu_regL_regL );
10881 %}
10883 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
10884 match(Set dst (URShiftL src shift));
10885 ins_cost(80);
10886 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
10887 ins_encode %{
10888 Register src_reg = as_Register($src$$reg);
10889 Register dst_reg = as_Register($dst$$reg);
10890 int shamt = $shift$$constant;
10892 __ dsrl(dst_reg, src_reg, shamt);
10893 %}
10894 ins_pipe( ialu_regL_regL );
10895 %}
10897 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
10898 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
10899 ins_cost(80);
10900 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
10901 ins_encode %{
10902 Register src_reg = as_Register($src$$reg);
10903 Register dst_reg = as_Register($dst$$reg);
10904 int shamt = $shift$$constant;
10906 __ dext(dst_reg, src_reg, shamt, 31);
10907 %}
10908 ins_pipe( ialu_regL_regL );
10909 %}
// Logical right shift of a pointer's raw bit pattern (CastP2X) by a
// constant in [0, 31].
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    // dsrl32 shifts by (immediate + 32), covering amounts in [32, 63].
    __ dsrl32($dst$$Register, $src$$Register, $shift$$constant - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >>> shift) for shift > 32: the zero-extended dsrl32 result then
// fits in at most 31 bits, so it is already a valid sign-extended int.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  // shift == 32 is deliberately excluded: its result can have bit 31 set,
  // which would need an explicit sign extension that dsrl32 does not do.
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer's raw bit pattern (CastP2X) by a
// constant in [32, 63] via dsrl32 (shift amount = immediate + 32).
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
    // sll with shamt 0 re-sign-extends the low 32 bits (long -> int).
    __ sll($dst$$Register, $dst$$Register, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    // No re-sign-extension needed: or-ing two sign-extended words yields a
    // sign-extended word.
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Matches (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate the low bit up to bit 31 first, then shift right the remaining
// rshift-1 positions (skipped when rshift == 1).
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or a long register with a pointer's raw bit pattern (CastP2X).
// NOTE(review): matched as OrI but operands are 64-bit (mRegL/mRegP) —
// confirm this mixed-width match is intended.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    // 32-bit logical right shift of the low word by a constant amount.
    __ srl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask, where mask is of the form 2^k - 1, collapses to a
// single ext bit-field extract of k bits at position 'shift'.
// NOTE(review): presumably Assembler::is_int_mask returns the mask width k —
// verify against its definition.
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of an int: when (lshift + rshift) % 32 == 0, a left rotation by
// lshift equals a right rotation by rshift, emitted as a single rotr.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of a long: when (lshift + rshift) % 64 == 0, left rotation by
// lshift equals right rotation by rshift (here in [0, 31]), one drotr.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of a long with an equivalent right-rotation amount in [32, 63]:
// drotr32 rotates by (immediate + 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of an int: (dst >>> rshift) | (dst << lshift) with
// (rshift + lshift) % 32 == 0 is a single rotr by rshift.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of a long by an amount in [0, 31]; the complementary left
// shift must bring the total to a multiple of 64 (checked by the predicate).
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of a long by an amount in [32, 63]: drotr32 rotates by
// (immediate + 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    // Variable 32-bit logical right shift; low 5 bits of $shift are used.
    __ srlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic (sign-propagating) right shift of an int by a constant.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    // Variable 32-bit arithmetic right shift.
    __ srav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// dst = (src != 0) ? 1 : 0. Preload 1, then movz clears it when src is zero.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: save src in AT first so preloading 1 into dst
      // does not clobber the value being tested.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> long: sll with shamt 0 sign-extends the low word into 64 bits.
// The copy is skipped when dst == src — assumes ints are already kept
// sign-extended in registers (TODO confirm this register-file invariant).
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// long -> int: truncate to the low word and sign-extend via sll shamt 0.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (long)(int)src round-trip: one sll shamt 0 sign-extends the low 32 bits.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// long -> double: move the 64-bit integer into the FPU, then convert.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, fast path. trunc_l_d returns max_long (0x7fffffffffffffff)
// for out-of-range inputs and NaN; when that sentinel is seen, fix up:
// a genuinely negative overflow becomes min_long and NaN becomes 0.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // Build max_long in AT: 0x7fffffffffffffff == (-1 >>> 1).
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Result != max_long: conversion was exact, done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // F30 = 0.0; src < 0.0 means negative overflow -> min_long.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // min_long = -1 - max_long; this must be a full 64-bit subtract.
    // The previous code used 32-bit subu, whose result is UNPREDICTABLE for
    // non-sign-extended word operands and in any case yields
    // 0xffffffff80000000 instead of 0x8000000000000000.
    __ dsubu(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, slow path: NaN -> 0 up front; otherwise truncate and check
// FCSR (coprocessor control reg 31) bit 16, the invalid-operation flag — if
// it fired, fall back to the SharedRuntime::d2l stub for the exact result.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, fast path: hardware truncate, then force the result to 0
// when the input is NaN (c_un_s sets the condition, movt zeroes dst).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, slow path: NaN -> 0; otherwise truncate and, if FCSR's
// invalid-operation flag (bit 16) fired, call SharedRuntime::f2i for the
// Java-exact result. V0 is preserved around the leaf call (see note below).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     *    J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     *    V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, fast path: hardware truncate to a 64-bit integer, then
// force the result to 0 for NaN input.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, slow path: NaN -> 0; otherwise truncate and check FCSR's
// invalid-operation flag (bit 16); on overflow call SharedRuntime::f2l.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// long -> float: move the 64-bit integer into the FPU and convert.
// (Removed an unused local `Label L` left over from an earlier revision.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// int -> float: move the word into the FPU and convert word to single.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// dst = (p < 0) ? -1 : 0 — broadcast the sign bit with an arithmetic shift.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    // slt leaves 0/1 in dst; negating it produces the all-zeros / all-ones
    // mask (0 or -1) that CmpLTMask requires.
    __ slt($dst$$Register, $p$$Register, $q$$Register);
    __ subu($dst$$Register, R0, $dst$$Register);
  %}
  ins_pipe( pipe_slow );
%}
// dst = (src != NULL) ? 1 : 0 — same movz trick as convI2B, on a pointer.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash src in AT before preloading 1 into dst.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> double: move the word into the FPU and convert word to double.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    // Widening float -> double is a single FPU conversion (always exact).
    __ cvt_d_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Narrowing double -> float: one FPU conversion, rounds per current mode.
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc_w_d returns max_int (0x7fffffff) as the out-of-range/NaN
// sentinel; on seeing it, fix up: negative overflow -> min_int
// (subu32 of -1 - max_int), NaN -> 0 (movt).
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // F30 = 0.0; src < 0.0 distinguishes negative overflow from positive.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// double -> int, slow path: truncate, then check FCSR's invalid-operation
// flag (bit 16); if it fired, call SharedRuntime::d2i for the exact result.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form
// General case (oop may be null): copy into dst, then compress in place.
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    if (src != dst) {
      __ move(dst, src);
    }
    __ encode_heap_oop(dst);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop the type system proves non-null (skips the null check).
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed oop that may be null: copy then decode in place.
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ move(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed oop proven non-null; uses the two-register decoder
// when src and dst differ, the in-place variant otherwise.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer into its narrow (32-bit) form.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  // Debug string fixed: it previously said "encode_heap_oop_not_null",
  // copy-pasted from the oop rule, which mislabelled this node in -XX:+PrintOptoAssembly output.
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a narrow klass pointer; two-register decoder when src != dst,
// in-place variant otherwise.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Load the current JavaThread pointer. With OPT_THREAD the thread is kept
// pinned in TREG and this is a register move; otherwise call get_thread().
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a type-system-only node: no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a compile-time pointer retype: zero-size, no code emitted.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a compile-time int retype: no code emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    // Jump through RA; the nop fills the MIPS branch delay slot.
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
11785 /*
11786 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11787 instruct jumpXtnd(mRegL switch_val) %{
11788 match(Jump switch_val);
11790 ins_cost(350);
11792 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11793 "jr T9\n\t"
11794 "nop" %}
11795 ins_encode %{
11796 Register table_base = $constanttablebase;
11797 int con_offset = $constantoffset;
11798 Register switch_reg = $switch_val$$Register;
11800 if (UseLoongsonISA) {
11801 if (Assembler::is_simm(con_offset, 8)) {
11802 __ gsldx(T9, table_base, switch_reg, con_offset);
11803 } else if (Assembler::is_simm16(con_offset)) {
11804 __ daddu(T9, table_base, switch_reg);
11805 __ ld(T9, T9, con_offset);
11806 } else {
11807 __ move(T9, con_offset);
11808 __ daddu(AT, table_base, switch_reg);
11809 __ gsldx(T9, AT, T9, 0);
11810 }
11811 } else {
11812 if (Assembler::is_simm16(con_offset)) {
11813 __ daddu(T9, table_base, switch_reg);
11814 __ ld(T9, T9, con_offset);
11815 } else {
11816 __ move(T9, con_offset);
11817 __ daddu(AT, table_base, switch_reg);
11818 __ daddu(AT, T9, AT);
11819 __ ld(T9, AT, 0);
11820 }
11821 }
11823 __ jr(T9);
11824 __ nop();
11826 %}
11827 ins_pipe(pipe_jump);
11828 %}
11829 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // NOTE(review): `if(&L)` tests whether $labl$$label was non-null before
    // the dereference above — dereferencing a null pointer first is dubious;
    // confirm the operand can actually be null here.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    // Branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *      [stubGenerator_mips.cpp] generate_forward_exception()
     *      [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand off the pending exception in the (V0, V1) convention expected by
    // the exception blob, then jump; nop fills the delay slot.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Leaf runtime call that neither uses FP arguments nor needs a safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Prefetch instructions.

// Prefetch-for-read of [base + (index << scale) + disp].
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale), or just base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement to the address accumulated in AT. The previous
    // code restarted from 'base' here — discarding the index term — and in
    // the short-displacement case added 'disp' twice (daddiu from base, then
    // daddiu again), so the wrong cache line was prefetched.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch-for-write of [base + (index << scale) + disp].
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale), or just base when there is no index.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement to the address accumulated in AT. The previous
    // code restarted from 'base' here — discarding the index term — and in
    // the short-displacement case added 'disp' twice, prefetching the wrong
    // line (same defect as the old prefetchrNTA).
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// Prefetch-for-allocation: touch one byte at [base + (index << scale) + disp]
// by loading it into R0 (a discarded load acts as the prefetch). On Loongson
// the fused gslbx does reg+reg+imm addressing in one instruction.
// NOTE(review): the address adds use 32-bit addu while other memory rules in
// this file use daddu for 64-bit pointers — confirm this is intentional.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12073 // Load Char (16bit unsigned)
12074 instruct loadUS(mRegI dst, memory mem) %{
12075 match(Set dst (LoadUS mem));
12077 ins_cost(125);
12078 format %{ "loadUS $dst,$mem @ loadC" %}
12079 ins_encode(load_C_enc(dst, mem));
12080 ins_pipe( ialu_loadI );
12081 %}
// Load Char (16bit unsigned) and widen to long in one rule; the zero-extended
// 16-bit value needs no further sign handling for ConvI2L.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Char (16bit unsigned) from a register.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store Char zero: dedicated rule so R0 can be used directly as the source.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0: mtc1 from R0 writes an all-zero bit pattern,
// which is +0.0f, avoiding a constant-table access.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load float constant from the per-method constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the lwc1 immediate field.
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Large offset: materialize in AT, then use an indexed load (Loongson)
      // or an explicit add (plain MIPS).
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0: dmtc1 from R0 writes 64 zero bits (+0.0),
// avoiding a constant-table access.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant from the per-method constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the ldc1 immediate field.
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Large offset: materialize in AT, then indexed load or explicit add.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0: since the bit pattern of +0.0f is all zeros, the value is
// stored directly from R0 with an integer sw/gsswx, never touching the FPU.
// The branches below cover every (index, scale, disp-size, ISA) combination.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gsswx takes only an 8-bit signed displacement.
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // Fold base+index into AT, keep disp in the sw immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large disp: fold disp into the index side, then indexed store.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        // Plain MIPS: always materialize base+scaled index in AT.
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Double - UNaligned.
// Currently emitted with the same encoding as the aligned load; see the
// FIXME below about using ldl/ldr for a genuinely unaligned sequence.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0. F30 is used as a scratch FPU register: mtc1(R0)+cvt_d_w
// materializes 0.0 in F30 (dmtc1(R0, F30) would yield the same +0.0 bit
// pattern), then the value is stored with sdc1/gssdxc1 for every
// (index, scale, disp-size, ISA) combination.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // F30 := (double) 0
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gssdxc1 takes only an 8-bit signed displacement.
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // Fold base+index into AT, keep disp in the sdc1 immediate.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large disp: fold disp into the index side, then indexed store.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        // Plain MIPS: always materialize base+scaled index in AT.
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load int from a stack slot (SP-relative; disp must fit simm16).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store int to a stack slot (SP-relative; disp must fit simm16).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load long from a stack slot (SP-relative; disp must fit simm16).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store long to a stack slot (SP-relative; disp must fit simm16).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load pointer from a stack slot (SP-relative; disp must fit simm16).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store pointer to a stack slot (SP-relative; disp must fit simm16).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load float from a stack slot (SP-relative; disp must fit simm16).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store float to a stack slot (SP-relative; disp must fit simm16).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Use the same format since predicate() can not be used here.
// Load double from a stack slot (SP-relative; disp must fit simm16).
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store double to a stack slot (SP-relative; disp must fit simm16).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path object monitor lock. The heavy lifting is in
// MacroAssembler::fast_lock; box is the on-stack lock record (killed),
// tmp/scr are scratch.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path object monitor unlock; counterpart of cmpFastLock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate: byte store with the required memory
// ordering for the card table (see store_B_immI_enc_sync).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12551 // Die now
12552 instruct ShouldNotReachHere( )
12553 %{
12554 match(Halt);
12555 ins_cost(300);
12557 // Use the following format syntax
12558 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12559 ins_encode %{
12560 // Here we should emit illtrap !
12562 __ stop("in ShoudNotReachHere");
12564 %}
12565 ins_pipe( pipe_jump );
12566 %}
// Jump Direct Conditional - Label defines a relative address from Jcc+1.
// Counted-loop back-branch comparing two registers; cmpcode selects the
// condition. Unsigned conditions (above/below...) are synthesized with slt+AT.
// NOTE(review): the "if (&L)" tests guard against a null label pointer by
// taking the address of a reference bound from it — presumably *($labl$$label)
// is never actually dereferenced when null; confirm against the matcher.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // AT = (op2 < op1); branch if set.
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a small immediate: computes
// AT = src1 - src2 with one addiu32, then branches on AT's sign/zero.
// immI16_sub guarantees -op2 fits the addiu immediate.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2 (32-bit); condition is then tested against zero.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12695 /*
12696 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12697 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12698 match(CountedLoopEnd cop cmp);
12699 effect(USE labl);
12701 ins_cost(300);
12702 format %{ "J$cop,u $labl\t# Loop end" %}
12703 size(6);
12704 opcode(0x0F, 0x80);
12705 ins_encode( Jcc( cop, labl) );
12706 ins_pipe( pipe_jump );
12707 ins_pc_relative(1);
12708 %}
12710 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12711 match(CountedLoopEnd cop cmp);
12712 effect(USE labl);
12714 ins_cost(200);
12715 format %{ "J$cop,u $labl\t# Loop end" %}
12716 opcode(0x0F, 0x80);
12717 ins_encode( Jcc( cop, labl) );
12718 ins_pipe( pipe_jump );
12719 ins_pc_relative(1);
12720 %}
12721 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT pseudo-flag produced by a flag-setting instruct
// (e.g. storeIConditional leaves AT nonzero on success), so the branch
// senses are inverted relative to the cmpcode: "equal" tests AT != 0.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  // tmp is pinned to T8 (mT8RegI) and clobbered by the encoding.
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS this is an ll/sc retry loop; the result is left in AT
// (nonzero = stored, zero = compare failed) for jmpCon_flags to test.
// Only base+disp addressing is supported (index must be 0, disp simm16).
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Memory barrier before the atomic sequence (tunable via UseSyncLevel).
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      // Delay slot: AT = 0 (failure result).
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // sc wrote 0 into AT if the store-conditional lost; retry.
      __ beq(AT, R0, again);
      // Delay slot: AT = 0xFF (success result).
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
12815 // Conditional-store of a long value.
12816 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
12817 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
12818 %{
12819 match(Set cr (StoreLConditional mem (Binary oldval newval)));
12820 effect(KILL oldval);
12822 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
12823 ins_encode%{
12824 Register oldval = $oldval$$Register;
12825 Register newval = $newval$$Register;
12826 Address addr((Register)$mem$$base, $mem$$disp);
12828 int index = $mem$$index;
12829 int scale = $mem$$scale;
12830 int disp = $mem$$disp;
12832 guarantee(Assembler::is_simm16(disp), "");
12834 if( index != 0 ) {
12835 __ stop("in storeIConditional: index != 0");
12836 } else {
12837 __ cmpxchg(newval, addr, oldval);
12838 }
12839 %}
12840 ins_pipe( long_memory_op );
12841 %}
// CAS of a 32-bit int at [mem_ptr]. cmpxchg32 leaves 1/0 in AT, which is
// copied to res. oldval is pinned to S2 (mS2RegI) and clobbered.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// CAS of a 64-bit pointer at [mem_ptr]. cmpxchg leaves 1/0 in AT, which is
// copied to res. oldval is pinned to S2 (s2_RegP) and clobbered.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 32-bit narrow oop at [mem_ptr]. oldval is pinned to T2 (t2_RegN)
// and clobbered (both by the sign-extension below and the KILL effect).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *   Thus, we should extend oldval's sign for correct comparison.
     */
    // sll with shift 0 sign-extends the low 32 bits on MIPS64.
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12910 //----------Max and Min--------------------------------------------------------
12911 // Min Instructions
12912 ////
12913 // *** Min and Max using the conditional move are slower than the
12914 // *** branch version on a Pentium III.
12915 // // Conditional move for min
12916 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12917 // effect( USE_DEF op2, USE op1, USE cr );
12918 // format %{ "CMOVlt $op2,$op1\t! min" %}
12919 // opcode(0x4C,0x0F);
12920 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12921 // ins_pipe( pipe_cmov_reg );
12922 //%}
12923 //
12924 //// Min Register with Register (P6 version)
12925 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12926 // predicate(VM_Version::supports_cmov() );
12927 // match(Set op2 (MinI op1 op2));
12928 // ins_cost(200);
12929 // expand %{
12930 // eFlagsReg cr;
12931 // compI_eReg(cr,op1,op2);
12932 // cmovI_reg_lt(op2,op1,cr);
12933 // %}
12934 //%}
12936 // Min Register with Register (generic version)
12937 instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
12938 match(Set dst (MinI dst src));
12939 //effect(KILL flags);
12940 ins_cost(80);
12942 format %{ "MIN $dst, $src @minI_Reg_Reg" %}
12943 ins_encode %{
12944 Register dst = $dst$$Register;
12945 Register src = $src$$Register;
12947 __ slt(AT, src, dst);
12948 __ movn(dst, src, AT);
12950 %}
12952 ins_pipe( pipe_slow );
12953 %}
12955 // Max Register with Register
12956 // *** Min and Max using the conditional move are slower than the
12957 // *** branch version on a Pentium III.
12958 // // Conditional move for max
12959 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12960 // effect( USE_DEF op2, USE op1, USE cr );
12961 // format %{ "CMOVgt $op2,$op1\t! max" %}
12962 // opcode(0x4F,0x0F);
12963 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12964 // ins_pipe( pipe_cmov_reg );
12965 //%}
12966 //
12967 // // Max Register with Register (P6 version)
12968 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12969 // predicate(VM_Version::supports_cmov() );
12970 // match(Set op2 (MaxI op1 op2));
12971 // ins_cost(200);
12972 // expand %{
12973 // eFlagsReg cr;
12974 // compI_eReg(cr,op1,op2);
12975 // cmovI_reg_gt(op2,op1,cr);
12976 // %}
12977 //%}
12979 // Max Register with Register (generic version)
12980 instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
12981 match(Set dst (MaxI dst src));
12982 ins_cost(80);
12984 format %{ "MAX $dst, $src @maxI_Reg_Reg" %}
12986 ins_encode %{
12987 Register dst = $dst$$Register;
12988 Register src = $src$$Register;
12990 __ slt(AT, dst, src);
12991 __ movn(dst, src, AT);
12993 %}
12995 ins_pipe( pipe_slow );
12996 %}
12998 instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
12999 match(Set dst (MaxI dst zero));
13000 ins_cost(50);
13002 format %{ "MAX $dst, 0 @maxI_Reg_zero" %}
13004 ins_encode %{
13005 Register dst = $dst$$Register;
13007 __ slt(AT, dst, R0);
13008 __ movn(dst, R0, AT);
13010 %}
13012 ins_pipe( pipe_slow );
13013 %}
13015 instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
13016 %{
13017 match(Set dst (AndL src mask));
13019 format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
13020 ins_encode %{
13021 Register dst = $dst$$Register;
13022 Register src = $src$$Register;
13024 __ dext(dst, src, 0, 32);
13025 %}
13026 ins_pipe(ialu_regI_regI);
13027 %}
// Combine two 32-bit ints into one long: src2 becomes the high word,
// src1 the low word. The three cases handle register aliasing so neither
// source is clobbered before it has been consumed.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low word already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift src2 into the high half first, then insert the low word.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // dst distinct from both sources: build low then high.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13052 // Zero-extend convert int to long
13053 instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
13054 %{
13055 match(Set dst (AndL (ConvI2L src) mask));
13057 format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
13058 ins_encode %{
13059 Register dst = $dst$$Register;
13060 Register src = $src$$Register;
13062 __ dext(dst, src, 0, 32);
13063 %}
13064 ins_pipe(ialu_regI_regI);
13065 %}
13067 instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
13068 %{
13069 match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));
13071 format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
13072 ins_encode %{
13073 Register dst = $dst$$Register;
13074 Register src = $src$$Register;
13076 __ dext(dst, src, 0, 32);
13077 %}
13078 ins_pipe(ialu_regI_regI);
13079 %}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Emitted as a single lwu (zero-extending 32-bit load, load_N_enc).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Poll via a register already holding the polling-page address.
// Currently disabled (predicate(false)); safePoint_poll below is used instead.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // The relocation lets the VM identify this load as a safepoint poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address in T9 and load from
// it; the page is protected to trap threads at a safepoint.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    // The relocation lets the VM identify this load as a safepoint poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op: MIPS float registers already hold IEEE single
// precision, so no narrowing is required.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op: registers already hold IEEE double precision.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode
// Hardware clz; gated on UseCountLeadingZerosInstruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Hardware dclz (64-bit count leading zeros).
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Hardware ctz; gated on UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson (gs) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long into an int register.  dctz is a gs
// (GodSon/Loongson-specific) instruction, matching its 32-bit sibling ctz
// above; selection is guarded by UseCountTrailingZerosInstruction.
13193 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13194 predicate(UseCountTrailingZerosInstruction);
13195 match(Set dst (CountTrailingZerosL src));
// Fix: the format string previously printed "dcto", but the encoding below
// emits dctz -- keep the debug/disassembly text in sync with the code.
13197 format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
13198 ins_encode %{
13199 __ dctz($dst$$Register, $src$$Register);
13200 %}
13201 ins_pipe( ialu_regL_regL );
13202 %}
13204 // ====================VECTOR INSTRUCTIONS=====================================
13206 // Load vectors (8 bytes long)
// Load an 8-byte vector from memory.  Reuses the double-precision FP load
// encoding (load_D_enc) since a vecD is 8 bytes, as the predicate checks.
13207 instruct loadV8(vecD dst, memory mem) %{
13208 predicate(n->as_LoadVector()->memory_size() == 8);
13209 match(Set dst (LoadVector mem));
13210 ins_cost(125);
13211 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
13212 ins_encode(load_D_enc(dst, mem));
13213 ins_pipe( fpu_loadF );
13214 %}
13216 // Store vectors (8 bytes long)
// Store an 8-byte vector to memory via the double-precision FP store
// encoding (store_D_reg_enc); mirror of loadV8 above.
13217 instruct storeV8(memory mem, vecD src) %{
13218 predicate(n->as_StoreVector()->memory_size() == 8);
13219 match(Set mem (StoreVector mem src));
13220 ins_cost(145);
13221 format %{ "store $mem, $src\t! store vector (8 bytes)" %}
13222 ins_encode(store_D_reg_enc(mem, src));
13223 ins_pipe( fpu_storeF );
13224 %}
// Replicate a byte into all 8 lanes using the single replv_ob instruction
// (available only when Use3A2000), then move the 64-bit pattern into the FP
// register.  Cheaper (cost 100) than the generic Repl8B fallback below.
13226 instruct Repl8B_DSP(vecD dst, mRegI src) %{
13227 predicate(n->as_Vector()->length() == 8 && Use3A2000);
13228 match(Set dst (ReplicateB src));
13229 ins_cost(100);
13230 format %{ "replv_ob AT, $src\n\t"
13231 "dmtc1 AT, $dst\t! replicate8B" %}
13232 ins_encode %{
13233 __ replv_ob(AT, $src$$Register);
13234 __ dmtc1(AT, $dst$$FloatRegister);
13235 %}
13236 ins_pipe( pipe_mtc1 );
13237 %}
// Generic byte-replicate fallback: successively double the byte pattern in
// AT (8 -> 16 -> 32 -> 64 bits) with dins/dinsu, then move to the FP
// register.  Higher cost (140) so Repl8B_DSP is preferred when available.
13239 instruct Repl8B(vecD dst, mRegI src) %{
13240 predicate(n->as_Vector()->length() == 8);
13241 match(Set dst (ReplicateB src));
13242 ins_cost(140);
13243 format %{ "move AT, $src\n\t"
13244 "dins AT, AT, 8, 8\n\t"
13245 "dins AT, AT, 16, 16\n\t"
13246 "dinsu AT, AT, 32, 32\n\t"
13247 "dmtc1 AT, $dst\t! replicate8B" %}
13248 ins_encode %{
13249 __ move(AT, $src$$Register);
13250 __ dins(AT, AT, 8, 8);
13251 __ dins(AT, AT, 16, 16);
13252 __ dinsu(AT, AT, 32, 32);
13253 __ dmtc1(AT, $dst$$FloatRegister);
13254 %}
13255 ins_pipe( pipe_mtc1 );
13256 %}
// Replicate a byte immediate into all 8 lanes via repl_ob (Use3A2000 only).
// NOTE(review): unlike Repl4S_imm_DSP below, the immediate is passed to
// repl_ob without a range check -- presumably $con always fits; confirm.
13258 instruct Repl8B_imm_DSP(vecD dst, immI con) %{
13259 predicate(n->as_Vector()->length() == 8 && Use3A2000);
13260 match(Set dst (ReplicateB con));
13261 ins_cost(110);
13262 format %{ "repl_ob AT, [$con]\n\t"
13263 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
13264 ins_encode %{
13265 int val = $con$$constant;
13266 __ repl_ob(AT, val);
13267 __ dmtc1(AT, $dst$$FloatRegister);
13268 %}
13269 ins_pipe( pipe_mtc1 );
13270 %}
// Generic byte-immediate replicate fallback: load the constant, then double
// the pattern 8 -> 16 -> 32 -> 64 bits with dins/dinsu before moving to FP.
13272 instruct Repl8B_imm(vecD dst, immI con) %{
13273 predicate(n->as_Vector()->length() == 8);
13274 match(Set dst (ReplicateB con));
13275 ins_cost(150);
13276 format %{ "move AT, [$con]\n\t"
13277 "dins AT, AT, 8, 8\n\t"
13278 "dins AT, AT, 16, 16\n\t"
13279 "dinsu AT, AT, 32, 32\n\t"
13280 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
13281 ins_encode %{
13282 __ move(AT, $con$$constant);
13283 __ dins(AT, AT, 8, 8);
13284 __ dins(AT, AT, 16, 16);
13285 __ dinsu(AT, AT, 32, 32);
13286 __ dmtc1(AT, $dst$$FloatRegister);
13287 %}
13288 ins_pipe( pipe_mtc1 );
13289 %}
// Replicate byte zero: the hard-wired zero register R0 is moved straight to
// the FP register, giving the all-zero 8-byte vector in one instruction.
13291 instruct Repl8B_zero(vecD dst, immI0 zero) %{
13292 predicate(n->as_Vector()->length() == 8);
13293 match(Set dst (ReplicateB zero));
13294 ins_cost(90);
13295 format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
13296 ins_encode %{
13297 __ dmtc1(R0, $dst$$FloatRegister);
13298 %}
13299 ins_pipe( pipe_mtc1 );
13300 %}
// Replicate byte -1: nor(AT, R0, R0) produces the all-ones pattern, which is
// then moved to the FP register; clobbers AT.
13302 instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
13303 predicate(n->as_Vector()->length() == 8);
13304 match(Set dst (ReplicateB M1));
13305 ins_cost(80);
13306 format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
13307 ins_encode %{
13308 __ nor(AT, R0, R0);
13309 __ dmtc1(AT, $dst$$FloatRegister);
13310 %}
13311 ins_pipe( pipe_mtc1 );
13312 %}
// Replicate a short into all 4 lanes with the single replv_qh instruction
// (Use3A2000 only), then move the 64-bit pattern to the FP register.
13314 instruct Repl4S_DSP(vecD dst, mRegI src) %{
13315 predicate(n->as_Vector()->length() == 4 && Use3A2000);
13316 match(Set dst (ReplicateS src));
13317 ins_cost(100);
13318 format %{ "replv_qh AT, $src\n\t"
13319 "dmtc1 AT, $dst\t! replicate4S" %}
13320 ins_encode %{
13321 __ replv_qh(AT, $src$$Register);
13322 __ dmtc1(AT, $dst$$FloatRegister);
13323 %}
13324 ins_pipe( pipe_mtc1 );
13325 %}
// Generic short-replicate fallback: double the 16-bit pattern to 32 then 64
// bits with dins/dinsu, then move to the FP register.
13327 instruct Repl4S(vecD dst, mRegI src) %{
13328 predicate(n->as_Vector()->length() == 4);
13329 match(Set dst (ReplicateS src));
13330 ins_cost(120);
13331 format %{ "move AT, $src \n\t"
13332 "dins AT, AT, 16, 16\n\t"
13333 "dinsu AT, AT, 32, 32\n\t"
13334 "dmtc1 AT, $dst\t! replicate4S" %}
13335 ins_encode %{
13336 __ move(AT, $src$$Register);
13337 __ dins(AT, AT, 16, 16);
13338 __ dinsu(AT, AT, 32, 32);
13339 __ dmtc1(AT, $dst$$FloatRegister);
13340 %}
13341 ins_pipe( pipe_mtc1 );
13342 %}
// Replicate a short immediate into all 4 lanes (Use3A2000 only).  Uses the
// immediate form repl_qh when the constant fits its 10-bit field, otherwise
// materializes the constant with li32 and uses the register form replv_qh.
13344 instruct Repl4S_imm_DSP(vecD dst, immI con) %{
13345 predicate(n->as_Vector()->length() == 4 && Use3A2000);
13346 match(Set dst (ReplicateS con));
13347 ins_cost(100);
13348 format %{ "replv_qh AT, [$con]\n\t"
13349 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13350 ins_encode %{
13351 int val = $con$$constant;
13352 if ( Assembler::is_simm(val, 10)) {
13353 //repl_qh supports 10 bits immediate
13354 __ repl_qh(AT, val);
13355 } else {
13356 __ li32(AT, val);
13357 __ replv_qh(AT, AT);
13358 }
13359 __ dmtc1(AT, $dst$$FloatRegister);
13360 %}
13361 ins_pipe( pipe_mtc1 );
13362 %}
// Generic short-immediate replicate fallback: load the constant, double the
// 16-bit pattern to 32 then 64 bits, then move to the FP register.
13364 instruct Repl4S_imm(vecD dst, immI con) %{
13365 predicate(n->as_Vector()->length() == 4);
13366 match(Set dst (ReplicateS con));
13367 ins_cost(110);
13368 format %{ "move AT, [$con]\n\t"
13369 "dins AT, AT, 16, 16\n\t"
13370 "dinsu AT, AT, 32, 32\n\t"
13371 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13372 ins_encode %{
13373 __ move(AT, $con$$constant);
13374 __ dins(AT, AT, 16, 16);
13375 __ dinsu(AT, AT, 32, 32);
13376 __ dmtc1(AT, $dst$$FloatRegister);
13377 %}
13378 ins_pipe( pipe_mtc1 );
13379 %}
// Replicate short zero: move the zero register R0 straight to the FP
// register.  NOTE(review): no ins_cost here, unlike Repl8B_zero -- confirm
// the default cost is intended.
13381 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13382 predicate(n->as_Vector()->length() == 4);
13383 match(Set dst (ReplicateS zero));
13384 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13385 ins_encode %{
13386 __ dmtc1(R0, $dst$$FloatRegister);
13387 %}
13388 ins_pipe( pipe_mtc1 );
13389 %}
// Replicate short -1: build the all-ones pattern with nor(AT, R0, R0) and
// move it to the FP register; clobbers AT.
13391 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13392 predicate(n->as_Vector()->length() == 4);
13393 match(Set dst (ReplicateS M1));
13394 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13395 ins_encode %{
13396 __ nor(AT, R0, R0);
13397 __ dmtc1(AT, $dst$$FloatRegister);
13398 %}
13399 ins_pipe( pipe_mtc1 );
13400 %}
13402 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both lanes: dins writes the low 32 bits of AT from
// $src and dinsu writes the high 32 bits, then the 64-bit pattern is moved
// to the FP register; clobbers AT.
13403 instruct Repl2I(vecD dst, mRegI src) %{
13404 predicate(n->as_Vector()->length() == 2);
13405 match(Set dst (ReplicateI src));
13406 format %{ "dins AT, $src, 0, 32\n\t"
13407 "dinsu AT, $src, 32, 32\n\t"
13408 "dmtc1 AT, $dst\t! replicate2I" %}
13409 ins_encode %{
13410 __ dins(AT, $src$$Register, 0, 32);
13411 __ dinsu(AT, $src$$Register, 32, 32);
13412 __ dmtc1(AT, $dst$$FloatRegister);
13413 %}
13414 ins_pipe( pipe_mtc1 );
13415 %}
13417 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate a 32-bit immediate into both lanes of an 8-byte vector:
// materialize the constant in AT with li32, duplicate the low word into the
// high word with dinsu, then move the 64-bit pattern to the FP register.
// NOTE(review): tmp (A7) is declared KILLed but the encoding only clobbers
// AT -- confirm whether the effect is still required.
13418 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13419 predicate(n->as_Vector()->length() == 2);
13420 match(Set dst (ReplicateI con));
13421 effect(KILL tmp);
// Fix: format now matches the emitted sequence -- li32 takes no third
// operand, and dinsu inserts 32 bits at bit position 32.
13422 format %{ "li32 AT, [$con]\n\t"
13423 "dinsu AT, AT, 32, 32\n\t"
13424 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13425 ins_encode %{
13426 int val = $con$$constant;
13427 __ li32(AT, val);
13428 __ dinsu(AT, AT, 32, 32);
13429 __ dmtc1(AT, $dst$$FloatRegister);
13430 %}
13431 ins_pipe( pipe_mtc1 );
13432 %}
13434 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: move the zero register R0 straight to the FP register.
13435 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13436 predicate(n->as_Vector()->length() == 2);
13437 match(Set dst (ReplicateI zero));
13438 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13439 ins_encode %{
13440 __ dmtc1(R0, $dst$$FloatRegister);
13441 %}
13442 ins_pipe( pipe_mtc1 );
13443 %}
13445 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1: build the all-ones pattern with nor(AT, R0, R0) and move
// it to the FP register; clobbers AT (as the format comment notes).
13446 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13447 predicate(n->as_Vector()->length() == 2);
13448 match(Set dst (ReplicateI M1));
13449 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13450 ins_encode %{
13451 __ nor(AT, R0, R0);
13452 __ dmtc1(AT, $dst$$FloatRegister);
13453 %}
13454 ins_pipe( pipe_mtc1 );
13455 %}
13457 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes: cvt.ps.s packs the same single twice
// into one paired-single register.
13458 instruct Repl2F(vecD dst, regF src) %{
13459 predicate(n->as_Vector()->length() == 2);
13460 match(Set dst (ReplicateF src));
13461 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13462 ins_encode %{
13463 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13464 %}
13465 ins_pipe( pipe_slow );
13466 %}
13468 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: the all-zero bit pattern (R0) is also the
// paired-single representation of {0.0f, 0.0f}, so one dmtc1 suffices.
13469 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13470 predicate(n->as_Vector()->length() == 2);
13471 match(Set dst (ReplicateF zero));
13472 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13473 ins_encode %{
13474 __ dmtc1(R0, $dst$$FloatRegister);
13475 %}
13476 ins_pipe( pipe_mtc1 );
13477 %}
13480 // ====================VECTOR ARITHMETIC=======================================
13482 // --------------------------------- ADD --------------------------------------
13484 // Floats vector add
// Packed-single add, two-operand form (dst += src).
// NOTE(review): this uses pipe_slow while the three-operand vadd2F3 below
// uses fpu_regF_regF -- confirm the pipeline-class difference is intended.
13485 instruct vadd2F(vecD dst, vecD src) %{
13486 predicate(n->as_Vector()->length() == 2);
13487 match(Set dst (AddVF dst src));
13488 format %{ "add.ps $dst,$src\t! add packed2F" %}
13489 ins_encode %{
13490 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13491 %}
13492 ins_pipe( pipe_slow );
13493 %}
// Packed-single add, three-operand form (dst = src1 + src2).
13495 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13496 predicate(n->as_Vector()->length() == 2);
13497 match(Set dst (AddVF src1 src2));
13498 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13499 ins_encode %{
13500 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13501 %}
13502 ins_pipe( fpu_regF_regF );
13503 %}
13505 // --------------------------------- SUB --------------------------------------
13507 // Floats vector sub
// Packed-single subtract, two-operand form (dst -= src).
13508 instruct vsub2F(vecD dst, vecD src) %{
13509 predicate(n->as_Vector()->length() == 2);
13510 match(Set dst (SubVF dst src));
13511 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13512 ins_encode %{
13513 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13514 %}
13515 ins_pipe( fpu_regF_regF );
13516 %}
13518 // --------------------------------- MUL --------------------------------------
13520 // Floats vector mul
// Packed-single multiply, two-operand form (dst *= src).
13521 instruct vmul2F(vecD dst, vecD src) %{
13522 predicate(n->as_Vector()->length() == 2);
13523 match(Set dst (MulVF dst src));
13524 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13525 ins_encode %{
13526 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13527 %}
13528 ins_pipe( fpu_regF_regF );
13529 %}
// Packed-single multiply, three-operand form (dst = src1 * src2).
13531 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13532 predicate(n->as_Vector()->length() == 2);
13533 match(Set dst (MulVF src1 src2));
13534 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13535 ins_encode %{
13536 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13537 %}
13538 ins_pipe( fpu_regF_regF );
13539 %}
13541 // --------------------------------- DIV --------------------------------------
13542 // MIPS does not have div.ps
13545 //----------PEEPHOLE RULES-----------------------------------------------------
13546 // These must follow all instruction definitions as they use the names
13547 // defined in the instructions definitions.
13548 //
13549 // peepmatch ( root_instr_name [preceding_instruction]* );
13550 //
13551 // peepconstraint %{
13552 // (instruction_number.operand_name relational_op instruction_number.operand_name
13553 // [, ...] );
13554 // // instruction numbers are zero-based using left to right order in peepmatch
13555 //
13556 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13557 // // provide an instruction_number.operand_name for each operand that appears
13558 // // in the replacement instruction's match rule
13559 //
13560 // ---------VM FLAGS---------------------------------------------------------
13561 //
13562 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13563 //
13564 // Each peephole rule is given an identifying number starting with zero and
13565 // increasing by one in the order seen by the parser. An individual peephole
13566 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13567 // on the command-line.
13568 //
13569 // ---------CURRENT LIMITATIONS----------------------------------------------
13570 //
13571 // Only match adjacent instructions in same basic block
13572 // Only equality constraints
13573 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13574 // Only one replacement instruction
13575 //
13576 // ---------EXAMPLE----------------------------------------------------------
13577 //
13578 // // pertinent parts of existing instructions in architecture description
13579 // instruct movI(eRegI dst, eRegI src) %{
13580 // match(Set dst (CopyI src));
13581 // %}
13582 //
13583 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13584 // match(Set dst (AddI dst src));
13585 // effect(KILL cr);
13586 // %}
13587 //
13588 // // Change (inc mov) to lea
13589 // peephole %{
13590 // // increment preceded by register-register move
13591 // peepmatch ( incI_eReg movI );
13592 // // require that the destination register of the increment
13593 // // match the destination register of the move
13594 // peepconstraint ( 0.dst == 1.dst );
13595 // // construct a replacement instruction that sets
13596 // // the destination to ( move's source register + one )
13597 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13598 // %}
13599 //
13600 // Implementation no longer uses movX instructions since
13601 // machine-independent system no longer uses CopyX nodes.
13602 //
13603 // peephole %{
13604 // peepmatch ( incI_eReg movI );
13605 // peepconstraint ( 0.dst == 1.dst );
13606 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13607 // %}
13608 //
13609 // peephole %{
13610 // peepmatch ( decI_eReg movI );
13611 // peepconstraint ( 0.dst == 1.dst );
13612 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13613 // %}
13614 //
13615 // peephole %{
13616 // peepmatch ( addI_eReg_imm movI );
13617 // peepconstraint ( 0.dst == 1.dst );
13618 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13619 // %}
13620 //
13621 // peephole %{
13622 // peepmatch ( addP_eReg_imm movP );
13623 // peepconstraint ( 0.dst == 1.dst );
13624 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13625 // %}
13627 // // Change load of spilled value to only a spill
13628 // instruct storeI(memory mem, eRegI src) %{
13629 // match(Set mem (StoreI mem src));
13630 // %}
13631 //
13632 // instruct loadI(eRegI dst, memory mem) %{
13633 // match(Set dst (LoadI mem));
13634 // %}
13635 //
13636 //peephole %{
13637 // peepmatch ( loadI storeI );
13638 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13639 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13640 //%}
13642 //----------SMARTSPILL RULES---------------------------------------------------
13643 // These must follow all instruction definitions as they use the names
13644 // defined in the instructions definitions.