Thu, 01 Mar 2018 12:56:18 +0800
#5745 [Code Reorganization] delete trailing whitespace
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for heapbase of compressed oop
// Allocation order for the general (integer) registers and their high
// halves. Callee-saved S* registers are listed first; SP/FP come last
// since they are reserved as stack and frame pointers.
// FIX: the original list was missing the comma after "GP, GP_H", which
// made the alloc_class element list syntactically invalid.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H,
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
);
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Single-precision FP registers available to the register allocator.
// F30 is excluded because it is used as a temporary in D2I conversion
// (see the comments above); F31 is available again since 2016/12/1.
// FIX: the original list was missing the comma between F17 and F18.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Platform hooks consulted by Compile::shorten_branches for sizing
// call-trampoline stubs. This platform emits no call trampolines,
// so both queries return zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emission and sizing of the exception and deoptimization handler
// stubs; the emit_* bodies live in the source %{ %} block below.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  // Upper bound (in bytes) on the code emitted by emit_exception_handler.
  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }

#ifdef _LP64
  // Upper bound (in bytes) on the code emitted by emit_deopt_handler.
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
523 %} // end source_hpp
525 source %{
527 #define NO_INDEX 0
528 #define RELOC_IMM64 Assembler::imm_operand
529 #define RELOC_DISP32 Assembler::disp32_operand
532 #define __ _masm.
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section, or 0 if
// the code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  // The handler is a single patchable jump to the exception blob entry.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
  __ align(16);
  // size_exception_handler() must be an upper bound on what was emitted.
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 if
// the code buffer could not be expanded.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL) return 0; // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  // The handler is a patchable call (not a jump) into the deopt blob's
  // unpack entry.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_call(SharedRuntime::deopt_blob()->unpack());
  __ align(16);
  // size_deopt_handler() must be an upper bound on what was emitted.
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
581 const bool Matcher::match_rule_supported(int opcode) {
582 if (!has_match_rule(opcode))
583 return false;
585 switch (opcode) {
586 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
587 case Op_CountLeadingZerosI:
588 case Op_CountLeadingZerosL:
589 if (!UseCountLeadingZerosInstruction)
590 return false;
591 break;
592 case Op_CountTrailingZerosI:
593 case Op_CountTrailingZerosL:
594 if (!UseCountTrailingZerosInstruction)
595 return false;
596 break;
597 }
599 return true; // Per default match rules are supported.
600 }
//FIXME
// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  // Remember the call-site address in the main instruction stream so the
  // static-stub relocation below can refer back to it.
  address mark = cbuf.insts_mark(); // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return; // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), 0);
  // static stub relocation also tags the methodOop in the code-stream.
  // The 48-bit immediate loaded into S3 starts as 0 and is patched later.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  // Jump target starts as -1 and is patched when the call is resolved.
  address call_pc = (address)-1;
  __ patchable_jump(call_pc);
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
637 // size of call stub, compiled java to interpretor
638 uint size_java_to_interp() {
639 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
640 return round_to(size, 16);
641 }
// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp() {
  // NOTE(review): 16 is a budget covering relocations recorded in
  // emit_java_to_interp plus those at the Java_Static_Call site —
  // confirm against the relocations actually emitted there.
  return 16; // in emit_java_to_interp + in Java_Static_Call
}
648 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
649 if( Assembler::is_simm16(offset) )
650 return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
655 }
658 // No additional cost for CMOVL.
659 const int Matcher::long_cmove_cost() { return 0; }
661 // No CMOVF/CMOVD with SSE2
662 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
664 // Does the CPU require late expand (see block.cpp for description of late expand)?
665 const bool Matcher::require_postalloc_expand = false;
667 // Should the Matcher clone shifts on addressing modes, expecting them
668 // to be subsumed into complex addressing expressions or compute them
669 // into registers? True for Intel but false for most RISCs
670 const bool Matcher::clone_shift_expressions = false;
672 // Do we need to mask the count passed to shift instructions or does
673 // the cpu only look at the lower 5/6 bits anyway?
674 const bool Matcher::need_masked_shift_count = false;
676 bool Matcher::narrow_oop_use_complex_address() {
677 NOT_LP64(ShouldNotCallThis());
678 assert(UseCompressedOops, "only for compressed oops code");
679 return false;
680 }
682 bool Matcher::narrow_klass_use_complex_address() {
683 NOT_LP64(ShouldNotCallThis());
684 assert(UseCompressedClassPointers, "only for compressed klass code");
685 return false;
686 }
688 // This is UltraSparc specific, true just means we have fast l2f conversion
689 const bool Matcher::convL2FSupported(void) {
690 return true;
691 }
693 // Max vector size in bytes. 0 if not supported.
694 const int Matcher::vector_width_in_bytes(BasicType bt) {
695 if (MaxVectorSize == 0)
696 return 0;
697 assert(MaxVectorSize == 8, "");
698 return 8;
699 }
701 // Vector ideal reg
702 const int Matcher::vector_ideal_reg(int size) {
703 assert(MaxVectorSize == 8, "");
704 switch(size) {
705 case 8: return Op_VecD;
706 }
707 ShouldNotReachHere();
708 return 0;
709 }
711 // Only lowest bits of xmm reg are used for vector shift count.
712 const int Matcher::vector_shift_count_ideal_reg(int size) {
713 fatal("vector shift is not supported");
714 return Node::NotAMachineReg;
715 }
717 // Limits on vector size (number of elements) loaded into vector.
718 const int Matcher::max_vector_size(const BasicType bt) {
719 assert(is_java_primitive(bt), "only primitive type vectors");
720 return vector_width_in_bytes(bt)/type2aelembytes(bt);
721 }
723 const int Matcher::min_vector_size(const BasicType bt) {
724 return max_vector_size(bt); // Same as max.
725 }
727 // MIPS supports misaligned vectors store/load? FIXME
728 const bool Matcher::misaligned_vectors_ok() {
729 return false;
730 //return !AlignVector; // can be changed by flag
731 }
733 // Register for DIVI projection of divmodI
734 RegMask Matcher::divI_proj_mask() {
735 ShouldNotReachHere();
736 return RegMask();
737 }
739 // Register for MODI projection of divmodI
740 RegMask Matcher::modI_proj_mask() {
741 ShouldNotReachHere();
742 return RegMask();
743 }
745 // Register for DIVL projection of divmodL
746 RegMask Matcher::divL_proj_mask() {
747 ShouldNotReachHere();
748 return RegMask();
749 }
751 int Matcher::regnum_to_fpu_offset(int regnum) {
752 return regnum - 32; // The FP registers are in the second chunk
753 }
756 const bool Matcher::isSimpleConstant64(jlong value) {
757 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
758 return true;
759 }
762 // Return whether or not this register is ever used as an argument. This
763 // function is used on startup to build the trampoline stubs in generateOptoStub.
764 // Registers not mentioned will be killed by the VM call in the trampoline, and
765 // arguments in those registers not be available to the callee.
766 bool Matcher::can_be_java_arg( int reg ) {
767 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
768 if ( reg == T0_num || reg == T0_H_num
769 || reg == A0_num || reg == A0_H_num
770 || reg == A1_num || reg == A1_H_num
771 || reg == A2_num || reg == A2_H_num
772 || reg == A3_num || reg == A3_H_num
773 || reg == A4_num || reg == A4_H_num
774 || reg == A5_num || reg == A5_H_num
775 || reg == A6_num || reg == A6_H_num
776 || reg == A7_num || reg == A7_H_num )
777 return true;
779 if ( reg == F12_num || reg == F12_H_num
780 || reg == F13_num || reg == F13_H_num
781 || reg == F14_num || reg == F14_H_num
782 || reg == F15_num || reg == F15_H_num
783 || reg == F16_num || reg == F16_H_num
784 || reg == F17_num || reg == F17_H_num
785 || reg == F18_num || reg == F18_H_num
786 || reg == F19_num || reg == F19_H_num )
787 return true;
789 return false;
790 }
792 bool Matcher::is_spillable_arg( int reg ) {
793 return can_be_java_arg(reg);
794 }
796 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
797 return false;
798 }
800 // Register for MODL projection of divmodL
801 RegMask Matcher::modL_proj_mask() {
802 ShouldNotReachHere();
803 return RegMask();
804 }
806 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
807 return FP_REG_mask();
808 }
810 // MIPS doesn't support AES intrinsics
811 const bool Matcher::pass_original_key_for_aes() {
812 return false;
813 }
815 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
816 //lui
817 //ori
818 //dsll
819 //ori
821 //jalr
822 //nop
824 return round_to(current_offset, alignment_required()) - current_offset;
825 }
827 int CallLeafDirectNode::compute_padding(int current_offset) const {
828 //lui
829 //ori
830 //dsll
831 //ori
833 //jalr
834 //nop
836 return round_to(current_offset, alignment_required()) - current_offset;
837 }
839 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
840 //lui
841 //ori
842 //dsll
843 //ori
845 //jalr
846 //nop
848 return round_to(current_offset, alignment_required()) - current_offset;
849 }
851 // If CPU can load and store mis-aligned doubles directly then no fixup is
852 // needed. Else we split the double into 2 integer pieces and move it
853 // piece-by-piece. Only happens when passing doubles into C code as the
854 // Java calling convention forces doubles to be aligned.
855 const bool Matcher::misaligned_doubles_ok = false;
856 // Do floats take an entire double register or just half?
857 //const bool Matcher::float_in_double = true;
858 bool Matcher::float_in_double() { return false; }
859 // Threshold size for cleararray.
860 const int Matcher::init_array_short_size = 8 * BytesPerLong;
861 // Do ints take an entire long register or just half?
862 const bool Matcher::int_in_long = true;
863 // Is it better to copy float constants, or load them directly from memory?
864 // Intel can load a float constant from a direct address, requiring no
865 // extra registers. Most RISCs will have to materialize an address into a
866 // register first, so they would do better to copy the constant from stack.
867 const bool Matcher::rematerialize_float_constants = false;
868 // Advertise here if the CPU requires explicit rounding operations
869 // to implement the UseStrictFP mode.
870 const bool Matcher::strict_fp_requires_explicit_rounding = false;
871 // The ecx parameter to rep stos for the ClearArray node is in dwords.
872 const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the original comment claimed MIPS "needs" it because it
// lacks absolute addressing, but the code returns false — confirm which
// is intended; the code is treated as authoritative here.
bool SafePointNode::needs_polling_address_input() {
  return false;
}
881 // !!!!! Special hack to get all type of calls to specify the byte offset
882 // from the start of the call to the point where the return address
883 // will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  // Emitted call sequence for a static Java call:
  //   lui
  //   ori
  //   nop
  //   nop
  //   jalr
  //   nop
  // 6 instructions * 4 bytes = 24 bytes from call start to return address.
  return 24;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  // Inline-cache klass materialization (4 instructions):
  //   lui  IC_Klass,
  //   ori  IC_Klass,
  //   dsll IC_Klass
  //   ori  IC_Klass
  // followed by the call sequence (6 instructions):
  //   lui  T9
  //   ori  T9
  //   nop
  //   nop
  //   jalr T9
  //   nop
  // 4*4 + 6*4 = 40 bytes from call start to return address.
  return 4 * 4 + 4 * 6;
}
909 //=============================================================================
911 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
912 enum RC { rc_bad, rc_int, rc_float, rc_stack };
913 static enum RC rc_class( OptoReg::Name reg ) {
914 if( !OptoReg::is_valid(reg) ) return rc_bad;
915 if (OptoReg::is_stack(reg)) return rc_stack;
916 VMReg r = OptoReg::as_VMReg(reg);
917 if (r->is_Register()) return rc_int;
918 assert(r->is_FloatRegister(), "must be");
919 return rc_float;
920 }
// Move a spilled value between locations: stack slot <-> GPR <-> FPR.
// This one routine serves three callers and must keep them consistent:
//   cbuf != NULL           : emit the actual move instructions
//   cbuf == NULL, !do_size : print an assembly listing to 'st' (debug builds)
//   do_size                : only accumulate and return the size in bytes
// An even 'first' register whose successor is 'second' denotes a 64-bit
// value occupying an aligned register/slot pair; otherwise the copy is
// 32-bit. AT is used as the scratch register for mem-to-mem moves.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem (through scratch register AT)
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;  // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends for int values, lwu zero-extends otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm (i.e. FPR on this port)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if (!do_size) {
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the listing prints dst before src while the emitted
            // dmtc1 takes (src GPR, dst FPR) -- confirm the intended operand order.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Every supported register-class pairing returns above; falling through
  // here means an unhandled combination.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Debug-only listing: reuse implementation() in print mode (no CodeBuffer).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy instructions into cbuf via implementation().
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the copy, computed by implementation() in size-only mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1352 //=============================================================================
1353 #
#ifndef PRODUCT
// Debug-only listing for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint/trap instruction (int3 is the MacroAssembler's
// cross-platform name for the debug trap on this port).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Let the generic machinery measure the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1371 //=============================================================================
1372 #ifndef PRODUCT
1373 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1374 Compile *C = ra_->C;
1375 int framesize = C->frame_size_in_bytes();
1377 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1379 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1380 st->cr(); st->print("\t");
1381 if (UseLoongsonISA) {
1382 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1383 } else {
1384 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1385 st->cr(); st->print("\t");
1386 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1387 }
1389 if( do_polling() && C->is_method_compilation() ) {
1390 st->print("Poll Safepoint # MachEpilogNode");
1391 }
1392 }
1393 #endif
// Emit the method epilogue: release the frame, restore RA and FP from the
// two slots just below the (restored) SP, then perform the return-point
// safepoint poll when this is a method compilation.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);   // pop the frame first; RA/FP sit below the new SP

  if (UseLoongsonISA) {
    // Single Loongson load-pair restores both RA and FP.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Read the polling page; the load faults when a safepoint is requested.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Too many variables affect the epilogue length (Loongson ISA, polling),
// so measure the emitted code instead of computing it.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries this node may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for scheduling.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1430 int MachEpilogNode::safepoint_offset() const { return 0; }
1432 //=============================================================================
#ifndef PRODUCT
// Debug-only listing: a BoxLock computes the address of its stack slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One addi instruction (4 bytes); must match BoxLockNode::emit below.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the address of the lock's stack slot into the output register.
// NOTE(review): a single addi assumes 'offset' fits in a signed 16-bit
// immediate -- verify large frames cannot exceed that, since size() above
// is hard-wired to 4 bytes.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  __ addi(as_Register(reg), SP, offset);
}
1456 //static int sizeof_FFree_Float_Stack_All = -1;
1458 int MachCallRuntimeNode::ret_addr_offset() {
1459 //lui
1460 //ori
1461 //dsll
1462 //ori
1463 //jalr
1464 //nop
1465 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1466 return NativeCall::instruction_size;
1467 }
1470 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of padding nops (4 bytes each).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1477 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1478 MacroAssembler _masm(&cbuf);
1479 int i = 0;
1480 for(i = 0; i < _count; i++)
1481 __ nop();
1482 }
// _count nops at 4 bytes apiece.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for scheduling.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1491 //=============================================================================
1493 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the Unverified Entry Point (inline-cache check);
// mirrors MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified Entry Point: load the receiver's klass and compare it with the
// inline-cache register. On a match fall through to the verified entry;
// on a miss jump to the shared ic-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;          // receiver arrives in T0 by convention
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();                        // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  // WARNING: these NOPs are critical so that the verified entry point is
  // properly 8-byte aligned for patching by NativeJump::patch_verified_entry().
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Measure the emitted UEP code (length varies with alignment padding).
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1535 //=============================================================================
1537 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is addressed absolutely on this port, so no offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1543 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Unused on this port (see requires_postalloc_expand above).
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into this node's register
// using a patchable 48-bit immediate, with an internal-pc relocation so the
// address survives code relocation. Nothing is emitted for an empty table.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// patchable_set48 always occupies 4 instructions (16 bytes).
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug-only listing of the constant-table base load.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1580 //=============================================================================
1581 #ifndef PRODUCT
1582 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1583 Compile* C = ra_->C;
1585 int framesize = C->frame_size_in_bytes();
1586 int bangsize = C->bang_size_in_bytes();
1587 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1589 // Calls to C2R adapters often do not accept exceptional returns.
1590 // We require that their callers must bang for them. But be careful, because
1591 // some VM calls (such as call site linkage) can use several kilobytes of
1592 // stack. But the stack safety zone should account for that.
1593 // See bugs 4446381, 4468289, 4497237.
1594 if (C->need_stack_bang(bangsize)) {
1595 st->print_cr("# stack bang"); st->print("\t");
1596 }
1597 if (UseLoongsonISA) {
1598 st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
1599 } else {
1600 st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
1601 st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
1602 }
1603 st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
1604 st->print("daddiu SP, SP, -%d \t",framesize);
1605 }
1606 #endif
// Emit the method prologue: bang the stack if required, save RA and FP in
// the two slots below the incoming SP, establish the new FP, allocate the
// frame, and leave two nops so NativeJump::patch_verified_entry() has room
// to patch the entry. Finally record frame completion and, if a constant
// table exists, fix its base offset before any user of it is emitted.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (UseLoongsonISA) {
    // Single Loongson store-pair saves both RA and FP.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); // Make enough room for patch_verified_entry()
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Prologue length depends on stack bang and ISA variant, so measure it.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries this node may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1652 %}
1654 //----------ENCODING BLOCK-----------------------------------------------------
1655 // This block specifies the encoding classes used by the compiler to output
1656 // byte streams. Encoding classes generate functions which are called by
1657 // Machine Instruction Nodes in order to generate the bit encoding of the
1658 // instruction. Operands specify their base encoding interface with the
1659 // interface keyword. There are currently supported four interfaces,
1660 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1661 // operand to generate a function which returns its register number when
1662 // queried. CONST_INTER causes an operand to generate a function which
1663 // returns the value of the constant when queried. MEMORY_INTER causes an
1664 // operand to generate four functions which return the Base Register, the
1665 // Index Register, the Scale Value, and the Offset Value of the operand when
1666 // queried. COND_INTER causes an operand to generate six functions which
1667 // return the encoding code (ie - encoding bits for the instruction)
1668 // associated with each basic boolean condition for a conditional instruction.
1669 // Instructions specify two basic values for encoding. They use the
1670 // ins_encode keyword to specify their encoding class (which must be one of
1671 // the class names specified in the encoding block), and they use the
1672 // opcode keyword to specify, in order, their primary, secondary, and
1673 // tertiary opcode. Only the opcode sections which a particular instruction
1674 // needs for encoding need to be specified.
1675 encode %{
  // Load byte signed (lb / Loongson gslbx).
  // Address = base + (index << scale) + disp. AT and T9 are the scratch
  // registers used to form addresses that do not fit one instruction.
  // NOTE(review): the base+index sums here use 32-bit addu, while the
  // unsigned-byte load below uses daddu -- confirm 64-bit addresses
  // cannot be truncated on this path.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // Fused base+index addressing on Loongson.
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit simm16: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte unsigned (lbu). Address = base + (index << scale) + disp;
  // AT and T9 are scratch. Unlike load_B_enc above, no Loongson fused form
  // is used here, and address arithmetic is done with 64-bit daddu.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Form base + (index << scale) in AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement does not fit simm16: add it via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register (sb / Loongson gssbx).
  // Address = base + (index << scale) + disp; AT and T9 are scratch.
  // gssbx requires its displacement to fit in 8 bits, hence the extra
  // is_simm(disp, 8) tier before the plain simm16 paths.
  // NOTE(review): base+index sums use 32-bit addu here as in load_B_enc --
  // verify against 64-bit address truncation.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Large displacement: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: shift first, then the same three displacement tiers.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store immediate byte (sb / Loongson gssbx).
  // Address = base + (index << scale) + disp. A zero immediate is stored
  // straight from R0; any other value is first materialized in a scratch
  // register (T9 or AT). The Loongson branch additionally exploits gssbx
  // when disp fits in 8 bits.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: form base + (index << scale) in AT, then store.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: prefer the fused gssbx when disp fits 8 bits.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp; gssbx then adds the index register.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; gssbx then adds base.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Emit a byte store of an immediate to [base + (index << scale) + disp],
  // followed by a sync() memory barrier (release/volatile store semantics).
  // Picks the shortest sequence for the addressing mode: Loongson gssbx when
  // UseLoongsonISA and the displacement fits, plain sb otherwise.  A zero
  // immediate is stored straight from R0 to avoid materializing it.
  // AT and T9 are the only scratch registers clobbered.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssbx takes [base + indexReg + simm8] in a single instruction.
        if ( Assembler::is_simm(disp,8) ) {
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sb's 16-bit offset: form base+index in AT, then sb.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large displacement: fold disp into the index side, then gssbx
          // with offset 0 so base is still added by the instruction itself.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is just base + disp.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Memory barrier after the store — this is the "_sync" variant.
    __ sync();
  %}
  // Load Short (16bit signed)
  // Sign-extending halfword load from [base + (index << scale) + disp] into
  // an integer register.  Uses Loongson gslhx ([base + reg + simm8]) when
  // available, otherwise lh with the address formed in AT.  Clobbers AT/T9.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // One-instruction form: gslhx rd, [base + index(<<scale) + simm8].
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // disp too large even for lh: fold it into the index operand.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2182 // Load Char (16bit unsigned)
2183 enc_class load_C_enc (mRegI dst, memory mem) %{
2184 MacroAssembler _masm(&cbuf);
2185 int dst = $dst$$reg;
2186 int base = $mem$$base;
2187 int index = $mem$$index;
2188 int scale = $mem$$scale;
2189 int disp = $mem$$disp;
2191 if( index != 0 ) {
2192 if (scale == 0) {
2193 __ daddu(AT, as_Register(base), as_Register(index));
2194 } else {
2195 __ dsll(AT, as_Register(index), scale);
2196 __ daddu(AT, as_Register(base), AT);
2197 }
2198 if( Assembler::is_simm16(disp) ) {
2199 __ lhu(as_Register(dst), AT, disp);
2200 } else {
2201 __ move(T9, disp);
2202 __ addu(AT, AT, T9);
2203 __ lhu(as_Register(dst), AT, 0);
2204 }
2205 } else {
2206 if( Assembler::is_simm16(disp) ) {
2207 __ lhu(as_Register(dst), as_Register(base), disp);
2208 } else {
2209 __ move(T9, disp);
2210 __ daddu(AT, as_Register(base), T9);
2211 __ lhu(as_Register(dst), AT, 0);
2212 }
2213 }
2214 %}
  // Store Char (16bit unsigned)
  // Halfword store of a register to [base + (index << scale) + disp], using
  // Loongson gsshx when it applies, otherwise sh with the address in AT.
  // Clobbers AT and T9.
  // NOTE(review): address sums here use 32-bit addu while the byte/long
  // encodings in this file use daddu; on MIPS64 addu sign-extends the low
  // 32 bits — confirm these addresses can never exceed 32 bits, or switch
  // to daddu as the sibling encodings do.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single-instruction form: gsshx [base + index(<<scale) + simm8].
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store of a zero halfword (char 0) to [base + (index << scale) + disp].
  // Same addressing-mode dispatch as store_C_reg_enc, but the value comes
  // from R0 so no immediate materialization is needed.  Clobbers AT and T9.
  // NOTE(review): uses 32-bit addu for address sums, like store_C_reg_enc —
  // confirm intentional vs. the daddu used by the byte/long encodings.
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Sign-extending 32-bit integer load from [base + (index << scale) + disp]
  // into an integer register.  Loongson gslwx when it applies, lw otherwise.
  // Clobbers AT and T9.
  // NOTE(review): address sums use 32-bit addu (cf. daddu elsewhere in this
  // file); on MIPS64 addu sign-extends the low 32 bits — verify.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // 32-bit integer store of a register to [base + (index << scale) + disp].
  // Loongson gsswx when it applies, sw otherwise.  Clobbers AT and T9.
  // NOTE(review): address sums use 32-bit addu (cf. daddu elsewhere) —
  // verify addresses cannot exceed 32 bits here.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // 32-bit store of an immediate to [base + (index << scale) + disp].
  // Dispatches on UseLoongsonISA and how the displacement fits (simm8 for
  // gsswx, simm16 for sw, otherwise materialized in a register).  A zero
  // immediate is stored from R0.  Clobbers AT and T9.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gsswx can encode [base + index(<<scale) + simm8] directly.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large displacement: fold it into the index operand, gsswx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load a compressed (narrow) oop: zero-extending 32-bit load (lwu) from
  // [base + (index << scale) + disp].  The matcher guarantees no relocation
  // is attached to the displacement (asserted below).  Clobbers AT and T9.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        // set64 handles arbitrary 64-bit displacement constants.
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Load a full 64-bit pointer from [base + (index << scale) + disp] with ld
  // (or Loongson gsldx).  The matcher guarantees no relocation on the
  // displacement (asserted below).  Clobbers AT and T9.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // Single instruction: gsldx rd, [base + index(<<scale) + simm8].
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large displacement: fold into the index side, gsldx adds base.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store a full 64-bit pointer register to [base + (index << scale) + disp]
  // with sd (or Loongson gssdx).  Clobbers AT and T9.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // Single instruction: gssdx [base + index(<<scale) + simm8].
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large displacement: fold into the index side, gssdx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a compressed (narrow) oop register as a 32-bit word to
  // [base + (index << scale) + disp] with sw (or Loongson gsswx).
  // Mirrors store_P_reg_enc but with 32-bit stores.  Clobbers AT and T9.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large displacement: fold into the index side, gsswx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a null pointer (64-bit zero) to [base + (index << scale) + disp].
  // The value always comes from R0; dispatch differs from store_P_reg_enc in
  // branching on scale first.  Clobbers AT and T9.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        // Scaled index: shift first, then the same simm16/simm8 dispatch.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store a null compressed oop: 32-bit zero (from R0) to
  // [base + (index << scale) + disp] with sw.  No Loongson fast path here.
  // Clobbers AT and T9.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // 64-bit long load from [base + (index << scale) + disp] with ld.
  // No Loongson fast path in this encoding.  Clobbers AT and T9.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // 64-bit long store of a register to [base + (index << scale) + disp]
  // with sd.  No Loongson fast path in this encoding.  Clobbers AT and T9.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2988 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2989 MacroAssembler _masm(&cbuf);
2990 int base = $mem$$base;
2991 int index = $mem$$index;
2992 int scale = $mem$$scale;
2993 int disp = $mem$$disp;
2995 if( index != 0 ) {
2996 if (scale == 0) {
2997 __ daddu(AT, as_Register(base), as_Register(index));
2998 } else {
2999 __ dsll(AT, as_Register(index), scale);
3000 __ daddu(AT, as_Register(base), AT);
3001 }
3002 if( Assembler::is_simm16(disp) ) {
3003 __ sd(R0, AT, disp);
3004 } else {
3005 __ move(T9, disp);
3006 __ addu(AT, AT, T9);
3007 __ sd(R0, AT, 0);
3008 }
3009 } else {
3010 if( Assembler::is_simm16(disp) ) {
3011 __ sd(R0, as_Register(base), disp);
3012 } else {
3013 __ move(T9, disp);
3014 __ addu(AT, as_Register(base), T9);
3015 __ sd(R0, AT, 0);
3016 }
3017 }
3018 %}
3020 enc_class store_L_immL_enc (memory mem, immL src) %{
3021 MacroAssembler _masm(&cbuf);
3022 int base = $mem$$base;
3023 int index = $mem$$index;
3024 int scale = $mem$$scale;
3025 int disp = $mem$$disp;
3026 long imm = $src$$constant;
3028 if( index != 0 ) {
3029 if (scale == 0) {
3030 __ daddu(AT, as_Register(base), as_Register(index));
3031 } else {
3032 __ dsll(AT, as_Register(index), scale);
3033 __ daddu(AT, as_Register(base), AT);
3034 }
3035 if( Assembler::is_simm16(disp) ) {
3036 __ set64(T9, imm);
3037 __ sd(T9, AT, disp);
3038 } else {
3039 __ move(T9, disp);
3040 __ addu(AT, AT, T9);
3041 __ set64(T9, imm);
3042 __ sd(T9, AT, 0);
3043 }
3044 } else {
3045 if( Assembler::is_simm16(disp) ) {
3046 __ move(AT, as_Register(base));
3047 __ set64(T9, imm);
3048 __ sd(T9, AT, disp);
3049 } else {
3050 __ move(T9, disp);
3051 __ addu(AT, as_Register(base), T9);
3052 __ set64(T9, imm);
3053 __ sd(T9, AT, 0);
3054 }
3055 }
3056 %}
  // Single-precision float load from [base + (index << scale) + disp] into an
  // FPU register, using Loongson gslwxc1 when it applies, lwc1 otherwise.
  // Clobbers AT and T9.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single instruction: gslwxc1 fd, [base + index(<<scale) + simm8].
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
  // Single-precision float store from an FPU register to
  // [base + (index << scale) + disp], using Loongson gsswxc1 when it applies,
  // swc1 otherwise.  Clobbers AT and T9.
  enc_class store_F_reg_enc (memory mem, regF src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister src = $src$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single instruction: gsswxc1 fs, [base + index(<<scale) + simm8].
          if (scale == 0) {
            __ gsswxc1(src, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswxc1(src, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ swc1(src, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswxc1(src, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ swc1(src, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ swc1(src, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswxc1(src, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ swc1(src, AT, 0);
        }
      }
    }
  %}
// Load a double-precision float from the memory operand $mem into $dst.
// Address shapes handled: [base + index<<scale + disp] and [base + disp].
// When UseLoongsonISA is set and disp fits in 8 signed bits, the fused
// indexed load gsldxc1 is used; otherwise the effective address is
// formed in the scratch register AT and a plain ldc1 is emitted.
// Clobbers AT, and T9 when disp does not fit a signed 16-bit immediate.
enc_class load_D_enc (regD dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst_reg = as_FloatRegister($dst$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Loongson indexed load: base + index(+scale) with 8-bit offset.
        if (scale == 0) {
          __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsldxc1(dst_reg, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale); then load with 16-bit disp.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ ldc1(dst_reg, AT, disp);
      }
    } else {
      // disp too large for a 16-bit immediate: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, AT, T9, 0);
      } else {
        // Use daddu (64-bit add) for address arithmetic. The previous
        // addu sign-extends the low 32 bits of the sum and would corrupt
        // addresses outside the low 2 GB; every other address-forming
        // path in these enc_classes already uses daddu.
        __ daddu(AT, AT, T9);
        __ ldc1(dst_reg, AT, 0);
      }
    }
  } else {
    // No index register: [base + disp] only.
    if( Assembler::is_simm16(disp) ) {
      __ ldc1(dst_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);  // 64-bit add, see above
        __ ldc1(dst_reg, AT, 0);
      }
    }
  }
%}
// Store the double-precision float $src to the memory operand $mem.
// Address shapes handled: [base + index<<scale + disp] and [base + disp].
// When UseLoongsonISA is set and disp fits in 8 signed bits, the fused
// indexed store gssdxc1 is used; otherwise the effective address is
// formed in the scratch register AT and a plain sdc1 is emitted.
// Clobbers AT, and T9 when disp does not fit a signed 16-bit immediate.
enc_class store_D_reg_enc (memory mem, regD src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src_reg = as_FloatRegister($src$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Loongson indexed store: base + index(+scale) with 8-bit offset.
        if (scale == 0) {
          __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdxc1(src_reg, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale); then store with 16-bit disp.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sdc1(src_reg, AT, disp);
      }
    } else {
      // disp too large for a 16-bit immediate: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, AT, T9, 0);
      } else {
        // Use daddu (64-bit add) for address arithmetic. The previous
        // addu sign-extends the low 32 bits of the sum and would corrupt
        // addresses outside the low 2 GB; every other address-forming
        // path in these enc_classes already uses daddu.
        __ daddu(AT, AT, T9);
        __ sdc1(src_reg, AT, 0);
      }
    }
  } else {
    // No index register: [base + disp] only.
    if( Assembler::is_simm16(disp) ) {
      __ sdc1(src_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);  // 64-bit add, see above
        __ sdc1(src_reg, AT, 0);
      }
    }
  }
%}
// Call from compiled Java code into the VM runtime
// (Java_To_Runtime / Java_To_Runtime_Leaf). Marks the instruction start
// so the runtime-call relocation is recorded, then emits a patchable call.
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  MacroAssembler _masm(&cbuf);
  // This is the instruction starting address for relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ patchable_call((address)$meth$$method);
%}
// Emit a patchable Java static call. The relocation type records how
// the call site must be patched later:
//   - no _method          : call into the runtime (fixup routine)
//   - _optimized_virtual  : optimized virtual call
//   - otherwise           : true static call
// For a real Java method a java-to-interpreter stub is also emitted.
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
  // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  if ( !_method ) {
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  __ patchable_call((address)($meth$$method));
  if( _method ) { // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
/*
 * Emit a Java dynamic (inline-cache) call; the inline cache machinery
 * is handled inside MacroAssembler::ic_call.
 * [Ref: LIR_Assembler::ic_call() ]
 */
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  __ ic_call((address)$meth$$method);
%}
// Materialize a flags value from AT, which is set by the preceding
// fast lock/unlock sequence: flags = 0 when AT == 0, else 0xFFFFFFFF.
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);      // flags = 0
  __ beq(AT, R0, L);
  __ delayed()->nop();         // branch delay slot
  __ move(flags, 0xFFFFFFFF);  // AT != 0: flags = all ones
  __ bind(L);
%}
// Slow-path partial subtype check:
//   result := 0 when 'sub' is a subtype of 'super', 1 on a miss.
// Clobbers $tmp (used as 'length') and T9 (used as scratch 'tmp').
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   * 47c B40: # B21 B41 <- B20  Freq: 0.155379
   * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   * 4bc mov S2, NULL #@loadConP
   * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI */
  __ move(result, 0);  // hit: subtype confirmed
  __ b(done);
  __ nop();            // branch delay slot

  __ bind(miss);
  __ move(result, 1);  // miss: not a subtype
  __ bind(done);
%}
3365 %}
3368 //---------MIPS FRAME--------------------------------------------------------------
3369 // Definition of frame structure and management information.
3370 //
3371 // S T A C K L A Y O U T Allocators stack-slot number
3372 // | (to get allocators register number
3373 // G Owned by | | v add SharedInfo::stack0)
3374 // r CALLER | |
3375 // o | +--------+ pad to even-align allocators stack-slot
3376 // w V | pad0 | numbers; owned by CALLER
3377 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3378 // h ^ | in | 5
3379 // | | args | 4 Holes in incoming args owned by SELF
3380 // | | old | | 3
3381 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3382 // v | | ret | 3 return address
3383 // Owned by +--------+
3384 // Self | pad2 | 2 pad to align old SP
3385 // | +--------+ 1
3386 // | | locks | 0
3387 // | +--------+----> SharedInfo::stack0, even aligned
3388 // | | pad1 | 11 pad to align new SP
3389 // | +--------+
3390 // | | | 10
3391 // | | spills | 9 spills
3392 // V | | 8 (pad0 slot for callee)
3393 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3394 // ^ | out | 7
3395 // | | args | 6 Holes in outgoing args owned by CALLEE
3396 // Owned by new | |
3397 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3398 // | |
3399 //
3400 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3401 // known from SELF's arguments and the Java calling convention.
3402 // Region 6-7 is determined per call site.
3403 // Note 2: If the calling convention leaves holes in the incoming argument
3404 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3406 // incoming area, as the Java calling convention is completely under
3407 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3409 // varargs C calling conventions.
3410 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3411 // even aligned with pad0 as needed.
3412 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3413 // region 6-11 is even aligned; it may be padded out more so that
3414 // the region from SP to FP meets the minimum stack alignment.
3415 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3416 // alignment. Region 11, pad1, may be dynamically extended so that
3417 // SP meets the minimum alignment.
// Frame layout and calling-convention configuration consumed by the
// ADLC to generate Matcher/Compile support code.
frame %{
  // Direction of stack growth.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object.
  // Generates Compile::sync_stack_slots.
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // Generates Matcher::stack_alignment.
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // Generates Matcher::in_preserve_stack_slots.
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // Generated into Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing).
  // StartNode::calling_convention calls this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Same as above, but locating arguments for calls into native (C) code.
  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values.
  // Register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled Java return values.
  // Register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3531 //----------ATTRIBUTES---------------------------------------------------------
3532 //----------Operand Attributes-------------------------------------------------
3533 op_attrib op_cost(0); // Required cost attribute
3535 //----------Instruction Attributes---------------------------------------------
3536 ins_attrib ins_cost(100); // Required cost attribute
3537 ins_attrib ins_size(32); // Required size attribute (in bits)
3538 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3539 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3540 // non-matching short branch variant of some
3541 // long branch?
3542 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3543 // specifies the alignment that some part of the instruction (not
3544 // necessarily the start) requires. If > 1, a compute_padding()
3545 // function must be provided for the instruction
3547 //----------OPERANDS-----------------------------------------------------------
3548 // Operand definitions must precede instruction definitions for correct parsing
3549 // in the ADLC because operands constitute user defined types which are used in
3550 // instruction definitions.
3552 // Vectors
3553 operand vecD() %{
3554 constraint(ALLOC_IN_RC(dbl_reg));
3555 match(VecD);
3557 format %{ %}
3558 interface(REG_INTER);
3559 %}
3561 // Flags register, used as output of compare instructions
3562 operand FlagsReg() %{
3563 constraint(ALLOC_IN_RC(mips_flags));
3564 match(RegFlags);
3566 format %{ "EFLAGS" %}
3567 interface(REG_INTER);
3568 %}
3570 //----------Simple Operands----------------------------------------------------
3571 //TODO: Should we need to define some more special immediate number ?
3572 // Immediate Operands
3573 // Integer Immediate
3574 operand immI() %{
3575 match(ConI);
3576 //TODO: should not match immI8 here LEE
3577 match(immI8);
3579 op_cost(20);
3580 format %{ %}
3581 interface(CONST_INTER);
3582 %}
3584 // Long Immediate 8-bit
3585 operand immL8()
3586 %{
3587 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3588 match(ConL);
3590 op_cost(5);
3591 format %{ %}
3592 interface(CONST_INTER);
3593 %}
3595 // Constant for test vs zero
3596 operand immI0() %{
3597 predicate(n->get_int() == 0);
3598 match(ConI);
3600 op_cost(0);
3601 format %{ %}
3602 interface(CONST_INTER);
3603 %}
3605 // Constant for increment
3606 operand immI1() %{
3607 predicate(n->get_int() == 1);
3608 match(ConI);
3610 op_cost(0);
3611 format %{ %}
3612 interface(CONST_INTER);
3613 %}
3615 // Constant for decrement
3616 operand immI_M1() %{
3617 predicate(n->get_int() == -1);
3618 match(ConI);
3620 op_cost(0);
3621 format %{ %}
3622 interface(CONST_INTER);
3623 %}
3625 operand immI_MaxI() %{
3626 predicate(n->get_int() == 2147483647);
3627 match(ConI);
3629 op_cost(0);
3630 format %{ %}
3631 interface(CONST_INTER);
3632 %}
3634 // Valid scale values for addressing modes
3635 operand immI2() %{
3636 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3637 match(ConI);
3639 format %{ %}
3640 interface(CONST_INTER);
3641 %}
3643 operand immI8() %{
3644 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3645 match(ConI);
3647 op_cost(5);
3648 format %{ %}
3649 interface(CONST_INTER);
3650 %}
3652 operand immI16() %{
3653 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3654 match(ConI);
3656 op_cost(10);
3657 format %{ %}
3658 interface(CONST_INTER);
3659 %}
3661 // Constant for long shifts
3662 operand immI_32() %{
3663 predicate( n->get_int() == 32 );
3664 match(ConI);
3666 op_cost(0);
3667 format %{ %}
3668 interface(CONST_INTER);
3669 %}
3671 operand immI_63() %{
3672 predicate( n->get_int() == 63 );
3673 match(ConI);
3675 op_cost(0);
3676 format %{ %}
3677 interface(CONST_INTER);
3678 %}
3680 operand immI_0_31() %{
3681 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3682 match(ConI);
3684 op_cost(0);
3685 format %{ %}
3686 interface(CONST_INTER);
3687 %}
// Operand for non-negative integer mask
3690 operand immI_nonneg_mask() %{
3691 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3692 match(ConI);
3694 op_cost(0);
3695 format %{ %}
3696 interface(CONST_INTER);
3697 %}
3699 operand immI_32_63() %{
3700 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3701 match(ConI);
3702 op_cost(0);
3704 format %{ %}
3705 interface(CONST_INTER);
3706 %}
// Integer immediate in [-32767, 32768]: the range is shifted by one from
// the plain simm16 range because the value is used negated — a subtract
// of imm can be emitted as an add of -imm, which must fit in a signed
// 16-bit field.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3717 operand immI_0_32767() %{
3718 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3719 match(ConI);
3720 op_cost(0);
3722 format %{ %}
3723 interface(CONST_INTER);
3724 %}
3726 operand immI_0_65535() %{
3727 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3728 match(ConI);
3729 op_cost(0);
3731 format %{ %}
3732 interface(CONST_INTER);
3733 %}
3735 operand immI_1() %{
3736 predicate( n->get_int() == 1 );
3737 match(ConI);
3739 op_cost(0);
3740 format %{ %}
3741 interface(CONST_INTER);
3742 %}
3744 operand immI_2() %{
3745 predicate( n->get_int() == 2 );
3746 match(ConI);
3748 op_cost(0);
3749 format %{ %}
3750 interface(CONST_INTER);
3751 %}
3753 operand immI_3() %{
3754 predicate( n->get_int() == 3 );
3755 match(ConI);
3757 op_cost(0);
3758 format %{ %}
3759 interface(CONST_INTER);
3760 %}
3762 operand immI_7() %{
3763 predicate( n->get_int() == 7 );
3764 match(ConI);
3766 format %{ %}
3767 interface(CONST_INTER);
3768 %}
3770 // Immediates for special shifts (sign extend)
3772 // Constants for increment
3773 operand immI_16() %{
3774 predicate( n->get_int() == 16 );
3775 match(ConI);
3777 format %{ %}
3778 interface(CONST_INTER);
3779 %}
3781 operand immI_24() %{
3782 predicate( n->get_int() == 24 );
3783 match(ConI);
3785 format %{ %}
3786 interface(CONST_INTER);
3787 %}
3789 // Constant for byte-wide masking
3790 operand immI_255() %{
3791 predicate( n->get_int() == 255 );
3792 match(ConI);
3794 op_cost(0);
3795 format %{ %}
3796 interface(CONST_INTER);
3797 %}
3799 operand immI_65535() %{
3800 predicate( n->get_int() == 65535 );
3801 match(ConI);
3803 op_cost(5);
3804 format %{ %}
3805 interface(CONST_INTER);
3806 %}
3808 operand immI_65536() %{
3809 predicate( n->get_int() == 65536 );
3810 match(ConI);
3812 op_cost(5);
3813 format %{ %}
3814 interface(CONST_INTER);
3815 %}
3817 operand immI_M65536() %{
3818 predicate( n->get_int() == -65536 );
3819 match(ConI);
3821 op_cost(5);
3822 format %{ %}
3823 interface(CONST_INTER);
3824 %}
3826 // Pointer Immediate
3827 operand immP() %{
3828 match(ConP);
3830 op_cost(10);
3831 format %{ %}
3832 interface(CONST_INTER);
3833 %}
3835 // NULL Pointer Immediate
3836 operand immP0() %{
3837 predicate( n->get_ptr() == 0 );
3838 match(ConP);
3839 op_cost(0);
3841 format %{ %}
3842 interface(CONST_INTER);
3843 %}
3845 // Pointer Immediate: 64-bit
3846 operand immP_set() %{
3847 match(ConP);
3849 op_cost(5);
3850 // formats are generated automatically for constants and base registers
3851 format %{ %}
3852 interface(CONST_INTER);
3853 %}
3855 // Pointer Immediate: 64-bit
3856 operand immP_load() %{
3857 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3858 match(ConP);
3860 op_cost(5);
3861 // formats are generated automatically for constants and base registers
3862 format %{ %}
3863 interface(CONST_INTER);
3864 %}
3866 // Pointer Immediate: 64-bit
3867 operand immP_no_oop_cheap() %{
3868 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3869 match(ConP);
3871 op_cost(5);
3872 // formats are generated automatically for constants and base registers
3873 format %{ %}
3874 interface(CONST_INTER);
3875 %}
3877 // Pointer for polling page
3878 operand immP_poll() %{
3879 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3880 match(ConP);
3881 op_cost(5);
3883 format %{ %}
3884 interface(CONST_INTER);
3885 %}
3887 // Pointer Immediate
3888 operand immN() %{
3889 match(ConN);
3891 op_cost(10);
3892 format %{ %}
3893 interface(CONST_INTER);
3894 %}
3896 operand immNKlass() %{
3897 match(ConNKlass);
3899 op_cost(10);
3900 format %{ %}
3901 interface(CONST_INTER);
3902 %}
3904 // NULL Pointer Immediate
3905 operand immN0() %{
3906 predicate(n->get_narrowcon() == 0);
3907 match(ConN);
3909 op_cost(5);
3910 format %{ %}
3911 interface(CONST_INTER);
3912 %}
3914 // Long Immediate
3915 operand immL() %{
3916 match(ConL);
3918 op_cost(20);
3919 format %{ %}
3920 interface(CONST_INTER);
3921 %}
3923 // Long Immediate zero
3924 operand immL0() %{
3925 predicate( n->get_long() == 0L );
3926 match(ConL);
3927 op_cost(0);
3929 format %{ %}
3930 interface(CONST_INTER);
3931 %}
3933 operand immL7() %{
3934 predicate( n->get_long() == 7L );
3935 match(ConL);
3936 op_cost(0);
3938 format %{ %}
3939 interface(CONST_INTER);
3940 %}
3942 operand immL_M1() %{
3943 predicate( n->get_long() == -1L );
3944 match(ConL);
3945 op_cost(0);
3947 format %{ %}
3948 interface(CONST_INTER);
3949 %}
3951 // bit 0..2 zero
3952 operand immL_M8() %{
3953 predicate( n->get_long() == -8L );
3954 match(ConL);
3955 op_cost(0);
3957 format %{ %}
3958 interface(CONST_INTER);
3959 %}
3961 // bit 2 zero
3962 operand immL_M5() %{
3963 predicate( n->get_long() == -5L );
3964 match(ConL);
3965 op_cost(0);
3967 format %{ %}
3968 interface(CONST_INTER);
3969 %}
3971 // bit 1..2 zero
3972 operand immL_M7() %{
3973 predicate( n->get_long() == -7L );
3974 match(ConL);
3975 op_cost(0);
3977 format %{ %}
3978 interface(CONST_INTER);
3979 %}
3981 // bit 0..1 zero
3982 operand immL_M4() %{
3983 predicate( n->get_long() == -4L );
3984 match(ConL);
3985 op_cost(0);
3987 format %{ %}
3988 interface(CONST_INTER);
3989 %}
3991 // bit 3..6 zero
3992 operand immL_M121() %{
3993 predicate( n->get_long() == -121L );
3994 match(ConL);
3995 op_cost(0);
3997 format %{ %}
3998 interface(CONST_INTER);
3999 %}
4001 // Long immediate from 0 to 127.
4002 // Used for a shorter form of long mul by 10.
4003 operand immL_127() %{
4004 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4005 match(ConL);
4006 op_cost(0);
4008 format %{ %}
4009 interface(CONST_INTER);
4010 %}
// Operand for non-negative long mask
4013 operand immL_nonneg_mask() %{
4014 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
4015 match(ConL);
4017 op_cost(0);
4018 format %{ %}
4019 interface(CONST_INTER);
4020 %}
4022 operand immL_0_65535() %{
4023 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4024 match(ConL);
4025 op_cost(0);
4027 format %{ %}
4028 interface(CONST_INTER);
4029 %}
4031 // Long Immediate: cheap (materialize in <= 3 instructions)
4032 operand immL_cheap() %{
4033 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
4034 match(ConL);
4035 op_cost(0);
4037 format %{ %}
4038 interface(CONST_INTER);
4039 %}
4041 // Long Immediate: expensive (materialize in > 3 instructions)
4042 operand immL_expensive() %{
4043 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
4044 match(ConL);
4045 op_cost(0);
4047 format %{ %}
4048 interface(CONST_INTER);
4049 %}
4051 operand immL16() %{
4052 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4053 match(ConL);
4055 op_cost(10);
4056 format %{ %}
4057 interface(CONST_INTER);
4058 %}
// Long immediate in [-32767, 32768]: the range is shifted by one from
// the plain simm16 range because the value is used negated — a subtract
// of imm can be emitted as an add of -imm, which must fit in a signed
// 16-bit field.
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
4069 // Long Immediate: low 32-bit mask
4070 operand immL_32bits() %{
4071 predicate(n->get_long() == 0xFFFFFFFFL);
4072 match(ConL);
4073 op_cost(20);
4075 format %{ %}
4076 interface(CONST_INTER);
4077 %}
4079 // Long Immediate 32-bit signed
4080 operand immL32()
4081 %{
4082 predicate(n->get_long() == (int) (n->get_long()));
4083 match(ConL);
4085 op_cost(15);
4086 format %{ %}
4087 interface(CONST_INTER);
4088 %}
4091 //single-precision floating-point zero
4092 operand immF0() %{
4093 predicate(jint_cast(n->getf()) == 0);
4094 match(ConF);
4096 op_cost(5);
4097 format %{ %}
4098 interface(CONST_INTER);
4099 %}
4101 //single-precision floating-point immediate
4102 operand immF() %{
4103 match(ConF);
4105 op_cost(20);
4106 format %{ %}
4107 interface(CONST_INTER);
4108 %}
4110 //double-precision floating-point zero
4111 operand immD0() %{
4112 predicate(jlong_cast(n->getd()) == 0);
4113 match(ConD);
4115 op_cost(5);
4116 format %{ %}
4117 interface(CONST_INTER);
4118 %}
4120 //double-precision floating-point immediate
4121 operand immD() %{
4122 match(ConD);
4124 op_cost(20);
4125 format %{ %}
4126 interface(CONST_INTER);
4127 %}
4129 // Register Operands
4130 // Integer Register
4131 operand mRegI() %{
4132 constraint(ALLOC_IN_RC(int_reg));
4133 match(RegI);
4135 format %{ %}
4136 interface(REG_INTER);
4137 %}
4139 operand no_Ax_mRegI() %{
4140 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4141 match(RegI);
4142 match(mRegI);
4144 format %{ %}
4145 interface(REG_INTER);
4146 %}
4148 operand mS0RegI() %{
4149 constraint(ALLOC_IN_RC(s0_reg));
4150 match(RegI);
4151 match(mRegI);
4153 format %{ "S0" %}
4154 interface(REG_INTER);
4155 %}
4157 operand mS1RegI() %{
4158 constraint(ALLOC_IN_RC(s1_reg));
4159 match(RegI);
4160 match(mRegI);
4162 format %{ "S1" %}
4163 interface(REG_INTER);
4164 %}
4166 operand mS2RegI() %{
4167 constraint(ALLOC_IN_RC(s2_reg));
4168 match(RegI);
4169 match(mRegI);
4171 format %{ "S2" %}
4172 interface(REG_INTER);
4173 %}
4175 operand mS3RegI() %{
4176 constraint(ALLOC_IN_RC(s3_reg));
4177 match(RegI);
4178 match(mRegI);
4180 format %{ "S3" %}
4181 interface(REG_INTER);
4182 %}
4184 operand mS4RegI() %{
4185 constraint(ALLOC_IN_RC(s4_reg));
4186 match(RegI);
4187 match(mRegI);
4189 format %{ "S4" %}
4190 interface(REG_INTER);
4191 %}
4193 operand mS5RegI() %{
4194 constraint(ALLOC_IN_RC(s5_reg));
4195 match(RegI);
4196 match(mRegI);
4198 format %{ "S5" %}
4199 interface(REG_INTER);
4200 %}
4202 operand mS6RegI() %{
4203 constraint(ALLOC_IN_RC(s6_reg));
4204 match(RegI);
4205 match(mRegI);
4207 format %{ "S6" %}
4208 interface(REG_INTER);
4209 %}
4211 operand mS7RegI() %{
4212 constraint(ALLOC_IN_RC(s7_reg));
4213 match(RegI);
4214 match(mRegI);
4216 format %{ "S7" %}
4217 interface(REG_INTER);
4218 %}
4221 operand mT0RegI() %{
4222 constraint(ALLOC_IN_RC(t0_reg));
4223 match(RegI);
4224 match(mRegI);
4226 format %{ "T0" %}
4227 interface(REG_INTER);
4228 %}
4230 operand mT1RegI() %{
4231 constraint(ALLOC_IN_RC(t1_reg));
4232 match(RegI);
4233 match(mRegI);
4235 format %{ "T1" %}
4236 interface(REG_INTER);
4237 %}
4239 operand mT2RegI() %{
4240 constraint(ALLOC_IN_RC(t2_reg));
4241 match(RegI);
4242 match(mRegI);
4244 format %{ "T2" %}
4245 interface(REG_INTER);
4246 %}
4248 operand mT3RegI() %{
4249 constraint(ALLOC_IN_RC(t3_reg));
4250 match(RegI);
4251 match(mRegI);
4253 format %{ "T3" %}
4254 interface(REG_INTER);
4255 %}
4257 operand mT8RegI() %{
4258 constraint(ALLOC_IN_RC(t8_reg));
4259 match(RegI);
4260 match(mRegI);
4262 format %{ "T8" %}
4263 interface(REG_INTER);
4264 %}
4266 operand mT9RegI() %{
4267 constraint(ALLOC_IN_RC(t9_reg));
4268 match(RegI);
4269 match(mRegI);
4271 format %{ "T9" %}
4272 interface(REG_INTER);
4273 %}
4275 operand mA0RegI() %{
4276 constraint(ALLOC_IN_RC(a0_reg));
4277 match(RegI);
4278 match(mRegI);
4280 format %{ "A0" %}
4281 interface(REG_INTER);
4282 %}
4284 operand mA1RegI() %{
4285 constraint(ALLOC_IN_RC(a1_reg));
4286 match(RegI);
4287 match(mRegI);
4289 format %{ "A1" %}
4290 interface(REG_INTER);
4291 %}
4293 operand mA2RegI() %{
4294 constraint(ALLOC_IN_RC(a2_reg));
4295 match(RegI);
4296 match(mRegI);
4298 format %{ "A2" %}
4299 interface(REG_INTER);
4300 %}
4302 operand mA3RegI() %{
4303 constraint(ALLOC_IN_RC(a3_reg));
4304 match(RegI);
4305 match(mRegI);
4307 format %{ "A3" %}
4308 interface(REG_INTER);
4309 %}
4311 operand mA4RegI() %{
4312 constraint(ALLOC_IN_RC(a4_reg));
4313 match(RegI);
4314 match(mRegI);
4316 format %{ "A4" %}
4317 interface(REG_INTER);
4318 %}
4320 operand mA5RegI() %{
4321 constraint(ALLOC_IN_RC(a5_reg));
4322 match(RegI);
4323 match(mRegI);
4325 format %{ "A5" %}
4326 interface(REG_INTER);
4327 %}
4329 operand mA6RegI() %{
4330 constraint(ALLOC_IN_RC(a6_reg));
4331 match(RegI);
4332 match(mRegI);
4334 format %{ "A6" %}
4335 interface(REG_INTER);
4336 %}
4338 operand mA7RegI() %{
4339 constraint(ALLOC_IN_RC(a7_reg));
4340 match(RegI);
4341 match(mRegI);
4343 format %{ "A7" %}
4344 interface(REG_INTER);
4345 %}
4347 operand mV0RegI() %{
4348 constraint(ALLOC_IN_RC(v0_reg));
4349 match(RegI);
4350 match(mRegI);
4352 format %{ "V0" %}
4353 interface(REG_INTER);
4354 %}
4356 operand mV1RegI() %{
4357 constraint(ALLOC_IN_RC(v1_reg));
4358 match(RegI);
4359 match(mRegI);
4361 format %{ "V1" %}
4362 interface(REG_INTER);
4363 %}
4365 operand mRegN() %{
4366 constraint(ALLOC_IN_RC(int_reg));
4367 match(RegN);
4369 format %{ %}
4370 interface(REG_INTER);
4371 %}
4373 operand t0_RegN() %{
4374 constraint(ALLOC_IN_RC(t0_reg));
4375 match(RegN);
4376 match(mRegN);
4378 format %{ %}
4379 interface(REG_INTER);
4380 %}
4382 operand t1_RegN() %{
4383 constraint(ALLOC_IN_RC(t1_reg));
4384 match(RegN);
4385 match(mRegN);
4387 format %{ %}
4388 interface(REG_INTER);
4389 %}
4391 operand t2_RegN() %{
4392 constraint(ALLOC_IN_RC(t2_reg));
4393 match(RegN);
4394 match(mRegN);
4396 format %{ %}
4397 interface(REG_INTER);
4398 %}
// Fixed-register narrow-oop operands. Each operand pins a RegN/mRegN value
// to one specific general-purpose register (via its single-register register
// class) so that instruct rules can demand a narrow oop in a named register.
operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register
// mRegP is the general pointer operand; no_T8_mRegP excludes T8 (needed when
// an instruct uses T8 internally). The fixed-register *_RegP operands below
// each pin a pointer to one named register.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// NOTE: t8_RegP deliberately has no match(no_T8_mRegP) — T8 is excluded from
// the no_T8_p_reg class, so it cannot be a subset of no_T8_mRegP.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4871 /*
4872 operand mSPRegP(mRegP reg) %{
4873 constraint(ALLOC_IN_RC(sp_reg));
4874 match(reg);
4876 format %{ "SP" %}
4877 interface(REG_INTER);
4878 %}
4880 operand mFPRegP(mRegP reg) %{
4881 constraint(ALLOC_IN_RC(fp_reg));
4882 match(reg);
4884 format %{ "FP" %}
4885 interface(REG_INTER);
4886 %}
4887 */
// Long register operands: mRegL allocates anywhere in long_reg; the
// fixed-register variants below pin a long value to one named register.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  // NOTE(review): unlike its siblings this operand hard-codes "A0" in the
  // format; presumably intentional (the constraint forces A0 anyway) — confirm.
  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands (single precision)
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double Precision Floating register operands
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Indirect Memory Operand: plain [reg] addressing, no index/scale/offset.
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand: [reg + imm8]
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  op_cost(10);
  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register: [reg + lreg << scale]
operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset] where the index is a sign-extended int
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}
// [reg + off + (int index sign-extended to long) << scale]
operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand,
// with a compressed-oop base; only valid when the heap base needs no shift.
operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset] for compressed Oops
operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  predicate(Universe::narrow_oop_shift() == 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

//FIXME: I think it's better to limit the immI to be 16-bit at most!
// Indirect Memory Plus Long Offset Operand: [reg + imm32]
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register: [addr + index]
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}
// Compressed-klass and compressed-oop addressing modes. All are guarded by a
// predicate requiring a zero decode shift, so the narrow value can be used
// directly as the base address.
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Operand with a compressed-oop base
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand with a compressed-oop base
operand indOffset8Narrow(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand (compressed-oop base)
operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}
//----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use its address expression again after loading
// the first word of the long. If the load-long destination overlaps with
// registers used in the addressing expression, the 2nd half will be loaded
// from a clobbered address. Fix this by requiring that load-long use
// address registers that do not overlap with the load-long target.

// load-long support
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);  // high cost so the matcher only picks this for load-long
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparison Code
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}

// Comparison Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// NOTE(review): encodings are identical to cmpOp above — presumably the
// signed/unsigned distinction is handled by the matching instruct; confirm.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
//------------------------OPERAND CLASSES--------------------------------------
// 'memory' groups every addressing-mode operand above so instructs can accept
// any of them through a single operand class.
//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.

pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;          // Fixed size instructions
  branch_has_delay_slot;            // branches have a delay slot in gs2
  max_instructions_per_bundle = 1;  // 1 instruction per bundle
  max_bundles_per_cycle = 4;        // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;        // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;      // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine

resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+1;
  DECODE : ID;
  ALU : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  ALU2 : CA;
%}

pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  MEM : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//no.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD;
%}

pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
5901 //----------INSTRUCTIONS-------------------------------------------------------
5902 //
5903 // match -- States which machine-independent subtree may be replaced
5904 // by this instruction.
5905 // ins_cost -- The estimated cost of this instruction is used by instruction
5906 // selection to identify a minimum cost tree of machine
5907 // instructions that matches a tree of machine-independent
5908 // instructions.
5909 // format -- A string providing the disassembly for this instruction.
5910 // The value of an instruction's operand may be inserted
5911 // by referring to it with a '$' prefix.
5912 // opcode -- Three instruction opcodes may be provided. These are referred
5913 // to within an encode class as $primary, $secondary, and $tertiary
5914 // respectively. The primary opcode is commonly used to
5915 // indicate the type of machine instruction, while secondary
5916 // and tertiary are often used for prefix options or addressing
5917 // modes.
5918 // ins_encode -- A list of encode classes with parameters. The encode class
5919 // name must have been defined in an 'enc_class' specification
5920 // in the encode section of the architecture description.
// Load Integer: matches LoadI, emits a sign-extending "lw" via load_I_enc.
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Integer widened to long: ConvI2L folded into the load ("lw" sign-extends).
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Integer (32 bit signed) to Byte (8 bit signed)
// Folds (x << 24) >> 24 over a LoadI into a single sign-extending byte load.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Folds (LoadI & 0xFF) into a single zero-extending byte load.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Integer (32 bit signed) to Short (16 bit signed)
// Folds (x << 16) >> 16 over a LoadI into a single sign-extending halfword load.
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Folds (LoadI & 0xFFFF) into a single zero-extending halfword load.
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
instruct loadL(mRegL dst, memory mem) %{
// predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
// Load Long - UNaligned
// Currently emitted as a plain "ld"; see FIXME below about ldl/ldr.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
// Store Long
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Store Long zero: uses the hardware zero register, so it is cheaper than storeL_imm.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd zero, $mem #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Store Long immediate (non-zero constants).
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $src, $mem #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer: zero-extending 32-bit load of a narrow oop.
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load narrow oop and decode to full pointer in one instruction; only legal
// when decoding is the identity (zero heap base, zero shift).
instruct loadN2P(mRegP dst, memory mem)
%{
  match(Set dst (DecodeN (LoadN mem)));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# @ loadN2P" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Pointer (full 64-bit).
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Klass Pointer (uncompressed): plain pointer-width load.
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load narrow Klass Pointer: zero-extending 32-bit load of a compressed klass.
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load narrow klass and decode to full pointer in one instruction; only legal
// when decoding is the identity (zero klass base, zero shift).
instruct loadN2PKlass(mRegP dst, memory mem)
%{
  match(Set dst (DecodeNKlass (LoadNKlass mem)));
  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant: materialize a 32-bit integer constant into a register.
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize an arbitrary 64-bit long constant via the set64 macro.
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6121 /*
6122 // Load long value from constant table (predicated by immL_expensive).
6123 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6124 match(Set dst src);
6125 ins_cost(150);
6126 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6127 ins_encode %{
6128 int con_offset = $constantoffset($src);
6130 if (Assembler::is_simm16(con_offset)) {
6131 __ ld($dst$$Register, $constanttablebase, con_offset);
6132 } else {
6133 __ set64(AT, con_offset);
6134 if (UseLoongsonISA) {
6135 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6136 } else {
6137 __ daddu(AT, $constanttablebase, AT);
6138 __ ld($dst$$Register, AT, 0);
6139 }
6140 }
6141 %}
6142 ins_pipe(ialu_loadI);
6143 %}
6144 */
// Materialize a long constant that fits in a 16-bit immediate with one daddiu.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Materialize long zero from the hardware zero register (cheapest form).
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Load Range: array-length load, encoded as an ordinary int load.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer (full 64-bit).
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}
// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Compressed Pointer (32-bit narrow oop).
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Encode a pointer and store it as a narrow oop in one instruction; only legal
// when encoding is the identity (zero heap base, zero shift).
instruct storeP2N(memory mem, mRegP src)
%{
  match(Set mem (StoreN mem (EncodeP src)));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# @ storeP2N" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store compressed klass pointer.
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Encode a klass pointer and store it narrow in one instruction; only legal
// when encoding is the identity (zero klass base, zero shift).
instruct storeP2NKlass(memory mem, mRegP src)
%{
  match(Set mem (StoreNKlass mem (EncodePKlass src)));
  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store compressed NULL pointer.
instruct storeImmN0(memory mem, immN0 zero)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 zero, $mem\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Byte with ConvL2I folded away: "sb" stores only the low byte anyway.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Byte widened to long: ConvI2L folded into the sign-extending "lb".
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Unsigned Byte widened to long: ConvI2L folded into the zero-extending "lbu".
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Short (16 bit signed) to Byte (8 bit signed)
// Folds (x << 24) >> 24 over a LoadS into a single sign-extending byte load.
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Short widened to long: ConvI2L folded into the sign-extending "lh".
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Integer with ConvL2I folded away: "sw" stores only the low 32 bits anyway.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Materialize a pointer constant, emitting relocation info when the constant
// is a metadata (Klass*) or oop pointer so the GC/class-unloading machinery
// can patch it; plain (relocInfo::none) constants are set without relocation.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      // Klass pointer: record it and emit a patchable 48-bit load.
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      // Oop pointer: record it and emit a patchable 48-bit load.
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      // Non-relocatable constant: plain (non-patchable) 64-bit set.
      __ set64(dst, (long)value);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6418 /*
6419 instruct loadConP_load(mRegP dst, immP_load src) %{
6420 match(Set dst src);
6422 ins_cost(100);
6423 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6425 ins_encode %{
6427 int con_offset = $constantoffset($src);
6429 if (Assembler::is_simm16(con_offset)) {
6430 __ ld($dst$$Register, $constanttablebase, con_offset);
6431 } else {
6432 __ set64(AT, con_offset);
6433 if (UseLoongsonISA) {
6434 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6435 } else {
6436 __ daddu(AT, $constanttablebase, AT);
6437 __ ld($dst$$Register, AT, 0);
6438 }
6439 }
6440 %}
6442 ins_pipe(ialu_loadI);
6443 %}
6444 */
// Materialize a cheap non-oop pointer constant (no relocation needed).
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}
// Materialize the safepoint polling page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Materialize the NULL pointer from the hardware zero register.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize the compressed NULL pointer (narrow oop zero).
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize a compressed oop constant with oop relocation (set_narrow_oop).
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
// Materialize a compressed klass constant with relocation (set_narrow_klass).
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    __ move(S3, oop);  // method oop is handed over in S3
    __ jr(target);
    __ nop();          // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
// ins_pipe( pipe_jump );
%}
6565 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6567 - Common try/catch:
6568 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6569 |- V0, V1 are created
6570 |- T9 <= SharedRuntime::exception_handler_for_return_address
6571 `- jr T9
6572 `- the caller's exception_handler
6573 `- jr OptoRuntime::exception_blob
6574 `- here
6575 - Rethrow(e.g. 'unwind'):
6576 * The callee:
6577 |- an exception is triggered during execution
6578 `- exits the callee method through RethrowException node
6579 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6580 `- The callee jumps to OptoRuntime::rethrow_stub()
6581 * In OptoRuntime::rethrow_stub:
6582 |- The VM calls _rethrow_Java to determine the return address in the caller method
6583 `- exits the stub with tailjmpInd
6584 |- pops exception_oop(V0) and exception_pc(V1)
6585 `- jumps to the return address(usually an exception_handler)
6586 * The caller:
6587 `- continues processing the exception_blob with V0/V1
6588 */
6590 /*
6591 Disassembling OptoRuntime::rethrow_stub()
6593 ; locals
6594 0x2d3bf320: addiu sp, sp, 0xfffffff8
6595 0x2d3bf324: sw ra, 0x4(sp)
6596 0x2d3bf328: sw fp, 0x0(sp)
6597 0x2d3bf32c: addu fp, sp, zero
6598 0x2d3bf330: addiu sp, sp, 0xfffffff0
6599 0x2d3bf334: sw ra, 0x8(sp)
6600 0x2d3bf338: sw t0, 0x4(sp)
6601 0x2d3bf33c: sw sp, 0x0(sp)
6603 ; get_thread(S2)
6604 0x2d3bf340: addu s2, sp, zero
6605 0x2d3bf344: srl s2, s2, 12
6606 0x2d3bf348: sll s2, s2, 2
6607 0x2d3bf34c: lui at, 0x2c85
6608 0x2d3bf350: addu at, at, s2
6609 0x2d3bf354: lw s2, 0xffffcc80(at)
6611 0x2d3bf358: lw s0, 0x0(sp)
0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6613 0x2d3bf360: sw s2, 0xc(sp)
6615 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6616 0x2d3bf364: lw a0, 0x4(sp)
6617 0x2d3bf368: lw a1, 0xc(sp)
6618 0x2d3bf36c: lw a2, 0x8(sp)
6619 ;; Java_To_Runtime
6620 0x2d3bf370: lui t9, 0x2c34
6621 0x2d3bf374: addiu t9, t9, 0xffff8a48
6622 0x2d3bf378: jalr t9
6623 0x2d3bf37c: nop
6625 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6627 0x2d3bf384: lw s0, 0xc(sp)
6628 0x2d3bf388: sw zero, 0x118(s0)
6629 0x2d3bf38c: sw zero, 0x11c(s0)
6630 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6631 0x2d3bf394: addu s2, s0, zero
6632 0x2d3bf398: sw zero, 0x144(s2)
6633 0x2d3bf39c: lw s0, 0x4(s2)
6634 0x2d3bf3a0: addiu s4, zero, 0x0
6635 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6636 0x2d3bf3a8: nop
6637 0x2d3bf3ac: addiu sp, sp, 0x10
6638 0x2d3bf3b0: addiu sp, sp, 0x8
6639 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6640 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6641 0x2d3bf3bc: lui at, 0x2b48
6642 0x2d3bf3c0: lw at, 0x100(at)
6644 ; tailjmpInd: Restores exception_oop & exception_pc
6645 0x2d3bf3c4: addu v1, ra, zero
6646 0x2d3bf3c8: addu v0, s1, zero
6647 0x2d3bf3cc: jr s3
6648 0x2d3bf3d0: nop
6649 ; Exception:
6650 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6651 0x2d3bf3d8: addiu s1, s1, 0x40
6652 0x2d3bf3dc: addiu s2, zero, 0x0
6653 0x2d3bf3e0: addiu sp, sp, 0x10
6654 0x2d3bf3e4: addiu sp, sp, 0x8
6655 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6656 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6657 0x2d3bf3f0: lui at, 0x2b48
6658 0x2d3bf3f4: lw at, 0x100(at)
6659 ; TailCalljmpInd
6660 __ push(RA); ; to be used in generate_forward_exception()
6661 0x2d3bf3f8: addu t7, s2, zero
6662 0x2d3bf3fc: jr s1
6663 0x2d3bf400: nop
6664 */
6665 // Rethrow exception:
6666 // The exception oop will come in the first argument position.
6667 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to OptoRuntime::rethrow_stub, which locates the
// exception handler in the parent method. The exception oop arrives in the
// first argument position (see the commentary above).
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark and relocate the jump so it can be patched as a runtime call.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ patchable_jump((address)OptoRuntime::rethrow_stub());
  %}
  ins_pipe( pipe_jump );
%}
6686 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6687 match(If cmp (CmpP op1 zero));
6688 effect(USE labl);
6690 ins_cost(180);
6691 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6693 ins_encode %{
6694 Register op1 = $op1$$Register;
6695 Register op2 = R0;
6696 Label &L = *($labl$$label);
6697 int flag = $cmp$$cmpcode;
6699 switch(flag) {
6700 case 0x01: //equal
6701 if (&L)
6702 __ beq(op1, op2, L);
6703 else
6704 __ beq(op1, op2, (int)0);
6705 break;
6706 case 0x02: //not_equal
6707 if (&L)
6708 __ bne(op1, op2, L);
6709 else
6710 __ bne(op1, op2, (int)0);
6711 break;
6712 default:
6713 Unimplemented();
6714 }
6715 __ nop();
6716 %}
6718 ins_pc_relative(1);
6719 ins_pipe( pipe_alu_branch );
6720 %}
6722 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
6723 match(If cmp (CmpP (DecodeN op1) zero));
6724 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6725 effect(USE labl);
6727 ins_cost(180);
6728 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
6730 ins_encode %{
6731 Register op1 = $op1$$Register;
6732 Register op2 = R0;
6733 Label &L = *($labl$$label);
6734 int flag = $cmp$$cmpcode;
6736 switch(flag)
6737 {
6738 case 0x01: //equal
6739 if (&L)
6740 __ beq(op1, op2, L);
6741 else
6742 __ beq(op1, op2, (int)0);
6743 break;
6744 case 0x02: //not_equal
6745 if (&L)
6746 __ bne(op1, op2, L);
6747 else
6748 __ bne(op1, op2, (int)0);
6749 break;
6750 default:
6751 Unimplemented();
6752 }
6753 __ nop();
6754 %}
6756 ins_pc_relative(1);
6757 ins_pipe( pipe_alu_branch );
6758 %}
6761 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6762 match(If cmp (CmpP op1 op2));
6763 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6764 effect(USE labl);
6766 ins_cost(200);
6767 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6769 ins_encode %{
6770 Register op1 = $op1$$Register;
6771 Register op2 = $op2$$Register;
6772 Label &L = *($labl$$label);
6773 int flag = $cmp$$cmpcode;
6775 switch(flag) {
6776 case 0x01: //equal
6777 if (&L)
6778 __ beq(op1, op2, L);
6779 else
6780 __ beq(op1, op2, (int)0);
6781 break;
6782 case 0x02: //not_equal
6783 if (&L)
6784 __ bne(op1, op2, L);
6785 else
6786 __ bne(op1, op2, (int)0);
6787 break;
6788 case 0x03: //above
6789 __ sltu(AT, op2, op1);
6790 if(&L)
6791 __ bne(R0, AT, L);
6792 else
6793 __ bne(R0, AT, (int)0);
6794 break;
6795 case 0x04: //above_equal
6796 __ sltu(AT, op1, op2);
6797 if(&L)
6798 __ beq(AT, R0, L);
6799 else
6800 __ beq(AT, R0, (int)0);
6801 break;
6802 case 0x05: //below
6803 __ sltu(AT, op1, op2);
6804 if(&L)
6805 __ bne(R0, AT, L);
6806 else
6807 __ bne(R0, AT, (int)0);
6808 break;
6809 case 0x06: //below_equal
6810 __ sltu(AT, op2, op1);
6811 if(&L)
6812 __ beq(AT, R0, L);
6813 else
6814 __ beq(AT, R0, (int)0);
6815 break;
6816 default:
6817 Unimplemented();
6818 }
6819 __ nop();
6820 %}
6822 ins_pc_relative(1);
6823 ins_pipe( pipe_alu_branch );
6824 %}
6826 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6827 match(If cmp (CmpN op1 null));
6828 effect(USE labl);
6830 ins_cost(180);
6831 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6832 "BP$cmp $labl @ cmpN_null_branch" %}
6833 ins_encode %{
6834 Register op1 = $op1$$Register;
6835 Register op2 = R0;
6836 Label &L = *($labl$$label);
6837 int flag = $cmp$$cmpcode;
6839 switch(flag) {
6840 case 0x01: //equal
6841 if (&L)
6842 __ beq(op1, op2, L);
6843 else
6844 __ beq(op1, op2, (int)0);
6845 break;
6846 case 0x02: //not_equal
6847 if (&L)
6848 __ bne(op1, op2, L);
6849 else
6850 __ bne(op1, op2, (int)0);
6851 break;
6852 default:
6853 Unimplemented();
6854 }
6855 __ nop();
6856 %}
6857 //TODO: pipe_branchP or create pipe_branchN LEE
6858 ins_pc_relative(1);
6859 ins_pipe( pipe_alu_branch );
6860 %}
6862 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6863 match(If cmp (CmpN op1 op2));
6864 effect(USE labl);
6866 ins_cost(180);
6867 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6868 "BP$cmp $labl" %}
6869 ins_encode %{
6870 Register op1_reg = $op1$$Register;
6871 Register op2_reg = $op2$$Register;
6872 Label &L = *($labl$$label);
6873 int flag = $cmp$$cmpcode;
6875 switch(flag) {
6876 case 0x01: //equal
6877 if (&L)
6878 __ beq(op1_reg, op2_reg, L);
6879 else
6880 __ beq(op1_reg, op2_reg, (int)0);
6881 break;
6882 case 0x02: //not_equal
6883 if (&L)
6884 __ bne(op1_reg, op2_reg, L);
6885 else
6886 __ bne(op1_reg, op2_reg, (int)0);
6887 break;
6888 case 0x03: //above
6889 __ sltu(AT, op2_reg, op1_reg);
6890 if(&L)
6891 __ bne(R0, AT, L);
6892 else
6893 __ bne(R0, AT, (int)0);
6894 break;
6895 case 0x04: //above_equal
6896 __ sltu(AT, op1_reg, op2_reg);
6897 if(&L)
6898 __ beq(AT, R0, L);
6899 else
6900 __ beq(AT, R0, (int)0);
6901 break;
6902 case 0x05: //below
6903 __ sltu(AT, op1_reg, op2_reg);
6904 if(&L)
6905 __ bne(R0, AT, L);
6906 else
6907 __ bne(R0, AT, (int)0);
6908 break;
6909 case 0x06: //below_equal
6910 __ sltu(AT, op2_reg, op1_reg);
6911 if(&L)
6912 __ beq(AT, R0, L);
6913 else
6914 __ beq(AT, R0, (int)0);
6915 break;
6916 default:
6917 Unimplemented();
6918 }
6919 __ nop();
6920 %}
6921 ins_pc_relative(1);
6922 ins_pipe( pipe_alu_branch );
6923 %}
6925 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6926 match( If cmp (CmpU src1 src2) );
6927 effect(USE labl);
6928 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6930 ins_encode %{
6931 Register op1 = $src1$$Register;
6932 Register op2 = $src2$$Register;
6933 Label &L = *($labl$$label);
6934 int flag = $cmp$$cmpcode;
6936 switch(flag) {
6937 case 0x01: //equal
6938 if (&L)
6939 __ beq(op1, op2, L);
6940 else
6941 __ beq(op1, op2, (int)0);
6942 break;
6943 case 0x02: //not_equal
6944 if (&L)
6945 __ bne(op1, op2, L);
6946 else
6947 __ bne(op1, op2, (int)0);
6948 break;
6949 case 0x03: //above
6950 __ sltu(AT, op2, op1);
6951 if(&L)
6952 __ bne(AT, R0, L);
6953 else
6954 __ bne(AT, R0, (int)0);
6955 break;
6956 case 0x04: //above_equal
6957 __ sltu(AT, op1, op2);
6958 if(&L)
6959 __ beq(AT, R0, L);
6960 else
6961 __ beq(AT, R0, (int)0);
6962 break;
6963 case 0x05: //below
6964 __ sltu(AT, op1, op2);
6965 if(&L)
6966 __ bne(AT, R0, L);
6967 else
6968 __ bne(AT, R0, (int)0);
6969 break;
6970 case 0x06: //below_equal
6971 __ sltu(AT, op2, op1);
6972 if(&L)
6973 __ beq(AT, R0, L);
6974 else
6975 __ beq(AT, R0, (int)0);
6976 break;
6977 default:
6978 Unimplemented();
6979 }
6980 __ nop();
6981 %}
6983 ins_pc_relative(1);
6984 ins_pipe( pipe_alu_branch );
6985 %}
6988 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6989 match( If cmp (CmpU src1 src2) );
6990 effect(USE labl);
6991 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6993 ins_encode %{
6994 Register op1 = $src1$$Register;
6995 int val = $src2$$constant;
6996 Label &L = *($labl$$label);
6997 int flag = $cmp$$cmpcode;
6999 __ move(AT, val);
7000 switch(flag) {
7001 case 0x01: //equal
7002 if (&L)
7003 __ beq(op1, AT, L);
7004 else
7005 __ beq(op1, AT, (int)0);
7006 break;
7007 case 0x02: //not_equal
7008 if (&L)
7009 __ bne(op1, AT, L);
7010 else
7011 __ bne(op1, AT, (int)0);
7012 break;
7013 case 0x03: //above
7014 __ sltu(AT, AT, op1);
7015 if(&L)
7016 __ bne(R0, AT, L);
7017 else
7018 __ bne(R0, AT, (int)0);
7019 break;
7020 case 0x04: //above_equal
7021 __ sltu(AT, op1, AT);
7022 if(&L)
7023 __ beq(AT, R0, L);
7024 else
7025 __ beq(AT, R0, (int)0);
7026 break;
7027 case 0x05: //below
7028 __ sltu(AT, op1, AT);
7029 if(&L)
7030 __ bne(R0, AT, L);
7031 else
7032 __ bne(R0, AT, (int)0);
7033 break;
7034 case 0x06: //below_equal
7035 __ sltu(AT, AT, op1);
7036 if(&L)
7037 __ beq(AT, R0, L);
7038 else
7039 __ beq(AT, R0, (int)0);
7040 break;
7041 default:
7042 Unimplemented();
7043 }
7044 __ nop();
7045 %}
7047 ins_pc_relative(1);
7048 ins_pipe( pipe_alu_branch );
7049 %}
// Conditional branch on a signed 32-bit register/register compare (If (CmpI src1 src2)).
// cmpcode values: 0x01 eq, 0x02 ne, 0x03 gt, 0x04 ge, 0x05 lt, 0x06 le.
// gt/ge/lt/le are synthesized with slt into the scratch register AT, then a
// branch on AT vs R0 (hard-wired zero). The trailing nop fills the MIPS
// branch delay slot.
// NOTE(review): `&L` is the address of a bound reference and can never be
// null, so every `else` arm emitting a 0-displacement branch is dead code —
// presumably a `Label*` null check was intended; confirm against other ports.
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // signed greater: AT = (op2 < op1); branch if AT != 0
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        // signed greater-equal: AT = (op1 < op2); branch if AT == 0
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        // signed less: AT = (op1 < op2); branch if AT != 0
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        // signed less-equal: AT = (op2 < op1); branch if AT == 0
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed 32-bit compare against the constant zero
// (If (CmpI src1 0)). Uses the MIPS compare-with-zero branch forms
// (bgtz/bgez/bltz/blez) directly, so no scratch register is needed; the
// trailing nop fills the branch delay slot.
// NOTE(review): `&L` can never be null (address of a bound reference), so
// the `else` arms are dead code — confirm whether a Label* check was meant.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed 32-bit compare against an arbitrary
// immediate (If (CmpI src1 imm)). The immediate is first materialized into
// AT with move(); note that the slt cases then overwrite AT with the compare
// result, which is safe because the immediate is only needed once.
// The trailing nop fills the branch delay slot.
// NOTE(review): `&L` can never be null; the `else` arms are dead code.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val); // materialize the immediate into the scratch register
    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        // AT = (imm < op1); branch if set
        __ slt(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        // AT = (op1 < imm); branch if clear
        __ slt(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        // AT = (op1 < imm); branch if set
        __ slt(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        // AT = (imm < op1); branch if clear
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on an unsigned 32-bit compare against zero
// (If (CmpU src1 0)). Several cases degenerate:
//   above (u > 0)        == (src1 != 0)        -> bne against R0
//   above_equal (u >= 0) is always true        -> unconditional beq(R0, R0)
//   below (u < 0)        is never true         -> emit nothing (early return;
//                                                 the skipped delay-slot nop is
//                                                 fine since no branch was emitted)
// NOTE(review): `&L` can never be null; the `else` arms are dead code.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // unsigned >= 0 is always true: unconditional branch
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // unsigned < 0 is never true: emit no code at all
        return;
        break;
      case 0x06: //below_equal
        // unsigned <= 0 is equivalent to == 0
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on an unsigned 32-bit compare against a 16-bit
// immediate (If (CmpU src1 imm16)). above_equal/below use sltiu so the
// immediate is encoded directly in the instruction; the other cases first
// materialize the immediate into AT. Trailing nop fills the delay slot.
// NOTE(review): `&L` can never be null; the `else` arms are dead code.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        // AT = (imm <u op1); branch if set
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        // AT = (op1 <u imm); branch if clear (immediate form, no move needed)
        __ sltiu(AT, op1, val);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        // AT = (op1 <u imm); branch if set
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        // AT = (imm <u op1); branch if clear
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed 64-bit register/register compare
// (If (CmpL src1 src2)). Unlike the 32-bit variants above, each case emits
// its own delay-slot filler via delayed()->nop() instead of a single
// trailing nop after the switch.
// NOTE(review): `&target` can never be null; the `else` arms are dead code.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        // AT = (op2 < op1); branch if set
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        // AT = (op1 < op2); branch if clear
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x05: //less
        // AT = (op1 < op2); branch if set
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x06: //less_equal
        // AT = (op2 < op1); branch if clear
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed 64-bit compare against the constant zero
// (If (CmpL src1 0)). Uses compare-with-zero branch forms where available;
// the single delayed()->nop() after the switch fills the delay slot.
// NOTE(review): case 0x05 goes through slt+bne where bltz would do — looks
// like a missed simplification, not a bug. `&target` can never be null; the
// `else` arms are dead code.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on a signed 64-bit compare against an arbitrary 64-bit
// immediate (If (CmpL src1 imm)). The immediate is materialized into AT via
// set64; the slt cases then clobber AT with the compare result, which is
// safe since the constant is consumed exactly once. Trailing nop fills the
// delay slot.
// NOTE(review): `&target` can never be null; the `else` arms are dead code.
instruct branchConL_regL_immL(cmpOp cmp, mRegL src1, immL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_immL" %}
  ins_cost(180);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = AT; // scratch register holds the materialized constant

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ set64(opr2_reg, $src2$$constant);

    switch(flag) {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        break;

      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME: the float/double branches below mix ordered (c_ole/c_olt) and
//       unordered (c_ule/c_ult) compare predicates; re-verify the NaN
//       behavior of each cmpcode against the Java comparison semantics.
// Conditional branch on a single-precision float compare (If (CmpF …)).
// Uses the FPU condition flag: c.cond.s sets the flag, bc1t/bc1f branch on
// it. greater/greater_equal invert an unordered predicate (c_ule/c_ult +
// bc1f), so an unordered (NaN) compare falls through — NOT taken.
// less/less_equal use unordered predicates with bc1t, so a NaN compare IS
// taken for those codes; presumably the matcher only selects codes whose
// NaN behavior is acceptable — confirm against the matcher's float handling.
// NOTE(review): `&L` can never be null; the `else` arms are dead code.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        // taken when !(op1 <= op2 || unordered), i.e. ordered-greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        // taken when !(op1 < op2 || unordered), i.e. ordered-greater-equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        // taken when op1 < op2 or unordered
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        // taken when op1 <= op2 or unordered
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Conditional branch on a double-precision compare (If (CmpD …)).
// Mirrors branchConF_reg_reg but with c.cond.d predicates. The not_equal
// case deliberately uses c_eq_d + bc1f rather than c_ueq_d: see the inline
// comment — Double.isNaN is implemented as `f != f`, and c_ueq_d treats
// NaN as "equal", which broke that idiom.
// NOTE(review): `&L` can never be null; the `else` arms are dead code.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7711 // Call Runtime Instruction
// Direct call into the VM runtime. The actual call sequence lives in the
// Java_To_Runtime encoding class (defined elsewhere in this file).
// ins_alignment(16) keeps the call site 16-byte aligned.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
7725 //------------------------MemBar Instructions-------------------------------
7726 //Memory barrier flavors
// Acquire memory barrier: emits a full MIPS sync.
// NOTE(review): the format string still says "(empty)" but the encoding is
// no longer empty — the comment text is stale relative to the sync() below.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(400);

  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(empty);
%}
// LoadFence node: implemented as a full sync barrier.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with a lock: no code needed, the acquire semantics
// are provided by the CAS in the preceding FastLock (size(0), empty encode).
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release memory barrier: emits a full sync. The inline warning indicates
// this barrier was found to be required in practice — do not remove it.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(400);

  format %{ "MEMBAR-release @ membar_release" %}

  ins_encode %{
    // Attention: DO NOT DELETE THIS GUY!
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// StoreFence node: implemented as a full sync barrier.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with an unlock: no code needed, release semantics
// are provided by the following FastUnlock (size(0), empty encode).
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Full (volatile) memory barrier: sync, skipped entirely on uniprocessor
// systems where no ordering between CPUs is needed.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return; // Not needed on single CPU
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier elided when the matcher proves a prior store/CAS already
// provides the required store-load ordering (post_store_load_barrier).
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// Store-store barrier: emits a full sync (MIPS sync orders all accesses,
// which subsumes the store-store requirement).
// NOTE(review): the format string says "(empty encoding)" but the encoding
// emits sync() — the comment text is stale.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(400);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(empty);
%}
7835 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long as a pointer. A plain register move, elided
// when source and destination already coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a long. A plain register move, elided
// when source and destination already coincide.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: bit-copy a 32-bit float register into a GP register (mfc1),
// no conversion performed.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: bit-copy a GP register into a 32-bit float register (mtc1),
// no conversion performed.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: bit-copy a double register into a 64-bit GP register (dmfc1),
// no conversion performed.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: bit-copy a 64-bit GP register into a double register (dmtc1),
// no conversion performed.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
7920 //----------Conditional Move---------------------------------------------------
7921 // Conditional move
// Conditional move of an int selected by a signed 32-bit compare.
// Pattern: dst = (tmp1 cop tmp2) ? src : dst.
// eq/ne compute a 32-bit difference (subu32) into AT and use movz/movn
// (move-if-zero / move-if-nonzero); ordered compares use slt into AT.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);   // move if difference == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);   // move if difference != 0
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned pointer compare.
// eq/ne use a full 64-bit subtract (subu) since operands are pointers;
// ordered compares use unsigned sltu.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned compressed-oop (narrow
// pointer) compare. Narrow oops are 32-bit, hence subu32 for eq/ne; ordered
// compares use unsigned sltu.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned 32-bit compare
// (CmpU on int registers). eq/ne use a 32-bit subtract (subu32); ordered
// compares use unsigned sltu.
instruct cmovP_cmpU_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a single-precision float
// compare. c.cond.s sets the FPU flag; movt/movf move on flag true/false.
// NOTE(review): greater/greater_equal invert ordered predicates (c_ole/
// c_olt + movf), so an unordered (NaN) compare DOES move; less/less_equal
// use c_ult/c_ule + movt, so NaN also moves there — verify this matches the
// intended Java NaN semantics for each cmpcode.
instruct cmovP_cmpF_reg_reg(mRegP dst, mRegP src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned compressed-oop
// compare. Narrow oops are 32-bit, hence subu32 for eq/ne; ordered compares
// use unsigned sltu.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned full-width
// pointer compare. eq/ne use a 64-bit subtract (subu) on the pointer
// operands; ordered compares use unsigned sltu.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a double-precision compare.
// Mirrors cmovP_cmpF_reg_reg with c.cond.d predicates; see the NaN-handling
// caveat noted on the float variant and on branchConD_reg_reg.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned narrow-oop
// compare. 32-bit operands, hence subu32 for eq/ne; ordered compares use
// unsigned sltu.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned 32-bit compare.
// NOTE(review): eq/ne here use the 64-bit subu while the otherwise-parallel
// cmovP_cmpU_reg_reg uses subu32 for the same CmpU operands — equality is
// unaffected if the int registers are kept sign-extended, but the
// inconsistency is worth confirming.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by a signed 64-bit (long) compare.
// eq/ne use a 64-bit subtract into AT; ordered compares use signed slt.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a signed 64-bit (long) compare.
// Identical compare lowering to cmovI_cmpL_reg_reg.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int on a double comparison: dst = src iff
// (tmp1 <cop> tmp2). The c_*_d compares set the FP condition flag;
// movt/movf then select on that flag without a branch.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change (test c_eq and move on false instead of a
        // dedicated not-equal compare).
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);  // move when !(op1 <= op2), ordered
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);  // unordered-or-less
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer on an unsigned pointer comparison
// (cmpOpU): dst = src iff (tmp1 <cop> tmp2). sltu gives the unsigned
// order; movz/movn perform the branchless select on AT.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer on a SIGNED int comparison (cmpOp):
// dst = src iff (tmp1 <cop> tmp2). NOTE: the case labels used to read
// above/below (unsigned terms), but cop is a signed cmpOp and the code
// uses slt (signed) -- labels corrected to greater/less.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);     // 32-bit subtract for int operands
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow (compressed) oop on a signed long
// comparison: dst = src iff (tmp1 <cop> tmp2).
instruct cmovN_cmpL_reg_reg(mRegN dst, mRegN src, mRegL tmp1, mRegL tmp2, cmpOp cop) %{
  match(Set dst (CMoveN (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovN_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovN_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop on a SIGNED int comparison (cmpOp):
// dst = src iff (tmp1 <cop> tmp2). NOTE: labels used to read
// above/below, but cop is a signed cmpOp and slt is a signed compare
// -- labels corrected to greater/less.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long on an UNSIGNED int comparison (cmpOpU):
// dst = src iff (tmp1 <cop> tmp2), using sltu for unsigned order.
instruct cmovL_cmpU_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long on a float comparison: dst = src iff
// (tmp1 <cop> tmp2). c_*_s set the FP condition flag; movt/movf select.
instruct cmovL_cmpF_reg_reg(mRegL dst, mRegL src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);   // move on flag false
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a long on a signed int comparison:
// dst = src iff (tmp1 <cop> tmp2). ("great" typos fixed to "greater".)
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long on a signed long comparison:
// dst = src iff (tmp1 <cop> tmp2).
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long on an unsigned narrow-oop comparison
// (cmpOpU): dst = src iff (tmp1 <cop> tmp2).
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long on a double comparison: dst = src iff
// (tmp1 <cop> tmp2). c_*_d set the FP condition flag; movt/movf select.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);   // move on flag false
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double on a double comparison: dst = src iff
// (tmp1 <cop> tmp2). Uses the FP-to-FP conditional moves movt_d/movf_d.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);   // move on flag false
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float on a signed int comparison. Implemented
// as a branch around the FP move (mov_s) rather than a conditional
// move. Each taken branch has its delay slot filled with a nop.
// ("great" typos fixed to "greater".)
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag) {
      case 0x01: //equal
        __ bne(op1, op2, L);      // skip the move when the condition fails
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double on a signed int comparison. Same
// branch-around scheme as cmovF_cmpI_reg_reg but with mov_d.
// ("great" typos fixed to "greater".)
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag) {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double on a pointer comparison, branch-around
// scheme. NOTE(review): this uses the SIGNED slt for pointer operands
// while cmovP_cmpP_reg_reg uses sltu (unsigned); confirm whether CmpP
// can ever reach here with an ordered (non-eq/ne) signed cmpOp -- if
// so the ordered cases look suspect. ("great" typos fixed.)
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag) {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int on a float comparison: dst = src iff
// (tmp1 <cop> tmp2). c_*_s set the FP condition flag; movt/movf select.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);   // move on flag false
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float on a float comparison: dst = src iff
// (tmp1 <cop> tmp2). Uses FP-to-FP conditional moves movt_s/movf_s.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);   // move on flag false
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
// dst = -1 if src1 < src2, 0 if equal, 1 if src1 > src2.
// NOTE(review): the sign of (src1 - src2) is tested with bltz; a
// difference that overflows 64 bits would flip the sign and mis-order
// the operands -- confirm the matcher guarantees this cannot happen,
// otherwise an slt-based sequence would be overflow-safe.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // AT = src1 - src2; negative => take -1 (loaded in the delay slot).
    __ subu(AT, opr1, opr2);
    __ bltz(AT, Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Non-negative: start from 1, zero it when the difference is 0.
    __ move(dst, 1);
    __ movz(dst, R0, AT);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
//
// Manifest a CmpF result in an integer register:
// less_result = -1
// greater_result = 1
// equal_result = 0
// nan_result = -1 (c_ult_s is true for unordered operands)
//
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // unordered-or-less => -1 (loaded in the branch delay slot)
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Otherwise ordered: 1, replaced by 0 when equal.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result in an integer register; same contract as
// cmpF3_reg_reg: -1 less/NaN, 0 equal, 1 greater.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // unordered-or-less => -1 (loaded in the branch delay slot)
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Otherwise ordered: 1, replaced by 0 when equal.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero an array by storing R0 into successive doublewords.
// NOTE(review): the previous comment claimed $cnt was a byte count,
// but the loop stores one 8-byte doubleword per iteration and
// decrements the count by exactly 1 each time, so $cnt is a count of
// doublewords (HeapWords) -- confirm against the ClearArray contract.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    // Nothing to clear for a zero count; AT becomes the store cursor.
    __ beq(num, R0, done);
    __ delayed()->daddu(AT, base, R0);

    __ move(T9, num); // T9 = number of doublewords left to clear

    __ bind(Loop);
    __ sd(R0, AT, 0);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize); // advance cursor in the delay slot

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic compare of two UTF-16 char sequences.
// result < 0 / == 0 / > 0 like String.compareTo. Kills all inputs.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of lengths (in result); this is the answer
    // when the shorter string is a prefix of the longer one
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop); // Loop begin
    __ beq(cnt1, R0, done);            // ran out of chars: lengths decide
    __ delayed()->lhu(AT, str1, 0);;

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2); // advance str1 in the delay slot
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1); // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);          // first differing chars decide

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// String equality over $cnt UTF-16 chars: result = 1 if equal, 0 if a
// mismatching char is found. Caller has already matched the lengths.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1); // optimistically assume equal

    __ bind(Loop); // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1); // count == 0: all chars matched

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0); // mismatch -> 0 (delay slot)
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9651 //----------Arithmetic Instructions-------------------------------------------
9652 //----------Addition Instructions---------------------------------------------
// AddI: dst = src1 + src2 (32-bit integer add).
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ addu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AddI with immediate: dst = src1 + imm. Uses the single-instruction
// addiu32 when the constant fits in a signed 16-bit immediate,
// otherwise materializes the constant in AT first.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    int imm = $src2$$constant;

    if(Assembler::is_simm16(imm)) {
      __ addiu32(dst, src1, imm);
    } else {
      __ move(AT, imm);
      __ addu32(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
// AddP: pointer plus 64-bit offset, full-width daddu.
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// AddP with an int offset (ConvI2L folded into the add): the int is
// already kept sign-extended in its 64-bit register, so a plain daddu
// suffices.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// AddP with a constant offset: daddiu for simm16 offsets, otherwise
// build the 64-bit constant in AT and daddu.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    long src2 = $src2$$constant;
    Register dst = $dst$$Register;

    if(Assembler::is_simm16(src2)) {
      __ daddiu(dst, src1, src2);
    } else {
      __ set64(AT, src2);
      __ daddu(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Add Long Register with Register: dst = src1 + src2 (64-bit).
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Add Long with a 16-bit immediate (immL16 operand guarantees the
// constant fits daddiu's signed 16-bit field).
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Add Long = (long)int + simm16. The int operand is kept sign-extended
// in its register, so ConvI2L is a no-op and daddiu is enough.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Add Long = (long)int + long; ConvI2L folded away (int registers are
// kept sign-extended).
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Add Long = (long)int + (long)int; both ConvI2L folded away.
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Add Long = long + (long)int; ConvI2L folded away.
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
//----------Subtraction Instructions-------------------------------------------
// Integer Subtraction Instructions
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ subu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Subtract an immediate by adding its negation.
// MIPS has no subtract-immediate; immI16_sub presumably restricts the
// constant so that -src2 still fits the 16-bit signed addiu field
// (i.e. excludes -32768) — TODO confirm the operand definition.
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    __ addiu32(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Integer negation: 0 - src, emitted as subu32 from the zero register.
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu32(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long negation: 0 - src via 64-bit subu from the zero register.
// NOTE(review): pipe class is ialu_regI_regI rather than ialu_regL_regL,
// matching several other long forms in this file — confirm intentional.
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Subtract a long immediate by adding its negation (see subI_Reg_immI16_sub).
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    __ daddiu(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Subtract Long Register with Register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Subtract a sign-extended int from a long. On MIPS64 the int register
// is already sign-extended, so the 64-bit subu is used directly.
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Subtract a long from a sign-extended int.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Subtract two sign-extended ints producing a long.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Integer MOD with Register
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The Loongson gsmod path is deliberately disabled (if (0)):
    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      // Classic MIPS: div leaves the remainder in HI, fetched via mfhi.
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}

// Long MOD with Register. Unlike the int form above, the Loongson
// fused gsdmod IS used here when available.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      // ddiv leaves the 64-bit remainder in HI.
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Integer multiply, register * register.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}

// Fused multiply-add: dst = src1 * src2 + src3, via the HI/LO
// accumulator (mtlo seeds LO with the addend, madd accumulates,
// mflo reads the low 32 bits of the result).
// High cost (999) discourages selection unless clearly profitable.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst  = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// Integer divide, register / register.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps (code 0x7) when src2 == R0, i.e. divide-by-zero.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      // Loongson fused divide writes the quotient directly to dst.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // nops cover the div latency before the LO register is read.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}

// Single-precision float divide.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}

// Double-precision float divide.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply. Loongson gsdmult writes dst directly; the classic
// path uses dmult and reads the low 64 bits of the product from LO.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long multiply with a sign-extended int operand; same emission as
// mulL_reg_reg since the int register is already sign-extended.
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long divide; quotient comes from LO on the classic path.
// NOTE(review): unlike divI_Reg_Reg, no explicit divide-by-zero trap
// is emitted here — confirm the zero check is handled elsewhere.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision float add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float negate.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision fused multiply-add: dst = src1 * src2 + src3.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // The huge cost effectively prevents the matcher from selecting it.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating piont number
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision fused multiply-add: dst = src1 * src2 + src3.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // The huge cost effectively prevents the matcher from selecting it.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}


// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

// Double-precision float absolute value.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision square root.
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision square root, matched from the ideal-graph idiom
// (float)sqrt((double)src — ConvD2F(SqrtD(ConvF2D)) — collapsed to
// a single sqrt_s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions-------------

// And Instructions
// And Register with Immediate
// General 32-bit immediate: materialize the constant in AT, then AND.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with an immediate in [0, 65535]: fits andi's zero-extended
// 16-bit field, no scratch register needed.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with a low-bit mask (2^k - 1): emitted as a bit-field extract
// of the low `size` bits, where is_int_mask returns the mask width.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src  = $src1$$Register;
    int      size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long variant of the low-bit-mask AND, using the 64-bit dext.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src  = $src1$$Register;
    int      size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// XOR with an immediate in [0, 65535]: fits xori's 16-bit field.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// x ^ -1 == ~x: emitted as Loongson orn with R0 (dst = R0 | ~src),
// i.e. a one-instruction bitwise NOT. Requires Loongson 3A2000+.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same NOT-via-orn idiom where the source is a narrowed long (ConvL2I).
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long XOR with an immediate in [0, 65535] via xori.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10463 /*
10464 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10465 match(Set dst (XorL src1 M1));
10466 predicate(UseLoongsonISA);
10467 ins_cost(60);
10469 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10470 ins_encode %{
10471 Register dst = $dst$$Register;
10472 Register src = $src1$$Register;
10474 __ gsorn(dst, R0, src);
10475 %}
10476 ins_pipe( ialu_regI_regI );
10477 %}
10478 */
// (0xFF & LoadB) folds to a single unsigned-byte load: the mask makes
// the sign extension of LoadB irrelevant, so load_UB_enc (lbu) is used.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  // Fixed: format previously read "lhu" (load halfword unsigned), but the
  // encoding is load_UB_enc, an unsigned BYTE load — disassembly now matches.
  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Mirror of lbu_and_lmask with the mask on the right-hand side:
// (LoadB & 0xFF) also folds to a single unsigned-byte load.
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  // Fixed: format previously read "lhu" (load halfword unsigned), but the
  // encoding is load_UB_enc, an unsigned BYTE load — disassembly now matches.
  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Integer AND, register & register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 & ~src2: fused into the Loongson andn instruction
// (matches the ideal-graph shape AndI src1 (XorI src2 -1)).
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | ~src2: fused into the Loongson orn instruction.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 & src2: same andn, with the operands swapped to put the
// inverted input second (gsandn computes op1 & ~op2).
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 | src2: orn with swapped operands.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with a sign-extended int operand.
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with an immediate in [0, 65535] via andi (zero-extended).
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long     val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (int)(srcL & imm16): the mask clears everything above bit 15, so
// the L2I narrowing is free and a single andi suffices.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long     val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10628 /*
10629 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10630 match(Set dst (AndL src1 (XorL src2 M1)));
10631 predicate(UseLoongsonISA);
10633 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10634 ins_encode %{
10635 Register dst = $dst$$Register;
10636 Register src1 = $src1$$Register;
10637 Register src2 = $src2$$Register;
10639 __ gsandn(dst, src1, src2);
10640 %}
10641 ins_pipe( ialu_regI_regI );
10642 %}
10643 */
10645 /*
10646 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10647 match(Set dst (OrL src1 (XorL src2 M1)));
10648 predicate(UseLoongsonISA);
10650 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10651 ins_encode %{
10652 Register dst = $dst$$Register;
10653 Register src1 = $src1$$Register;
10654 Register src2 = $src2$$Register;
10656 __ gsorn(dst, src1, src2);
10657 %}
10658 ins_pipe( ialu_regI_regI );
10659 %}
10660 */
10662 /*
10663 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10664 match(Set dst (AndL (XorL src1 M1) src2));
10665 predicate(UseLoongsonISA);
10667 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10668 ins_encode %{
10669 Register dst = $dst$$Register;
10670 Register src1 = $src1$$Register;
10671 Register src2 = $src2$$Register;
10673 __ gsandn(dst, src2, src1);
10674 %}
10675 ins_pipe( ialu_regI_regI );
10676 %}
10677 */
10679 /*
10680 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10681 match(Set dst (OrL (XorL src1 M1) src2));
10682 predicate(UseLoongsonISA);
10684 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10685 ins_encode %{
10686 Register dst = $dst$$Register;
10687 Register src1 = $src1$$Register;
10688 Register src2 = $src2$$Register;
10690 __ gsorn(dst, src2, src1);
10691 %}
10692 ins_pipe( ialu_regI_regI );
10693 %}
10694 */
// AND with negative masks of the form ~(contiguous bits): implemented
// by dins-ing zeros from R0 into the cleared bit range.
// dst &= -8 == dst & ~7: clear bits [0,2].
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -5 == dst & ~4: clear bit 2 only.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -7 == dst & ~6: clear bits [1,2].
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -4 == dst & ~3: clear bits [0,1].
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -121 == dst & ~120: clear bits [3,6].
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg  = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// OR where src1 is a pointer reinterpreted as a long (CastP2X is a
// no-op at the register level, so a plain orr is emitted).
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg  = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src   = $src$$Register;
    Register dst   = $dst$$Register;
    int      shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Int shift-left where the source is a narrowed long: sll operates on
// (and sign-extends) the low 32 bits, so the L2I conversion is free.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src   = $src$$Register;
    Register dst   = $dst$$Register;
    int      shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src << 16) & 0xFFFF0000: the shift already zeroes the low 16 bits,
// so the AND with -65536 is folded away and only sll by 16 is emitted.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ((int)(src & 7) << 16) >> 16: since src & 7 <= 7, the shift pair
// (the i2s sign-extension idiom) is a no-op and only andi 7 remains.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// ((src1 | imm) << 16) >> 16 with imm in [0, 32767]: the matcher-level
// constraint makes the sign-extension pair redundant, so a single ori
// is emitted.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int      val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler for the i2s bytecode: emitted as
// a single seh (sign-extend halfword).
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode: emitted as
// a single seb (sign-extend byte).
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
10906 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10907 match(Set dst (LShiftI (ConvL2I src) shift));
10909 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10910 ins_encode %{
10911 Register src = $src$$Register;
10912 Register dst = $dst$$Register;
10913 int shamt = $shift$$constant;
10915 __ sll(dst, src, shamt);
10916 %}
10917 ins_pipe( ialu_regI_regI );
10918 %}
10920 // Shift Left by 8-bit immediate
// Int shift-left by a register amount (sllv uses the low bits of shamt).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
10935 // Shift Left Long
// Long shift-left by an immediate. dsll encodes only shift amounts
// 0-31; other values are masked to 6 bits and, for 32-63, emitted as
// dsll32 (which shifts by sa + 32).
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else {
      // mask the amount to 6 bits, matching hardware shift semantics
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of an int widened to long.
// NOTE(review): duplicates the match rule of salL_convI2L_Reg_imm below;
// only one of the two can ever be selected - confirm which is intended.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else {
      // mask to 6 bits; dsll32 handles amounts 32-63
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
10984 // Shift Left Long
// Long shift-left by a register amount (dsllv uses the low bits of shift).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of an int widened to long, immediate amount.
// NOTE(review): same match rule as salL_RegI2L_imm above (duplicate).
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      // mask to 6 bits; dsll32 handles amounts 32-63
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
11022 // Shift Right Long
// Long arithmetic shift-right by an immediate; amount masked to 6 bits
// explicitly, split between dsra (0-31) and dsra32 (32-63).
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);  // shifts by sa (= 32 + encoded amount)
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >> shift) for shift in 32..63: dsra32 already leaves a
// properly sign-extended 32-bit value, so the ConvL2I is free.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
11059 // Shift Right Long arithmetically
// Long arithmetic shift-right by a register amount.
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
11074 // Shift Right Long logically
// Long logical (unsigned) shift-right by a register amount.
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long logical shift-right by an immediate 0..31 (fits dsrl directly).
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >>> shift) & 0x7fffffff folded to a single dext: extract a
// 31-bit field starting at bit <shift>, result zero-extended.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical shift-right of a pointer reinterpreted as raw bits (CastP2X).
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long logical shift-right by 32..63: dsrl32 shifts by (amount + 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >>> shift) for shift strictly greater than 32: the result
// then has fewer than 32 significant bits, so no re-sign-extension is
// needed.  shift == 32 is excluded by the predicate (bit 31 could be set).
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical shift-right of raw pointer bits by 32..63.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
11173 // Xor Instructions
11174 // Xor Register with Register
// Int xor.  The trailing sll-by-0 re-sign-extends the low 32 bits so
// the 64-bit register keeps the canonical int representation.
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
11191 // Or Instructions
11192 // Or Register with Register
// Int or.  No re-extension: or of two canonically sign-extended ints
// is itself canonically sign-extended.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate the low bit to the top with rotr-by-1, then shift the rest
// down by (rshift - 1) if any remains.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or of a long with raw pointer bits.
// NOTE(review): the match rule uses OrI but all operands are 64-bit
// (mRegL/mRegP) - confirm this rule is intentional.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
11241 // Logical Shift Right by 8-bit immediate
// Int logical shift-right by an immediate.
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  //effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask, where mask is a contiguous low-bit mask,
// folded to a single ext (bit-field extract).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    // width of the mask in bits (number of consecutive ones)
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int rotate-left: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 mod 32, emitted as rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-left with right-amount 0..31, emitted as drotr by rshift.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-left with right-amount 32..63: drotr32 rotates by sa + 32.
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int rotate-right (Or operands in the opposite order to rolI above).
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-right with amount 0..31.
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long rotate-right with amount 32..63: drotr32 rotates by sa + 32.
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
11368 // Logical Shift Right
// Int logical shift-right by a register amount.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int arithmetic shift-right by an immediate.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int arithmetic shift-right by a register amount.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
11411 //----------Convert Int to Boolean---------------------------------------------
// Int -> boolean: dst = (src != 0) ? 1 : 0, via daddiu(dst, 1) then
// movz (conditional move when src == 0).  When dst aliases src the
// source value is first saved in AT so setting dst does not clobber it.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Int -> long: sll-by-0 sign-extends the low 32 bits.  Skipped when
// dst == src (int registers are assumed to be kept sign-extended).
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long -> int: sll-by-0 truncates to 32 bits and sign-extends.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Long -> int -> long round trip: one sll-by-0 truncates and
// sign-extends in a single instruction.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Long -> double: move the 64-bit value into an FP register, then
// convert from long to double in place.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Double -> long, fast path.  trunc_l_d yields max_long on overflow/NaN;
// when the raw result equals max_long the code distinguishes:
//   - positive overflow: keep max_long
//   - negative overflow: produce min_long (T9 - AT = -1 - max_long)
//   - NaN:               produce 0 (movt after the c_un_d compare)
// Note the branch delay slots carry real work (mtc1/daddiu).
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);           // F30 = 0.0
    __ c_ult_d(src, F30);           // src < 0.0 (or unordered)?
    __ bc1f(Done);                  // src >= 0: positive overflow, keep max_long
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);           // min_long for negative overflow
    __ movt(dst, R0);               // NaN -> 0

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// Double -> long, slow path.  NaN yields 0 directly; otherwise the FCSR
// invalid-operation flag (bit 16 of control register 31) is checked
// after trunc_l_d, and on overflow the runtime helper
// SharedRuntime::d2l computes the Java-specified result.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);                // read FCSR
    __ li(T9, 0x10000);             // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);              // no invalid op: trunc result is good
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Float -> int, fast path.  NaN is forced to 0 via movt after c_un_s.
// If the truncated result equals max_int the original sign bit of the
// float decides between max_int (positive overflow, keep dreg) and
// min_int (negative: movn with T9 = 0x80000000 from the lui delay slot).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_w_s(F30, fval);
    __ move(AT, 0x7fffffff);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    __ mfc1(AT, fval);              // raw float bits
    __ andr(AT, AT, T9);            // isolate the sign bit

    __ movn(dreg, T9, AT);          // negative overflow -> min_int

    __ bind(L);

  %}

  ins_pipe( pipe_slow );
%}
// Float -> int, slow path.  NaN yields 0; otherwise the FCSR
// invalid-operation flag is tested after trunc_w_s and, on overflow,
// SharedRuntime::f2i is called.  V0 is explicitly preserved around the
// leaf call when it is not the destination (see the 2014/01/08 note).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     *    J 982  C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    __ push(fval);
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ pop(fval);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Float -> long, fast path.  Mirrors convF2I_reg_fast but for 64-bit
// results: NaN -> 0 via movt; a trunc result equal to max_long is
// corrected to min_long (T9 = 0x8000... after dsll32) when the source
// float's sign bit is set.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_l_s(F30, fval);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);             // AT = max_long
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    __ mfc1(AT, fval);              // raw float bits
    __ andr(AT, AT, T9);            // isolate the sign bit

    __ dsll32(T9, T9, 0);           // T9 = 0x8000000000000000 (min_long)
    __ movn(dreg, T9, AT);

    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Float -> long, slow path.  NaN yields 0; otherwise test the FCSR
// invalid-operation flag after trunc_l_s and fall back to
// SharedRuntime::f2l on overflow.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Long -> float: move the 64-bit value into an FP register, then
// convert from long to single in place.
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);
    // (removed an unused local 'Label L;' - no branch is emitted here)

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Int -> float: move the word into an FP register, convert word->single.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: -1 if p < 0 else 0, i.e. arithmetic shift of
// the sign bit across the word.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: -1 if p < q else 0.  slt gives 0/1, then 0 - that gives
// the all-ones / all-zeros mask.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0, same daddiu/movz
// pattern as convI2B, with the aliasing dst == src case saved via AT.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Int -> double: move the word into an FP register, convert word->double.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Float -> double widening conversion.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double -> float narrowing conversion.
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
11823 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Double -> int, fast path.  Structure mirrors convD2L_reg_fast with
// 32-bit values: a trunc result equal to max_int is corrected to
// max_int / min_int / 0 for positive overflow / negative overflow / NaN.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);           // F30 = 0.0
    __ c_ult_d(src, F30);           // src < 0.0 (or unordered)?
    __ bc1f(Done);                  // src >= 0: keep max_int
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);         // min_int for negative overflow
    __ movt(dst, R0);               // NaN -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Double -> int, slow path: trunc, then check the FCSR
// invalid-operation flag and fall back to SharedRuntime::d2i.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
11886 // Convert oop pointer into compressed form
// Compress an oop that may be NULL (predicate excludes NotNull types).
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ encode_heap_oop(dst, src);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop statically known to be non-NULL (skips the NULL check).
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop that may be NULL.
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;

    __ decode_heap_oop(d, s);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop known non-NULL.  The in-place overload is
// used when src and dst are the same register.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (always non-NULL).
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  // Fixed the debug format string: it previously said
  // "encode_heap_oop_not_null", but this rule encodes a klass.
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow klass pointer (always non-NULL).  The in-place
// overload is used when src and dst are the same register.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  // Fixed the debug format string: it previously said
  // "decode_heap_klass_not_null"; the emitted call is decode_klass_not_null.
  format %{ "decode_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
11965 //FIXME
// Load the current JavaThread pointer.  With OPT_THREAD the thread is
// cached in the TREG register; otherwise it is recomputed.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a type-system-only node: no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a type-system-only node: zero size, no code emitted.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a type-system-only node: no code is emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
12010 // Return Instruction
12011 // Remove the return address & jump to it.
// Method return: jump through RA; the nop fills the branch delay slot.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
12024 /*
12025 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12026 instruct jumpXtnd(mRegL switch_val) %{
12027 match(Jump switch_val);
12029 ins_cost(350);
12031 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12032 "jr T9\n\t"
12033 "nop" %}
12034 ins_encode %{
12035 Register table_base = $constanttablebase;
12036 int con_offset = $constantoffset;
12037 Register switch_reg = $switch_val$$Register;
12039 if (UseLoongsonISA) {
12040 if (Assembler::is_simm(con_offset, 8)) {
12041 __ gsldx(T9, table_base, switch_reg, con_offset);
12042 } else if (Assembler::is_simm16(con_offset)) {
12043 __ daddu(T9, table_base, switch_reg);
12044 __ ld(T9, T9, con_offset);
12045 } else {
12046 __ move(T9, con_offset);
12047 __ daddu(AT, table_base, switch_reg);
12048 __ gsldx(T9, AT, T9, 0);
12049 }
12050 } else {
12051 if (Assembler::is_simm16(con_offset)) {
12052 __ daddu(T9, table_base, switch_reg);
12053 __ ld(T9, T9, con_offset);
12054 } else {
12055 __ move(T9, con_offset);
12056 __ daddu(AT, table_base, switch_reg);
12057 __ daddu(AT, T9, AT);
12058 __ ld(T9, AT, 0);
12059 }
12060 }
12062 __ jr(T9);
12063 __ nop();
12065 %}
12066 ins_pipe(pipe_jump);
12067 %}
12068 */
12070 // Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label reference means the target is not yet bound; emit a
    // zero-offset branch to be patched later.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();  // branch-delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12093 // Tail Jump; remove the return address; jump to target.
12094 // TailCall above leaves the return address around.
12095 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12096 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12097 // "restore" before this instruction (in Epilogue), we need to materialize it
12098 // in %i0.
12099 //FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target  ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *      [stubGenerator_mips.cpp] generate_forward_exception()
     *      [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop  = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand the exception oop (V0) and the faulting pc (V1, taken from
    // RA) to the rethrow stub, then jump to it.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();  // branch-delay slot
  %}
  ins_pipe( pipe_jump );
%}
12124 // ============================================================================
12125 // Procedure Call/Return Instructions
12126 // Call Java Static Instruction
12127 // Note: If this code changes, the corresponding ret_addr_offset() and
12128 // compute_padding() functions will have to be adjusted.
// Direct call to a statically-bound Java method; encoding lives in
// Java_Static_Call.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12140 // Call Java Dynamic Instruction
12141 // Note: If this code changes, the corresponding ret_addr_offset() and
12142 // compute_padding() functions will have to be adjusted.
// Dynamically-dispatched Java call (inline-cache based); encoding lives
// in Java_Dynamic_Call.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Leaf runtime call that uses no FP arguments; no safepoint, no oop map.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12167 // Prefetch instructions.
12169 instruct prefetchrNTA( memory mem ) %{
12170 match(PrefetchRead mem);
12171 ins_cost(125);
12173 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12174 ins_encode %{
12175 int base = $mem$$base;
12176 int index = $mem$$index;
12177 int scale = $mem$$scale;
12178 int disp = $mem$$disp;
12180 if( index != 0 ) {
12181 if (scale == 0) {
12182 __ daddu(AT, as_Register(base), as_Register(index));
12183 } else {
12184 __ dsll(AT, as_Register(index), scale);
12185 __ daddu(AT, as_Register(base), AT);
12186 }
12187 } else {
12188 __ move(AT, as_Register(base));
12189 }
12190 if( Assembler::is_simm16(disp) ) {
12191 __ daddiu(AT, as_Register(base), disp);
12192 __ daddiu(AT, AT, disp);
12193 } else {
12194 __ move(T9, disp);
12195 __ daddu(AT, as_Register(base), T9);
12196 }
12197 __ pref(0, AT, 0); //hint: 0:load
12198 %}
12199 ins_pipe(pipe_slow);
12200 %}
12202 instruct prefetchwNTA( memory mem ) %{
12203 match(PrefetchWrite mem);
12204 ins_cost(125);
12205 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12206 ins_encode %{
12207 int base = $mem$$base;
12208 int index = $mem$$index;
12209 int scale = $mem$$scale;
12210 int disp = $mem$$disp;
12212 if( index != 0 ) {
12213 if (scale == 0) {
12214 __ daddu(AT, as_Register(base), as_Register(index));
12215 } else {
12216 __ dsll(AT, as_Register(index), scale);
12217 __ daddu(AT, as_Register(base), AT);
12218 }
12219 } else {
12220 __ move(AT, as_Register(base));
12221 }
12222 if( Assembler::is_simm16(disp) ) {
12223 __ daddiu(AT, as_Register(base), disp);
12224 __ daddiu(AT, AT, disp);
12225 } else {
12226 __ move(T9, disp);
12227 __ daddu(AT, as_Register(base), T9);
12228 }
12229 __ pref(1, AT, 0); //hint: 1:store
12230 %}
12231 ins_pipe(pipe_slow);
12232 %}
12234 // Prefetch instructions for allocation.
// Prefetch for allocation: touch the target line with a byte load whose
// destination is R0 (the hard-wired zero register), which warms the
// cache without changing architectural state.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // disp fits in 16 bits: fold it into the load.
        if( UseLoongsonISA ) {
          // Loongson gslbx does base+index+disp addressing in one insn.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Large disp: materialize it in T9 first.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
12299 // Call runtime without safepoint
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12312 // Load Char (16bit unsigned)
// Load Char (16bit unsigned) into an int register.
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Fused load-unsigned-short + int-to-long conversion; the zero-extending
// char load already produces the correct long value.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12331 // Store Char (16bit unsigned)
// Store Char (16bit unsigned) from a register.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store char-sized zero; special-cased so R0 can be used as the source.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0f: move integer zero (R0) directly into the
// FPU register — cheaper than a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the per-nmethod constant table
// ($constanttablebase + $constantoffset).
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0: move 64-bit integer zero (R0) directly into
// the FPU register — cheaper than a constant-table load.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the per-nmethod constant table
// ($constanttablebase + $constantoffset).
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12425 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0f: since the bit pattern is all-zero, store the integer
// zero register (R0) with a 32-bit `sw` instead of going through the FPU.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx handles base+index+disp8 addressing in one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large disp: materialize it and fold into the index side.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12512 // Load Double
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12522 // Load Double - UNaligned
// Load Double - UNaligned. Currently reuses the aligned encoding.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0: materialize 0.0 in scratch FPU register F30
// (mtc1 of R0 then int->double convert) and store it with sdc1.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // F30 = (double) 0
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 handles base+index+disp8 addressing in one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large disp: materialize it and fold into the index side.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load int from an SP-relative stack slot.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    // Stack-slot displacements are assumed to fit the lw 16-bit offset.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store int to an SP-relative stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load long from an SP-relative stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store long to an SP-relative stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load pointer from an SP-relative stack slot.
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store pointer to an SP-relative stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load float from an SP-relative stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store float to an SP-relative stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
12726 // Use the same format since predicate() can not be used here.
// Load double from an SP-relative stack slot.
// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store double to an SP-relative stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path object monitor enter; falls through to the slow path via the
// flags result. The real work is in MacroAssembler::fast_lock.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path object monitor exit; the real work is in
// MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12779 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate (byte store with ordering via the
// _sync variant of the encoding).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12790 // Die now
12791 instruct ShouldNotReachHere( )
12792 %{
12793 match(Halt);
12794 ins_cost(300);
12796 // Use the following format syntax
12797 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12798 ins_encode %{
12799 // Here we should emit illtrap !
12801 __ stop("in ShoudNotReachHere");
12803 %}
12804 ins_pipe( pipe_jump );
12805 %}
// Address computation (lea) for a narrow-oop base plus 8-bit offset;
// only valid when the narrow-oop shift is zero.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register base  = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale), no offset.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register base  = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
12873 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back branch: compare two int registers and branch on the
// condition code carried by $cop. A null label reference means the
// target is not yet bound; a zero-offset branch is emitted for patching.
// NOTE(review): the case comments say above/below but `slt` is a signed
// compare — the flag codes presumably map to signed gt/ge/lt/le for
// CmpI; verify against the cmpOp operand encoding.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label    &L  = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch-delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back branch, register vs. immediate form: the immediate
// is materialized into AT and then compared exactly as in jmpLoopEnd.
instruct jmpLoopEnd_reg_immI(cmpOp cop, mRegI src1, immI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_immI" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = AT;
    Label    &L  = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // Materialize the immediate operand in AT.
    __ move(op2, $src2$$constant);

    switch(flag) {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch-delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13001 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Conditional branch on the pseudo flags register: on this port AT holds
// the flag value (nonzero = condition set), so "equal" branches when AT
// is nonzero and "not equal" when it is zero.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode) {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch-delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13035 // ============================================================================
13036 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13037 // array for an instance of the superklass. Set a hidden internal cache on a
13038 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13039 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13051 // Conditional-store of an int value.
13052 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
 // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Only base+disp addressing is supported here.
    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      // LL/SC retry loop: reload on SC failure; leave AT nonzero on
      // success, zero on mismatch (flags result consumed by jmpCon_flags).
      __ bind(again);
      if(UseSyncLevel >= 3000 || UseSyncLevel < 2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);  // delay slot: AT = 0 (failure value)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);           // SC failed: retry
      __ delayed()->addiu(AT, R0, 0xFF);  // delay slot: AT = success value
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13091 // Conditional-store of a long value.
13092 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13093 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13094 %{
13095 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13096 effect(KILL oldval);
13098 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13099 ins_encode%{
13100 Register oldval = $oldval$$Register;
13101 Register newval = $newval$$Register;
13102 Address addr((Register)$mem$$base, $mem$$disp);
13104 int index = $mem$$index;
13105 int scale = $mem$$scale;
13106 int disp = $mem$$disp;
13108 guarantee(Assembler::is_simm16(disp), "");
13110 if( index != 0 ) {
13111 __ stop("in storeIConditional: index != 0");
13112 } else {
13113 __ cmpxchg(newval, addr, oldval);
13114 }
13115 %}
13116 ins_pipe( long_memory_op );
13117 %}
13120 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
13121 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
13122 effect(KILL oldval);
13123 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
13124 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapL\n\t"
13125 "MOV $res, 1 @ compareAndSwapI\n\t"
13126 "BNE AT, R0 @ compareAndSwapI\n\t"
13127 "MOV $res, 0 @ compareAndSwapI\n"
13128 "L:" %}
13129 ins_encode %{
13130 Register newval = $newval$$Register;
13131 Register oldval = $oldval$$Register;
13132 Register res = $res$$Register;
13133 Address addr($mem_ptr$$Register, 0);
13134 Label L;
13136 __ cmpxchg32(newval, addr, oldval);
13137 __ move(res, AT);
13138 %}
13139 ins_pipe( long_memory_op );
13140 %}
13142 instruct compareAndSwapL( mRegI res, mRegP mem_ptr, s2RegL oldval, mRegL newval) %{
13143 predicate(VM_Version::supports_cx8());
13144 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
13145 effect(KILL oldval);
13146 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
13147 "MOV $res, 1 @ compareAndSwapI\n\t"
13148 "BNE AT, R0 @ compareAndSwapI\n\t"
13149 "MOV $res, 0 @ compareAndSwapI\n"
13150 "L:" %}
13151 ins_encode %{
13152 Register newval = $newval$$Register;
13153 Register oldval = $oldval$$Register;
13154 Register res = $res$$Register;
13155 Address addr($mem_ptr$$Register, 0);
13156 Label L;
13158 __ cmpxchg(newval, addr, oldval);
13159 __ move(res, AT);
13160 %}
13161 ins_pipe( long_memory_op );
13162 %}
13164 //FIXME:
//FIXME:
// CompareAndSwapP: 64-bit pointer CAS; cmpxchg leaves the result flag in
// AT, copied to $res.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwapN: 32-bit narrow-oop CAS.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparison.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13208 //----------Max and Min--------------------------------------------------------
13209 // Min Instructions
13210 ////
13211 // *** Min and Max using the conditional move are slower than the
13212 // *** branch version on a Pentium III.
13213 // // Conditional move for min
13214 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13215 // effect( USE_DEF op2, USE op1, USE cr );
13216 // format %{ "CMOVlt $op2,$op1\t! min" %}
13217 // opcode(0x4C,0x0F);
13218 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13219 // ins_pipe( pipe_cmov_reg );
13220 //%}
13221 //
13222 //// Min Register with Register (P6 version)
13223 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13224 // predicate(VM_Version::supports_cmov() );
13225 // match(Set op2 (MinI op1 op2));
13226 // ins_cost(200);
13227 // expand %{
13228 // eFlagsReg cr;
13229 // compI_eReg(cr,op1,op2);
13230 // cmovI_reg_lt(op2,op1,cr);
13231 // %}
13232 //%}
13234 // Min Register with Register (generic version)
// Integer minimum: dst = min(dst, src), branch-free via slt + movn.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (src < dst) ? 1 : 0; movn copies src into dst when AT != 0,
    // so dst ends up holding the smaller of the two values.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13253 // Max Register with Register
13254 // *** Min and Max using the conditional move are slower than the
13255 // *** branch version on a Pentium III.
13256 // // Conditional move for max
13257 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13258 // effect( USE_DEF op2, USE op1, USE cr );
13259 // format %{ "CMOVgt $op2,$op1\t! max" %}
13260 // opcode(0x4F,0x0F);
13261 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13262 // ins_pipe( pipe_cmov_reg );
13263 //%}
13264 //
13265 // // Max Register with Register (P6 version)
13266 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13267 // predicate(VM_Version::supports_cmov() );
13268 // match(Set op2 (MaxI op1 op2));
13269 // ins_cost(200);
13270 // expand %{
13271 // eFlagsReg cr;
13272 // compI_eReg(cr,op1,op2);
13273 // cmovI_reg_gt(op2,op1,cr);
13274 // %}
13275 //%}
13277 // Max Register with Register (generic version)
// Integer maximum: dst = max(dst, src), branch-free via slt + movn.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src) ? 1 : 0; movn copies src into dst when AT != 0,
    // so dst ends up holding the larger of the two values.
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Integer maximum against zero: dst = max(dst, 0), i.e. clamp negatives to 0.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0) ? 1 : 0; when set, replace dst with R0 (zero).
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: AndL with 0xFFFFFFFF is
// implemented as a single dext (extract bits [0,32) into dst).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two 32-bit ints into one long: src1 becomes the low word,
// src2 the high word, i.e. dst = (src2 << 32) | zext(src1).
// The three cases handle register aliasing between dst and the sources;
// statement order within each case matters so a source is not clobbered
// before it is consumed.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low word already in place; insert src2 into bits [32,64).
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move the high word up first, then insert src1 into bits [0,32).
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // No aliasing: extract low word from src1, insert high word from src2.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13350 // Zero-extend convert int to long
// Zero-extend convert int to long: ConvI2L followed by AndL 0xFFFFFFFF
// collapses to one dext of bits [0,32).
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// long -> int -> long round trip masked with 0xFFFFFFFF: the whole
// ConvI2L(ConvL2I(src)) & mask pattern is just a zero-extension of the
// low 32 bits, implemented as one dext.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
13379 // Match loading integer and casting it to unsigned int in long register.
13380 // LoadI + ConvI2L + AndL 0xffffffff.
// Fold LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) into one
// zero-extending 32-bit load; per the format this uses lwu via load_N_enc.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand;
// the matcher needs both orderings spelled out explicitly.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13398 // ============================================================================
13399 // Safepoint Instruction
// Safepoint poll with the polling-page address already in a register.
// Loads from the polling page; the VM arms the page so the load traps when
// a safepoint is requested.  predicate(false) keeps this variant disabled —
// safePoint_poll below is the form actually selected.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Mark the load so the signal handler can recognize it as a poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address into T9 and load
// from it.  The relocation marks the load so the VM's signal handler can
// identify a trap here as a safepoint poll.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13435 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target: float arithmetic already produces
// correctly rounded 32-bit results, so no code is emitted.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op on this target: double arithmetic already produces
// correctly rounded 64-bit results, so no code is emitted.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13455 //---------- Zeros Count Instructions ------------------------------------------
13456 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the hardware clz instruction;
// only selected when the VM flag enables it.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a long with the 64-bit dclz instruction;
// only selected when the VM flag enables it.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int; only selected when the VM flag enables it.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Godson (Loongson) specific instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long with the Godson (Loongson) specific dctz
// instruction; only selected when the VM flag enables it.
// Fix: the format string previously printed "dcto", which does not match
// the dctz instruction actually emitted in ins_encode.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13502 // ====================VECTOR INSTRUCTIONS=====================================
13504 // Load vectors (8 bytes long)
// Load an 8-byte vector from memory into a vecD (double FP) register,
// reusing the scalar double load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
13514 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD (double FP) register to memory,
// reusing the scalar double store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes using the Loongson 3A2000 DSP
// replv_ob instruction, then move the result into the FP vector register.
// Cheaper (ins_cost 100) than the generic dins sequence below.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic 8-byte replicate: build the splat in AT by doubling the filled
// width with successive insert ops (byte -> half -> word -> doubleword),
// then move AT into the FP vector register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);      // copy byte 0 into byte 1
    __ dins(AT, AT, 16, 16);    // copy halfword 0 into halfword 1
    __ dinsu(AT, AT, 32, 32);   // copy word 0 into word 1
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte into all 8 lanes using the Loongson 3A2000
// DSP repl_ob (immediate form), then move the result into the FP register.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate 8-byte replicate: load the constant into AT, then
// widen it to a full splat with the dins/dinsu doubling sequence.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);      // copy byte 0 into byte 1
    __ dins(AT, AT, 16, 16);    // copy halfword 0 into halfword 1
    __ dinsu(AT, AT, 32, 32);   // copy word 0 into word 1
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero into all 8 byte lanes: a single move of R0 suffices.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate -1 (all bits set) into all 8 byte lanes: nor(AT, R0, R0)
// produces 0xFFFFFFFFFFFFFFFF in one instruction.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes using the Loongson 3A2000 DSP
// replv_qh instruction, then move the result into the FP vector register.
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic 4-short replicate: double the filled width with insert ops
// (half -> word -> doubleword), then move AT into the FP vector register.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // copy halfword 0 into halfword 1
    __ dinsu(AT, AT, 32, 32);   // copy word 0 into word 1
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short into 4 lanes on Loongson 3A2000.
// Uses the immediate-form repl_qh when the constant fits, otherwise
// materializes it with li32 and uses the register-form replv_qh.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "repl_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate 4-short replicate: load the constant, then widen it
// to a full splat with the dins/dinsu doubling sequence.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);    // copy halfword 0 into halfword 1
    __ dinsu(AT, AT, 32, 32);   // copy word 0 into word 1
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero into all 4 short lanes: a single move of R0 suffices.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate -1 (all bits set) into all 4 short lanes via nor(AT, R0, R0).
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13700 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both 32-bit lanes: dins writes bits [0,32) and
// dinsu writes bits [32,64), so together they fully define AT before the
// move into the FP vector register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13715 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an immediate int into both 32-bit lanes: materialize the
// constant with li32, duplicate the low word into the high word with
// dinsu, then move AT into the FP vector register.
// Fix: the format listing was garbled ("li32 AT, [$con], 32" /
// "dinsu AT, AT") and did not match the emitted code; it now mirrors
// the actual li32 / dinsu / dmtc1 sequence.
// NOTE(review): tmp is declared KILLed but not referenced in ins_encode —
// presumably a conservative leftover; kept to preserve the matcher contract.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "li32 AT, [$con]\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ li32(AT, val);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13732 // Replicate integer (4 byte) scalar zero to be vector
// Replicate zero into both int lanes: a single move of R0 suffices.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13743 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate -1 (all bits set) into both int lanes via nor(AT, R0, R0).
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13755 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes by packing the same single into the
// upper and lower halves of a paired-single (cvt.ps.s with src twice).
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13766 // Replicate float (4 byte) scalar zero to be vector
// Replicate +0.0f into both float lanes: moving R0 into the FP register
// yields the all-zero bit pattern, which is 2 x +0.0f.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13778 // ====================VECTOR ARITHMETIC=======================================
13780 // --------------------------------- ADD --------------------------------------
13782 // Floats vector add
13783 // kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Packed 2-float add, two-operand form: dst += src (paired-single add.ps).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2-float add, three-operand form: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13804 // --------------------------------- SUB --------------------------------------
13806 // Floats vector sub
// Packed 2-float subtract: dst -= src (paired-single sub.ps).
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13817 // --------------------------------- MUL --------------------------------------
13819 // Floats vector mul
// Packed 2-float multiply, two-operand form: dst *= src (mul.ps).
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2-float multiply, three-operand form: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13840 // --------------------------------- DIV --------------------------------------
// MIPS does not have a div.ps instruction
13843 // --------------------------------- MADD --------------------------------------
13844 // Floats vector madd
13845 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
13846 // predicate(n->as_Vector()->length() == 2);
13847 // match(Set dst (AddVF (MulVF src1 src2) src3));
13848 // ins_cost(50);
13849 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
13850 // ins_encode %{
13851 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13852 // %}
13853 // ins_pipe( fpu_regF_regF );
13854 //%}
13857 //----------PEEPHOLE RULES-----------------------------------------------------
13858 // These must follow all instruction definitions as they use the names
13859 // defined in the instructions definitions.
13860 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13862 //
13863 // peepconstraint %{
13864 // (instruction_number.operand_name relational_op instruction_number.operand_name
13865 // [, ...] );
13866 // // instruction numbers are zero-based using left to right order in peepmatch
13867 //
13868 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13869 // // provide an instruction_number.operand_name for each operand that appears
13870 // // in the replacement instruction's match rule
13871 //
13872 // ---------VM FLAGS---------------------------------------------------------
13873 //
13874 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13875 //
13876 // Each peephole rule is given an identifying number starting with zero and
13877 // increasing by one in the order seen by the parser. An individual peephole
13878 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13879 // on the command-line.
13880 //
13881 // ---------CURRENT LIMITATIONS----------------------------------------------
13882 //
13883 // Only match adjacent instructions in same basic block
13884 // Only equality constraints
13885 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13886 // Only one replacement instruction
13887 //
13888 // ---------EXAMPLE----------------------------------------------------------
13889 //
13890 // // pertinent parts of existing instructions in architecture description
13891 // instruct movI(eRegI dst, eRegI src) %{
13892 // match(Set dst (CopyI src));
13893 // %}
13894 //
13895 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13896 // match(Set dst (AddI dst src));
13897 // effect(KILL cr);
13898 // %}
13899 //
13900 // // Change (inc mov) to lea
13901 // peephole %{
// // increment preceded by register-register move
13903 // peepmatch ( incI_eReg movI );
13904 // // require that the destination register of the increment
13905 // // match the destination register of the move
13906 // peepconstraint ( 0.dst == 1.dst );
13907 // // construct a replacement instruction that sets
13908 // // the destination to ( move's source register + one )
13909 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13910 // %}
13911 //
13912 // Implementation no longer uses movX instructions since
13913 // machine-independent system no longer uses CopyX nodes.
13914 //
13915 // peephole %{
13916 // peepmatch ( incI_eReg movI );
13917 // peepconstraint ( 0.dst == 1.dst );
13918 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13919 // %}
13920 //
13921 // peephole %{
13922 // peepmatch ( decI_eReg movI );
13923 // peepconstraint ( 0.dst == 1.dst );
13924 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13925 // %}
13926 //
13927 // peephole %{
13928 // peepmatch ( addI_eReg_imm movI );
13929 // peepconstraint ( 0.dst == 1.dst );
13930 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13931 // %}
13932 //
13933 // peephole %{
13934 // peepmatch ( addP_eReg_imm movP );
13935 // peepconstraint ( 0.dst == 1.dst );
13936 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13937 // %}
13939 // // Change load of spilled value to only a spill
13940 // instruct storeI(memory mem, eRegI src) %{
13941 // match(Set mem (StoreI mem src));
13942 // %}
13943 //
13944 // instruct loadI(eRegI dst, memory mem) %{
13945 // match(Set dst (LoadI mem));
13946 // %}
13947 //
13948 //peephole %{
13949 // peepmatch ( loadI storeI );
13950 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13951 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13952 //%}
13954 //----------SMARTSPILL RULES---------------------------------------------------
13955 // These must follow all instruction definitions as they use the names
13956 // defined in the instructions definitions.