Tue, 14 Mar 2017 14:41:34 -0400
[C2] Add some instructions in mips_64.ad for UseCompressedOops.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
// S5 is used for the heapbase of compressed oops
// Allocation order for the general-purpose register chunk.
// FIX: the original list was missing the comma after "GP, GP_H",
// which ran GP_H and RA together and broke the alloc_class list.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H,  // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H,
    RA, RA_H,
    SP, SP_H,  // stack_pointer
    FP, FP_H   // frame_pointer
);
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
definitions %{
  // Baseline cost unit referenced by the instruction cost attributes below.
  int_def DEFAULT_COST      (    100,     100);
  // Effectively "never pick this" cost.
  int_def HUGE_COST         (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  // we use jr instruction to construct call, so more expensive
  // by yjl 2/28/2006
  int_def CALL_COST         (    500, DEFAULT_COST * 5);
/*
  int_def EQUAL             (      1, 1  );
  int_def NOT_EQUAL         (      2, 2  );
  int_def GREATER           (      3, 3  );
  int_def GREATER_EQUAL     (      4, 4  );
  int_def LESS              (      5, 5  );
  int_def LESS_EQUAL        (      6, 6  );
*/
%}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Platform hooks consulted by Compile::shorten_branches; this port emits
// no call trampolines, so both sizes are zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emission and sizing of the exception and deopt handler stubs
// (bodies of the emit_* functions live in the source %{ %} block below).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // handlers are emitted 16-byte aligned
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // matches the __ align(16) in emit_deopt_handler
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
// Emit exception handler code.
// Materializes the exception blob's entry point into T9 and jumps there
// (jr, not jalr -- see the FIXME below for why).
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
   * However, this will trigger an assert after the 40th method:
   *
   *   39  b  java.lang.Throwable::<init> (25 bytes)
   *   --- ns java.lang.Throwable::fillInStackTrace
   *   40 !b  java.net.URLClassLoader::findClass (29 bytes)
   *   /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(),"must be")
   *   40  made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert will disappear, but WebClient will
   * fail after the 403th method with unknown reason.
   */
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  // Load the 48-bit target address into T9 with a patchable sequence,
  // then jump via register (with branch-delay-slot nop).
  __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();
  __ align(16);   // keep the stub size consistent with size_exception_handler()
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Materializes the deopt blob's unpack entry into T9 and calls it via jalr
// (linking call, unlike the exception handler's jr).
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  // Patchable 48-bit load of the unpack entry, then call through T9.
  __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();
  __ align(16);   // keep the stub size consistent with size_deopt_handler()
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
//FIXME
// Emit the call stub used when a compiled-Java call must fall back to the
// interpreter. The stub is fixed up when the corresponding call is converted
// from calling compiled code to calling interpreted code.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Logical shape (x86 analogue):
  //   mov rbx,0
  //   jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  // 2012/10/29 Jin: Rmethod contains the methodOop; it should be relocated for GC.
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */
  // static stub relocation also tags the methodOop in the code-stream.
  __ patchable_set48(S3, (long)0);
  // The zero above is recognized as unresolved by relocs/nativeInst/ic code.

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;   // placeholder target, patched at resolution
  __ patchable_set48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
677 // size of call stub, compiled java to interpretor
678 uint size_java_to_interp() {
679 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
680 return round_to(size, 16);
681 }
// Number of relocation entries needed for the compiled-java-to-interpreter
// call stub (entries emitted in emit_java_to_interp plus in Java_Static_Call).
uint reloc_java_to_interp() {
  return 16; // in emit_java_to_interp + in Java_Static_Call
}
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else {
691 assert(false, "Not implemented yet !" );
692 Unimplemented();
693 }
694 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// Float/double conditional moves go through the generic ConditionalMoveLimit
// path (comment "No CMOVF/CMOVD with SSE2" inherited from the x86 port).
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs.
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// Compressed oops: this port does not use complex (decode-folded)
// addressing for narrow oop loads/stores.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same policy for compressed class pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion.
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Max vector size in bytes. 0 if not supported.
// This port only supports 8-byte (VecD) vectors.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: only the 8-byte VecD size is valid here.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case  8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are not supported on this port.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// Register for DIVI projection of divmodI.
// DivModI is not matched on this port, so this must never be called.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Map an OptoReg number to an FPU save-area offset.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
799 // Return whether or not this register is ever used as an argument. This
800 // function is used on startup to build the trampoline stubs in generateOptoStub.
801 // Registers not mentioned will be killed by the VM call in the trampoline, and
802 // arguments in those registers not be available to the callee.
803 bool Matcher::can_be_java_arg( int reg ) {
804 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
805 if ( reg == T0_num || reg == T0_H_num
806 || reg == A0_num || reg == A0_H_num
807 || reg == A1_num || reg == A1_H_num
808 || reg == A2_num || reg == A2_H_num
809 || reg == A3_num || reg == A3_H_num
810 || reg == A4_num || reg == A4_H_num
811 || reg == A5_num || reg == A5_H_num
812 || reg == A6_num || reg == A6_H_num
813 || reg == A7_num || reg == A7_H_num )
814 return true;
816 if ( reg == F12_num || reg == F12_H_num
817 || reg == F13_num || reg == F13_H_num
818 || reg == F14_num || reg == F14_H_num
819 || reg == F15_num || reg == F15_H_num
820 || reg == F16_num || reg == F16_H_num
821 || reg == F17_num || reg == F17_H_num
822 || reg == F18_num || reg == F18_H_num
823 || reg == F19_num || reg == F19_H_num )
824 return true;
826 return false;
827 }
// An argument register can always be used as a spill target.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Do not strength-reduce long division by a constant via assembler tricks.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL.
// DivModL is not matched on this port, so this must never be called.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP save register mask for method-handle invokes: use the frame pointer.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics.
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  // Emitted call sequence (one instruction per line):
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  // loadIC <--- skip (the 4-instruction inline-cache load below)
  //   lui
  //   ori
  //   nop
  //   nop
  //   jalr
  //   nop
  // Skip past the 4-instruction IC load before computing alignment padding.
  current_offset += 4 * 4;
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  // Emitted call sequence:
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectNode::compute_padding(int current_offset) const {
  // Emitted call sequence:
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  // Emitted call sequence:
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// The ecx parameter to rep stos for the ClearArray node is in dwords.
const bool Matcher::init_array_count_is_in_bytes = false;

// Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the original comment claimed MIPS "needs" it because it
// lacks absolute addressing, yet the code returns false -- confirm which
// is intended before relying on this.
bool SafePointNode::needs_polling_address_input() {
  return false;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  // The static call site is six fixed-width (4-byte) instructions:
  //lui
  //ori
  //nop
  //nop
  //jalr
  //nop
  return 24;  // 6 instructions * 4 bytes
}
// Byte offset from the start of a dynamic (inline-cache) call to the
// return address: 4 instructions loading the IC klass plus the 6
// instructions of the call sequence, 4 bytes each.
int MachCallDynamicJavaNode::ret_addr_offset() {
  // IC klass load:
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass
  // Call sequence:
  //lui T9
  //ori T9
  //nop
  //nop
  //jalr T9
  //nop
  return 4 * 4 + 4 * 6;  // = 40 bytes
}
978 //=============================================================================
// Figure out which register class each belongs in: rc_int, rc_float, rc_stack
enum RC { rc_bad, rc_int, rc_float, rc_stack };

// Classify an allocator register name: invalid -> rc_bad, stack slot ->
// rc_stack, GPR -> rc_int, otherwise it must be an FPR -> rc_float.
static enum RC rc_class( OptoReg::Name reg ) {
  if( !OptoReg::is_valid(reg) ) return rc_bad;
  if (OptoReg::is_stack(reg)) return rc_stack;
  VMReg r = OptoReg::as_VMReg(reg);
  if (r->is_Register()) return rc_int;
  assert(r->is_FloatRegister(), "must be");
  return rc_float;
}
991 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
992 // Get registers to move
993 OptoReg::Name src_second = ra_->get_reg_second(in(1));
994 OptoReg::Name src_first = ra_->get_reg_first(in(1));
995 OptoReg::Name dst_second = ra_->get_reg_second(this );
996 OptoReg::Name dst_first = ra_->get_reg_first(this );
998 enum RC src_second_rc = rc_class(src_second);
999 enum RC src_first_rc = rc_class(src_first);
1000 enum RC dst_second_rc = rc_class(dst_second);
1001 enum RC dst_first_rc = rc_class(dst_first);
1003 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1005 // Generate spill code!
1006 int size = 0;
1008 if( src_first == dst_first && src_second == dst_second )
1009 return 0; // Self copy, no move
1011 if (src_first_rc == rc_stack) {
1012 // mem ->
1013 if (dst_first_rc == rc_stack) {
1014 // mem -> mem
1015 assert(src_second != dst_first, "overlap");
1016 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1017 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1018 // 64-bit
1019 int src_offset = ra_->reg2offset(src_first);
1020 int dst_offset = ra_->reg2offset(dst_first);
1021 if (cbuf) {
1022 MacroAssembler _masm(cbuf);
1023 __ ld(AT, Address(SP, src_offset));
1024 __ sd(AT, Address(SP, dst_offset));
1025 #ifndef PRODUCT
1026 } else {
1027 if(!do_size){
1028 if (size != 0) st->print("\n\t");
1029 st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
1030 "sd AT, [SP + #%d]",
1031 src_offset, dst_offset);
1032 }
1033 #endif
1034 }
1035 size += 8;
1036 } else {
1037 // 32-bit
1038 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1039 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1040 // No pushl/popl, so:
1041 int src_offset = ra_->reg2offset(src_first);
1042 int dst_offset = ra_->reg2offset(dst_first);
1043 if (cbuf) {
1044 MacroAssembler _masm(cbuf);
1045 __ lw(AT, Address(SP, src_offset));
1046 __ sw(AT, Address(SP, dst_offset));
1047 #ifndef PRODUCT
1048 } else {
1049 if(!do_size){
1050 if (size != 0) st->print("\n\t");
1051 st->print("lw AT, [SP + #%d] spill 2\n\t"
1052 "sw AT, [SP + #%d]\n\t",
1053 src_offset, dst_offset);
1054 }
1055 #endif
1056 }
1057 size += 8;
1058 }
1059 return size;
1060 } else if (dst_first_rc == rc_int) {
1061 // mem -> gpr
1062 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1063 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1064 // 64-bit
1065 int offset = ra_->reg2offset(src_first);
1066 if (cbuf) {
1067 MacroAssembler _masm(cbuf);
1068 __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1069 #ifndef PRODUCT
1070 } else {
1071 if(!do_size){
1072 if (size != 0) st->print("\n\t");
1073 st->print("ld %s, [SP + #%d]\t# spill 3",
1074 Matcher::regName[dst_first],
1075 offset);
1076 }
1077 #endif
1078 }
1079 size += 4;
1080 } else {
1081 // 32-bit
1082 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1083 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1084 int offset = ra_->reg2offset(src_first);
1085 if (cbuf) {
1086 MacroAssembler _masm(cbuf);
1087 if (this->ideal_reg() == Op_RegI)
1088 __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1089 else
1090 __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1091 #ifndef PRODUCT
1092 } else {
1093 if(!do_size){
1094 if (size != 0) st->print("\n\t");
1095 if (this->ideal_reg() == Op_RegI)
1096 st->print("lw %s, [SP + #%d]\t# spill 4",
1097 Matcher::regName[dst_first],
1098 offset);
1099 else
1100 st->print("lwu %s, [SP + #%d]\t# spill 5",
1101 Matcher::regName[dst_first],
1102 offset);
1103 }
1104 #endif
1105 }
1106 size += 4;
1107 }
1108 return size;
1109 } else if (dst_first_rc == rc_float) {
1110 // mem-> xmm
1111 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1112 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1113 // 64-bit
1114 int offset = ra_->reg2offset(src_first);
1115 if (cbuf) {
1116 MacroAssembler _masm(cbuf);
1117 __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1118 #ifndef PRODUCT
1119 } else {
1120 if(!do_size){
1121 if (size != 0) st->print("\n\t");
1122 st->print("ldc1 %s, [SP + #%d]\t# spill 6",
1123 Matcher::regName[dst_first],
1124 offset);
1125 }
1126 #endif
1127 }
1128 size += 4;
1129 } else {
1130 // 32-bit
1131 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1132 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1133 int offset = ra_->reg2offset(src_first);
1134 if (cbuf) {
1135 MacroAssembler _masm(cbuf);
1136 __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1137 #ifndef PRODUCT
1138 } else {
1139 if(!do_size){
1140 if (size != 0) st->print("\n\t");
1141 st->print("lwc1 %s, [SP + #%d]\t# spill 7",
1142 Matcher::regName[dst_first],
1143 offset);
1144 }
1145 #endif
1146 }
1147 size += 4;
1148 }
1149 return size;
1150 }
1151 } else if (src_first_rc == rc_int) {
1152 // gpr ->
1153 if (dst_first_rc == rc_stack) {
1154 // gpr -> mem
1155 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1156 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1157 // 64-bit
1158 int offset = ra_->reg2offset(dst_first);
1159 if (cbuf) {
1160 MacroAssembler _masm(cbuf);
1161 __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1162 #ifndef PRODUCT
1163 } else {
1164 if(!do_size){
1165 if (size != 0) st->print("\n\t");
1166 st->print("sd %s, [SP + #%d] # spill 8",
1167 Matcher::regName[src_first],
1168 offset);
1169 }
1170 #endif
1171 }
1172 size += 4;
1173 } else {
1174 // 32-bit
1175 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1176 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1177 int offset = ra_->reg2offset(dst_first);
1178 if (cbuf) {
1179 MacroAssembler _masm(cbuf);
1180 __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1181 #ifndef PRODUCT
1182 } else {
1183 if(!do_size){
1184 if (size != 0) st->print("\n\t");
1185 st->print("sw %s, [SP + #%d]\t# spill 9",
1186 Matcher::regName[src_first], offset);
1187 }
1188 #endif
1189 }
1190 size += 4;
1191 }
1192 return size;
1193 } else if (dst_first_rc == rc_int) {
1194 // gpr -> gpr
1195 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1196 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1197 // 64-bit
1198 if (cbuf) {
1199 MacroAssembler _masm(cbuf);
1200 __ move(as_Register(Matcher::_regEncode[dst_first]),
1201 as_Register(Matcher::_regEncode[src_first]));
1202 #ifndef PRODUCT
1203 } else {
1204 if(!do_size){
1205 if (size != 0) st->print("\n\t");
1206 st->print("move(64bit) %s <-- %s\t# spill 10",
1207 Matcher::regName[dst_first],
1208 Matcher::regName[src_first]);
1209 }
1210 #endif
1211 }
1212 size += 4;
1213 return size;
1214 } else {
1215 // 32-bit
1216 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1217 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1218 if (cbuf) {
1219 MacroAssembler _masm(cbuf);
1220 if (this->ideal_reg() == Op_RegI)
1221 __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
1222 else
1223 __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
1225 #ifndef PRODUCT
1226 } else {
1227 if(!do_size){
1228 if (size != 0) st->print("\n\t");
1229 st->print("move(32-bit) %s <-- %s\t# spill 11",
1230 Matcher::regName[dst_first],
1231 Matcher::regName[src_first]);
1232 }
1233 #endif
1234 }
1235 size += 4;
1236 return size;
1237 }
1238 } else if (dst_first_rc == rc_float) {
1239 // gpr -> xmm
1240 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1241 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1242 // 64-bit
1243 if (cbuf) {
1244 MacroAssembler _masm(cbuf);
1245 __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
1246 #ifndef PRODUCT
1247 } else {
1248 if(!do_size){
1249 if (size != 0) st->print("\n\t");
1250 st->print("dmtc1 %s, %s\t# spill 12",
1251 Matcher::regName[dst_first],
1252 Matcher::regName[src_first]);
1253 }
1254 #endif
1255 }
1256 size += 4;
1257 } else {
1258 // 32-bit
1259 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1260 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1261 if (cbuf) {
1262 MacroAssembler _masm(cbuf);
1263 __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
1264 #ifndef PRODUCT
1265 } else {
1266 if(!do_size){
1267 if (size != 0) st->print("\n\t");
1268 st->print("mtc1 %s, %s\t# spill 13",
1269 Matcher::regName[dst_first],
1270 Matcher::regName[src_first]);
1271 }
1272 #endif
1273 }
1274 size += 4;
1275 }
1276 return size;
1277 }
1278 } else if (src_first_rc == rc_float) {
1279 // xmm ->
1280 if (dst_first_rc == rc_stack) {
1281 // xmm -> mem
1282 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1283 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1284 // 64-bit
1285 int offset = ra_->reg2offset(dst_first);
1286 if (cbuf) {
1287 MacroAssembler _masm(cbuf);
1288 __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
1289 #ifndef PRODUCT
1290 } else {
1291 if(!do_size){
1292 if (size != 0) st->print("\n\t");
1293 st->print("sdc1 %s, [SP + #%d]\t# spill 14",
1294 Matcher::regName[src_first],
1295 offset);
1296 }
1297 #endif
1298 }
1299 size += 4;
1300 } else {
1301 // 32-bit
1302 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1303 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1304 int offset = ra_->reg2offset(dst_first);
1305 if (cbuf) {
1306 MacroAssembler _masm(cbuf);
1307 __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
1308 #ifndef PRODUCT
1309 } else {
1310 if(!do_size){
1311 if (size != 0) st->print("\n\t");
1312 st->print("swc1 %s, [SP + #%d]\t# spill 15",
1313 Matcher::regName[src_first],
1314 offset);
1315 }
1316 #endif
1317 }
1318 size += 4;
1319 }
1320 return size;
1321 } else if (dst_first_rc == rc_int) {
1322 // xmm -> gpr
1323 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1324 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1325 // 64-bit
1326 if (cbuf) {
1327 MacroAssembler _masm(cbuf);
1328 __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1329 #ifndef PRODUCT
1330 } else {
1331 if(!do_size){
1332 if (size != 0) st->print("\n\t");
1333 st->print("dmfc1 %s, %s\t# spill 16",
1334 Matcher::regName[dst_first],
1335 Matcher::regName[src_first]);
1336 }
1337 #endif
1338 }
1339 size += 4;
1340 } else {
1341 // 32-bit
1342 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1343 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1344 if (cbuf) {
1345 MacroAssembler _masm(cbuf);
1346 __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1347 #ifndef PRODUCT
1348 } else {
1349 if(!do_size){
1350 if (size != 0) st->print("\n\t");
1351 st->print("mfc1 %s, %s\t# spill 17",
1352 Matcher::regName[dst_first],
1353 Matcher::regName[src_first]);
1354 }
1355 #endif
1356 }
1357 size += 4;
1358 }
1359 return size;
1360 } else if (dst_first_rc == rc_float) {
1361 // xmm -> xmm
1362 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1363 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1364 // 64-bit
1365 if (cbuf) {
1366 MacroAssembler _masm(cbuf);
1367 __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1368 #ifndef PRODUCT
1369 } else {
1370 if(!do_size){
1371 if (size != 0) st->print("\n\t");
1372 st->print("mov_d %s <-- %s\t# spill 18",
1373 Matcher::regName[dst_first],
1374 Matcher::regName[src_first]);
1375 }
1376 #endif
1377 }
1378 size += 4;
1379 } else {
1380 // 32-bit
1381 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1382 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1383 if (cbuf) {
1384 MacroAssembler _masm(cbuf);
1385 __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1386 #ifndef PRODUCT
1387 } else {
1388 if(!do_size){
1389 if (size != 0) st->print("\n\t");
1390 st->print("mov_s %s <-- %s\t# spill 19",
1391 Matcher::regName[dst_first],
1392 Matcher::regName[src_first]);
1393 }
1394 #endif
1395 }
1396 size += 4;
1397 }
1398 return size;
1399 }
1400 }
1402 assert(0," foo ");
1403 Unimplemented();
1404 return size;
1406 }
#ifndef PRODUCT
// Debug listing of the spill copy (no code emitted).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size (in bytes) of the spill copy, computed without emitting.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1422 //=============================================================================
1423 #
#ifndef PRODUCT
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif

// Emit a breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}

uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1441 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method epilog: free the frame, restore RA/FP from
// just below the restored SP, then (optionally) the return safepoint poll.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile *C = ra_->C;
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
  st->cr(); st->print("\t");
  if (UseLoongsonISA) {
    // Loongson load-pair restores RA and FP in one instruction.
    st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
  } else {
    st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
    st->cr(); st->print("\t");
    st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
  }

  if( do_polling() && C->is_method_compilation() ) {
    st->print("Poll Safepoint # MachEpilogNode");
  }
}
#endif
// Emit the method epilog: pop the frame, restore RA and FP (which were
// saved at negative offsets from the caller's SP), then perform the
// return safepoint poll by reading the polling page.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson load-pair: restore RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Touch the polling page; a protected page makes this trap into the VM.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
}

int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Offset of the safepoint poll within the epilog.
int MachEpilogNode::safepoint_offset() const { return 0; }
1502 //=============================================================================
#ifndef PRODUCT
// Debug listing: BoxLock materializes the stack address of its monitor
// slot into a register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif

// One 4-byte instruction (see BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Compute reg = SP + offset (address of the monitor slot on the stack).
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi is the 32-bit, overflow-trapping add with a simm16
  // immediate — confirm the offset always fits in simm16 and whether
  // daddiu was intended for 64-bit stack addresses.
  __ addi(as_Register(reg), SP, offset);
  // Dead x86 LEA encoding left over from the port this file was based on:
  /*
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
1540 //static int sizeof_FFree_Float_Stack_All = -1;
// Byte offset from the start of a runtime call to the return address:
// the full patchable call sequence, six 4-byte instructions.
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
  // return 16;
}
1558 //=============================================================================
#ifndef PRODUCT
// Debug listing: _count NOPs, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1565 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1566 MacroAssembler _masm(&cbuf);
1567 int i = 0;
1568 for(i = 0; i < _count; i++)
1569 __ nop();
1570 }
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;  // each nop is one 4-byte instruction
}
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1579 //=============================================================================
1581 //=============================================================================
#ifndef PRODUCT
// Debug listing of the unverified entry point: load the receiver's klass,
// compare against the inline-cache klass, and fall into the IC-miss stub
// on mismatch.
// NOTE(review): the listing shows a direct jmp(), while emit() actually
// materializes the stub address with patchable_set48 + jr — the text is
// slightly stale relative to the emitted code.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Emit the unverified entry point: check the receiver's klass against the
// inline-cache klass; on mismatch, tail-call the shared IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();

  // Inline-cache miss: jump to the shared IC-miss stub through T9.
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
     8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1625 //=============================================================================
// The constant table base lives in a pointer-class register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// The base is emitted directly (see MachConstantBaseNode::emit), no
// post-register-allocation expansion is required.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant table base address into the allocated register
// using a patchable 48-bit immediate load.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1670 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method prolog: optional stack bang, save RA/FP
// below the incoming SP, establish FP, then allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1699 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1700 Compile* C = ra_->C;
1701 MacroAssembler _masm(&cbuf);
1703 int framesize = C->frame_size_in_bytes();
1704 int bangsize = C->bang_size_in_bytes();
1706 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1708 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1710 if (C->need_stack_bang(framesize)) {
1711 __ generate_stack_overflow_check(framesize);
1712 }
1714 if (UseLoongsonISA) {
1715 __ gssq(RA, FP, SP, -wordSize*2);
1716 } else {
1717 __ sd(RA, SP, -wordSize);
1718 __ sd(FP, SP, -wordSize*2);
1719 }
1720 __ daddiu(FP, SP, -wordSize*2);
1721 __ daddiu(SP, SP, -framesize);
1722 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1723 __ nop();
1725 C->set_frame_complete(cbuf.insts_size());
1726 if (C->has_mach_constant_base_node()) {
1727 // NOTE: We set the table base offset here because users might be
1728 // emitted before MachConstantBaseNode.
1729 Compile::ConstantTable& constant_table = C->constant_table();
1730 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1731 }
1733 }
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1745 %}
1747 //----------ENCODING BLOCK-----------------------------------------------------
1748 // This block specifies the encoding classes used by the compiler to output
1749 // byte streams. Encoding classes generate functions which are called by
1750 // Machine Instruction Nodes in order to generate the bit encoding of the
1751 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
1753 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1754 // operand to generate a function which returns its register number when
1755 // queried. CONST_INTER causes an operand to generate a function which
1756 // returns the value of the constant when queried. MEMORY_INTER causes an
1757 // operand to generate four functions which return the Base Register, the
1758 // Index Register, the Scale Value, and the Offset Value of the operand when
1759 // queried. COND_INTER causes an operand to generate six functions which
1760 // return the encoding code (ie - encoding bits for the instruction)
1761 // associated with each basic boolean condition for a conditional instruction.
1762 // Instructions specify two basic values for encoding. They use the
1763 // ins_encode keyword to specify their encoding class (which must be one of
1764 // the class names specified in the encoding block), and they use the
1765 // opcode keyword to specify, in order, their primary, secondary, and
1766 // tertiary opcode. Only the opcode sections which a particular instruction
1767 // needs for encoding need to be specified.
1768 encode %{
1769 /*
1770 Alias:
1771 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1772 118 B14: # B19 B15 <- B13 Freq: 0.899955
1773 118 add S1, S2, V0 #@addP_reg_reg
1774 11c lb S0, [S1 + #-8257524] #@loadB
1775 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1776 */
1777 //Load byte signed
1778 enc_class load_B_enc (mRegI dst, memory mem) %{
1779 MacroAssembler _masm(&cbuf);
1780 int dst = $dst$$reg;
1781 int base = $mem$$base;
1782 int index = $mem$$index;
1783 int scale = $mem$$scale;
1784 int disp = $mem$$disp;
1786 if( index != 0 ) {
1787 if( Assembler::is_simm16(disp) ) {
1788 if( UseLoongsonISA ) {
1789 if (scale == 0) {
1790 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1791 } else {
1792 __ dsll(AT, as_Register(index), scale);
1793 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1794 }
1795 } else {
1796 if (scale == 0) {
1797 __ addu(AT, as_Register(base), as_Register(index));
1798 } else {
1799 __ dsll(AT, as_Register(index), scale);
1800 __ addu(AT, as_Register(base), AT);
1801 }
1802 __ lb(as_Register(dst), AT, disp);
1803 }
1804 } else {
1805 if (scale == 0) {
1806 __ addu(AT, as_Register(base), as_Register(index));
1807 } else {
1808 __ dsll(AT, as_Register(index), scale);
1809 __ addu(AT, as_Register(base), AT);
1810 }
1811 __ move(T9, disp);
1812 if( UseLoongsonISA ) {
1813 __ gslbx(as_Register(dst), AT, T9, 0);
1814 } else {
1815 __ addu(AT, AT, T9);
1816 __ lb(as_Register(dst), AT, 0);
1817 }
1818 }
1819 } else {
1820 if( Assembler::is_simm16(disp) ) {
1821 __ lb(as_Register(dst), as_Register(base), disp);
1822 } else {
1823 __ move(T9, disp);
1824 if( UseLoongsonISA ) {
1825 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1826 } else {
1827 __ addu(AT, as_Register(base), T9);
1828 __ lb(as_Register(dst), AT, 0);
1829 }
1830 }
1831 }
1832 %}
1834 //Load byte unsigned
1835 enc_class load_UB_enc (mRegI dst, memory mem) %{
1836 MacroAssembler _masm(&cbuf);
1837 int dst = $dst$$reg;
1838 int base = $mem$$base;
1839 int index = $mem$$index;
1840 int scale = $mem$$scale;
1841 int disp = $mem$$disp;
1843 if( index != 0 ) {
1844 if (scale == 0) {
1845 __ daddu(AT, as_Register(base), as_Register(index));
1846 } else {
1847 __ dsll(AT, as_Register(index), scale);
1848 __ daddu(AT, as_Register(base), AT);
1849 }
1850 if( Assembler::is_simm16(disp) ) {
1851 __ lbu(as_Register(dst), AT, disp);
1852 } else {
1853 __ move(T9, disp);
1854 __ daddu(AT, AT, T9);
1855 __ lbu(as_Register(dst), AT, 0);
1856 }
1857 } else {
1858 if( Assembler::is_simm16(disp) ) {
1859 __ lbu(as_Register(dst), as_Register(base), disp);
1860 } else {
1861 __ move(T9, disp);
1862 __ daddu(AT, as_Register(base), T9);
1863 __ lbu(as_Register(dst), AT, 0);
1864 }
1865 }
1866 %}
  // Store byte from register: compute base [+ (index << scale)] [+ disp]
  // and issue sb (or Loongson gssbx, whose displacement is only simm8).
  // NOTE(review): the address arithmetic here uses addu (32-bit add with
  // sign-extension) while the neighbouring load_UB_enc / store_B_immI_enc
  // use daddu — confirm whether these should be daddu for 64-bit addresses.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
     MacroAssembler _masm(&cbuf);
     int  src = $src$$reg;
     int  base = $mem$$base;
     int  index = $mem$$index;
     int  scale = $mem$$scale;
     int  disp = $mem$$disp;

     if( index != 0 ) {
        if (scale == 0) {
           if( Assembler::is_simm(disp, 8) ) {
              // gssbx takes base + index + simm8 in one instruction.
              if (UseLoongsonISA) {
                 __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
              } else {
                 __ addu(AT, as_Register(base), as_Register(index));
                 __ sb(as_Register(src), AT, disp);
              }
           } else if( Assembler::is_simm16(disp) ) {
              __ addu(AT, as_Register(base), as_Register(index));
              __ sb(as_Register(src), AT, disp);
           } else {
              __ addu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              if (UseLoongsonISA) {
                 __ gssbx(as_Register(src), AT, T9, 0);
              } else {
                 __ addu(AT, AT, T9);
                 __ sb(as_Register(src), AT, 0);
              }
           }
        } else {
           // Scaled index: AT = index << scale, then fold in base and disp.
           __ dsll(AT, as_Register(index), scale);
           if( Assembler::is_simm(disp, 8) ) {
              if (UseLoongsonISA) {
                 __ gssbx(as_Register(src), AT, as_Register(base), disp);
              } else {
                 __ addu(AT, as_Register(base), AT);
                 __ sb(as_Register(src), AT, disp);
              }
           } else if( Assembler::is_simm16(disp) ) {
              __ addu(AT, as_Register(base), AT);
              __ sb(as_Register(src), AT, disp);
           } else {
              __ addu(AT, as_Register(base), AT);
              __ move(T9, disp);
              if (UseLoongsonISA) {
                 __ gssbx(as_Register(src), AT, T9, 0);
              } else {
                 __ addu(AT, AT, T9);
                 __ sb(as_Register(src), AT, 0);
              }
           }
        }
     } else {
        if( Assembler::is_simm16(disp) ) {
           __ sb(as_Register(src), as_Register(base), disp);
        } else {
           __ move(T9, disp);
           if (UseLoongsonISA) {
              __ gssbx(as_Register(src), as_Register(base), T9, 0);
           } else {
              __ addu(AT, as_Register(base), T9);
              __ sb(as_Register(src), AT, 0);
           }
        }
     }
  %}
// Store an 8-bit immediate byte to memory.
// Effective address = base + (index << scale) + disp.  AT and T9 are
// scratch registers; a zero immediate is stored straight from R0 so no
// register materialization is needed.  When UseLoongsonISA is set, the
// Loongson gssbx (store-byte-indexed) instruction folds the final
// base+offset add into the store.
enc_class store_B_immI_enc (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if (!UseLoongsonISA) {
      // Plain MIPS path: compute base + (index << scale) into AT, then
      // store with disp folded into the sb when it fits in simm16.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // disp too large for the sb immediate: add it in explicitly.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      // Loongson path: prefer gssbx, whose displacement is limited to
      // simm8, falling back to sb (simm16) and finally to an explicit add.
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (value == 0) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), as_Register(index), disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // AT = base + disp; gssbx then adds index as the second operand.
            __ move(AT, disp);
            __ move(T9, value);
            __ daddu(AT, as_Register(base), AT);
            __ gssbx(T9, AT, as_Register(index), 0);
          }
        }
      } else {
        if( Assembler::is_simm(disp, 8) ) {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ daddu(AT, as_Register(base), AT);
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // AT = (index << scale) + disp; base is added by gssbx.
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sb(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        }
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ move(AT, value);
          __ gssbx(AT, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }
%}
// Store an 8-bit immediate byte to memory, followed by a SYNC memory
// barrier (used for volatile/ordered byte stores).  Address-mode
// selection mirrors store_B_immI_enc: gssbx when UseLoongsonISA and the
// displacement fits simm8, sb when it fits simm16, otherwise an explicit
// address add through AT/T9.
enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp,8) ) {
        // gssbx carries the small displacement directly.
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        // Fold disp into the sb immediate after forming base+index.
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ){
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        }
      } else {
        // Large disp: AT = (index << scale) + disp, base added by gssbx.
        if ( scale == 0 ) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register: address is base + disp only.
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm16(disp) ){
        if ( value == 0 ) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        __ move(AT, disp);
        if ( value == 0 ) {
          __ gssbx(R0, as_Register(base), AT, 0);
        } else {
          __ move(T9, value);
          __ gssbx(T9, as_Register(base), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }

  // Memory barrier after the store (ordered/volatile semantics).
  __ sync();
%}
2208 // Load Short (16bit signed)
// Load a 16-bit signed short from memory into an integer register.
// Effective address = base + (index << scale) + disp.  Uses the Loongson
// gslhx (load-halfword-indexed, simm8 displacement) when available,
// otherwise lh with the address formed in AT.
enc_class load_S_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // gslhx folds the index add and the small displacement.
        if (scale == 0) {
          __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslhx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          __ lh(as_Register(dst), AT, disp);
        }
      } else {
        // Large disp: AT = (index << scale) + disp, base added by gslhx.
        if (scale == 0) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        }
      }
    } else { // not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  } else { // index is 0
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gslhx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  }
%}
2282 // Load Char (16bit unsigned)
2283 enc_class load_C_enc (mRegI dst, memory mem) %{
2284 MacroAssembler _masm(&cbuf);
2285 int dst = $dst$$reg;
2286 int base = $mem$$base;
2287 int index = $mem$$index;
2288 int scale = $mem$$scale;
2289 int disp = $mem$$disp;
2291 if( index != 0 ) {
2292 if (scale == 0) {
2293 __ daddu(AT, as_Register(base), as_Register(index));
2294 } else {
2295 __ dsll(AT, as_Register(index), scale);
2296 __ daddu(AT, as_Register(base), AT);
2297 }
2298 if( Assembler::is_simm16(disp) ) {
2299 __ lhu(as_Register(dst), AT, disp);
2300 } else {
2301 __ move(T9, disp);
2302 __ addu(AT, AT, T9);
2303 __ lhu(as_Register(dst), AT, 0);
2304 }
2305 } else {
2306 if( Assembler::is_simm16(disp) ) {
2307 __ lhu(as_Register(dst), as_Register(base), disp);
2308 } else {
2309 __ move(T9, disp);
2310 __ daddu(AT, as_Register(base), T9);
2311 __ lhu(as_Register(dst), AT, 0);
2312 }
2313 }
2314 %}
2316 // Store Char (16bit unsigned)
// Store a 16-bit char from an integer register to memory.
// Effective address = base + (index << scale) + disp.  Uses the Loongson
// gsshx (store-halfword-indexed, simm8 displacement) when available.
// NOTE(review): the address adds here use 32-bit 'addu' rather than the
// 'daddu' used by the byte/long encodings — this only works while
// addresses stay in the sign-extended 32-bit range; confirm whether
// these should be daddu like the other enc_classes.
enc_class store_C_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(as_Register(src), AT, disp);
      }
    } else {
      // Large disp: materialize in T9, then either gsshx or explicit add.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      __ sh(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a zero 16-bit char to memory (value comes from the hard-wired
// zero register R0, so no immediate needs materializing).
// Effective address = base + (index << scale) + disp.
// NOTE(review): like store_C_reg_enc, this uses 32-bit 'addu' for the
// address adds — confirm addresses stay in the sign-extended 32-bit
// range or switch to daddu for consistency with the 64-bit encodings.
enc_class store_C0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsshx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(R0, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(R0, AT, disp);
      }
    } else {
      // Large disp: materialize in T9, then either gsshx or explicit add.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(R0, AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      __ sh(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(R0, AT, 0);
      }
    }
  }
%}
// Load a 32-bit signed int from memory into an integer register (lw
// sign-extends to 64 bits).  Effective address = base + (index << scale)
// + disp; gslwx (load-word-indexed, simm8 displacement) is used when
// UseLoongsonISA is set.
// NOTE(review): address adds use 32-bit 'addu' rather than 'daddu' —
// confirm addresses stay in the sign-extended 32-bit range.
enc_class load_I_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ lw(as_Register(dst), AT, disp);
      }
    } else {
      // Large disp: materialize in T9, then either gslwx or explicit add.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      __ lw(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  }
%}
// Store a 32-bit int from an integer register to memory.
// Effective address = base + (index << scale) + disp; gsswx
// (store-word-indexed, simm8 displacement) is used when UseLoongsonISA.
// NOTE(review): address adds use 32-bit 'addu' rather than 'daddu' —
// confirm addresses stay in the sign-extended 32-bit range.
enc_class store_I_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      }
    } else {
      // Large disp: materialize in T9, then either gsswx or explicit add.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      __ sw(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a 32-bit immediate int to memory.
// Effective address = base + (index << scale) + disp.  A zero immediate
// is stored from R0; otherwise the constant is materialized in a scratch
// register (T9 or AT).  gsswx (store-word-indexed, simm8 displacement)
// is used when UseLoongsonISA is set.
enc_class store_I_immI_enc (memory mem, immI src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // gsswx carries the small displacement directly.
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        // Fold disp into the sw immediate after forming base+index.
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        }
      } else {
        // Large disp: AT = (index << scale) + disp, base added by gsswx.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
          if ( value ==0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register: address is base + disp only.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        if ( value == 0 ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        __ move(T9, disp);
        if ( value == 0 ) {
          __ gsswx(R0, as_Register(base), T9, 0);
        } else {
          __ move(AT, value);
          __ gsswx(AT, as_Register(base), T9, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  }
%}
// Load a compressed (narrow) oop from memory: a 32-bit zero-extended
// load (lwu) of the encoded pointer.  The operand must carry no
// relocation (asserted below); decode happens elsewhere.
// Effective address = base + (index << scale) + disp, formed in AT
// with T9 as scratch; set64 handles arbitrary 64-bit displacements.
enc_class load_N_enc (mRegN dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), AT, disp);
    } else {
      __ set64(T9, disp);
      __ daddu(AT, AT, T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), as_Register(base), disp);
    } else {
      __ set64(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  }

%}
// Load a 64-bit pointer from memory.  The operand must carry no
// displacement relocation (asserted).  Effective address =
// base + (index << scale) + disp; gsldx (load-doubleword-indexed,
// simm8 displacement) is used when UseLoongsonISA is set, and set64
// materializes displacements outside simm16.
enc_class load_P_enc (mRegP dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // gsldx folds the index add and the small displacement.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ gsldx(as_Register(dst), as_Register(base), AT, disp);
        } else {
          __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
        }
      } else if ( Assembler::is_simm16(disp) ){
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, AT, as_Register(base));
        } else {
          __ daddu(AT, as_Register(index), as_Register(base));
        }
        __ ld(as_Register(dst), AT, disp);
      } else {
        // Large disp: AT = (index << scale) + disp, base added by gsldx.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        }
        __ gsldx(as_Register(dst), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ){
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ gsldx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  }
  // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
%}
// Store a 64-bit pointer from a register to memory.
// Effective address = base + (index << scale) + disp; gssdx
// (store-doubleword-indexed, simm8 displacement) is used when
// UseLoongsonISA is set.  AT and T9 are scratch registers.
enc_class store_P_reg_enc (memory mem, mRegP src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        // gssdx folds the index add and the small displacement.
        if ( scale == 0 ) {
          __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sd(as_Register(src), AT, disp);
      } else {
        // Large disp: AT = (index << scale) + disp, base added by gssdx.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gssdx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gssdx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a compressed (narrow) oop from a register to memory: a 32-bit
// word store (sw) of the already-encoded pointer.  Structure parallels
// store_P_reg_enc, substituting sw/gsswx for sd/gssdx.
// Effective address = base + (index << scale) + disp.
enc_class store_N_reg_enc (memory mem, mRegN src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        // gsswx folds the index add and the small displacement.
        if ( scale == 0 ) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      } else {
        // Large disp: AT = (index << scale) + disp, base added by gsswx.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gsswx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a null (zero) 64-bit pointer to memory; the value comes from
// the hard-wired zero register R0.  Effective address =
// base + (index << scale) + disp; gssdx is used when UseLoongsonISA and
// the displacement fits simm8.
enc_class store_P_immP0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ sd(R0, AT, disp);
        }
      } else {
        // Large disp: materialize in T9, then gssdx or explicit add.
        __ daddu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if(UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    } else {
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), AT, disp);
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssdx(R0, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  }
%}
// Store a null compressed (narrow) oop: a 32-bit word store of zero
// from R0 (a null oop encodes to 0 under compressed oops).
// Effective address = base + (index << scale) + disp, formed in AT
// with T9 as scratch.
enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if(index!=0){
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }

    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sw(R0, AT, 0);
    }
  }
  else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sw(R0, AT, 0);
    }
  }
%}
// Load a 64-bit long from memory into a register.
// Effective address = base + (index << scale) + disp, formed in AT with
// T9 as scratch.
enc_class load_L_enc (mRegL dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register dst_reg = as_Register($dst$$reg);

  /*********************2013/03/27**************************
   * Jin: $base may contain a null object.
   * Server JIT force the exception_offset to be the pos of
   * the first instruction.
   * I insert such a 'null_check' at the beginning.
   *******************************************************/
  // Implicit null check: this dummy lw faults at the instruction the
  // JIT registered as the exception offset when base is null.
  __ lw(AT, as_Register(base), 0);

  /*********************2012/10/04**************************
   * Error case found in SortTest
   * 337  b  java.util.Arrays::sort1 (401 bytes)
   * B73:
   *    d34   lw  T4.lo, [T4 + #16]  #@loadL-lo
   *          lw  T4.hi, [T4 + #16]+4  #@loadL-hi
   *
   * The original instructions generated here are :
   *     __ lw(dst_lo, as_Register(base), disp);
   *     __ lw(dst_hi, as_Register(base), disp + 4);
   *******************************************************/

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ ld(dst_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): the move through AT looks redundant (ld could use
      // base directly) — kept as-is in case the instruct declares a
      // fixed code size; confirm before removing.
      __ move(AT, as_Register(base));
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ ld(dst_reg, AT, 0);
    }
  }
%}
// Store a 64-bit long from a register to memory.
// Effective address = base + (index << scale) + disp, formed in AT with
// T9 as scratch.
enc_class store_L_reg_enc (memory mem, mRegL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register src_reg = as_Register($src$$reg);

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(src_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): the move through AT looks redundant (sd could use
      // base directly) — kept as-is in case the instruct declares a
      // fixed code size; confirm before removing.
      __ move(AT, as_Register(base));
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(src_reg, AT, 0);
    }
  }
%}
3115 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3116 MacroAssembler _masm(&cbuf);
3117 int base = $mem$$base;
3118 int index = $mem$$index;
3119 int scale = $mem$$scale;
3120 int disp = $mem$$disp;
3122 if( index != 0 ) {
3123 if (scale == 0) {
3124 __ daddu(AT, as_Register(base), as_Register(index));
3125 } else {
3126 __ dsll(AT, as_Register(index), scale);
3127 __ daddu(AT, as_Register(base), AT);
3128 }
3129 if( Assembler::is_simm16(disp) ) {
3130 __ sd(R0, AT, disp);
3131 } else {
3132 __ move(T9, disp);
3133 __ addu(AT, AT, T9);
3134 __ sd(R0, AT, 0);
3135 }
3136 } else {
3137 if( Assembler::is_simm16(disp) ) {
3138 __ move(AT, as_Register(base));
3139 __ sd(R0, AT, disp);
3140 } else {
3141 __ move(T9, disp);
3142 __ addu(AT, as_Register(base), T9);
3143 __ sd(R0, AT, 0);
3144 }
3145 }
3146 %}
// Load a 32-bit float from memory into a floating-point register.
// Effective address = base + (index << scale) + disp; gslwxc1
// (load-word-indexed to FPR, simm8 displacement) is used when
// UseLoongsonISA is set, otherwise lwc1 with the address formed in AT.
enc_class load_F_enc (regF dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst = $dst$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // gslwxc1 folds the index add and the small displacement.
        if (scale == 0) {
          __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwxc1(dst, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ lwc1(dst, AT, disp);
      }
    } else {
      // Large disp: materialize in T9, then gslwxc1 or explicit add.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ lwc1(dst, AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp only.
    if( Assembler::is_simm16(disp) ) {
      __ lwc1(dst, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ lwc1(dst, AT, 0);
      }
    }
  }
%}
3204 enc_class store_F_reg_enc (memory mem, regF src) %{
3205 MacroAssembler _masm(&cbuf);
3206 int base = $mem$$base;
3207 int index = $mem$$index;
3208 int scale = $mem$$scale;
3209 int disp = $mem$$disp;
3210 FloatRegister src = $src$$FloatRegister;
3212 if( index != 0 ) {
3213 if( Assembler::is_simm16(disp) ) {
3214 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3215 if (scale == 0) {
3216 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3217 } else {
3218 __ dsll(AT, as_Register(index), scale);
3219 __ gsswxc1(src, as_Register(base), AT, disp);
3220 }
3221 } else {
3222 if (scale == 0) {
3223 __ daddu(AT, as_Register(base), as_Register(index));
3224 } else {
3225 __ dsll(AT, as_Register(index), scale);
3226 __ daddu(AT, as_Register(base), AT);
3227 }
3228 __ swc1(src, AT, disp);
3229 }
3230 } else {
3231 if (scale == 0) {
3232 __ daddu(AT, as_Register(base), as_Register(index));
3233 } else {
3234 __ dsll(AT, as_Register(index), scale);
3235 __ daddu(AT, as_Register(base), AT);
3236 }
3237 __ move(T9, disp);
3238 if( UseLoongsonISA ) {
3239 __ gsswxc1(src, AT, T9, 0);
3240 } else {
3241 __ daddu(AT, AT, T9);
3242 __ swc1(src, AT, 0);
3243 }
3244 }
3245 } else {
3246 if( Assembler::is_simm16(disp) ) {
3247 __ swc1(src, as_Register(base), disp);
3248 } else {
3249 __ move(T9, disp);
3250 if( UseLoongsonISA ) {
3251 __ gslwxc1(src, as_Register(base), T9, 0);
3252 } else {
3253 __ daddu(AT, as_Register(base), T9);
3254 __ swc1(src, AT, 0);
3255 }
3256 }
3257 }
3258 %}
3260 enc_class load_D_enc (regD dst, memory mem) %{
3261 MacroAssembler _masm(&cbuf);
3262 int base = $mem$$base;
3263 int index = $mem$$index;
3264 int scale = $mem$$scale;
3265 int disp = $mem$$disp;
3266 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3268 if( index != 0 ) {
3269 if( Assembler::is_simm16(disp) ) {
3270 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3271 if (scale == 0) {
3272 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3273 } else {
3274 __ dsll(AT, as_Register(index), scale);
3275 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3276 }
3277 } else {
3278 if (scale == 0) {
3279 __ daddu(AT, as_Register(base), as_Register(index));
3280 } else {
3281 __ dsll(AT, as_Register(index), scale);
3282 __ daddu(AT, as_Register(base), AT);
3283 }
3284 __ ldc1(dst_reg, AT, disp);
3285 }
3286 } else {
3287 if (scale == 0) {
3288 __ daddu(AT, as_Register(base), as_Register(index));
3289 } else {
3290 __ dsll(AT, as_Register(index), scale);
3291 __ daddu(AT, as_Register(base), AT);
3292 }
3293 __ move(T9, disp);
3294 if( UseLoongsonISA ) {
3295 __ gsldxc1(dst_reg, AT, T9, 0);
3296 } else {
3297 __ addu(AT, AT, T9);
3298 __ ldc1(dst_reg, AT, 0);
3299 }
3300 }
3301 } else {
3302 if( Assembler::is_simm16(disp) ) {
3303 __ ldc1(dst_reg, as_Register(base), disp);
3304 } else {
3305 __ move(T9, disp);
3306 if( UseLoongsonISA ) {
3307 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3308 } else {
3309 __ addu(AT, as_Register(base), T9);
3310 __ ldc1(dst_reg, AT, 0);
3311 }
3312 }
3313 }
3314 %}
3316 enc_class store_D_reg_enc (memory mem, regD src) %{
3317 MacroAssembler _masm(&cbuf);
3318 int base = $mem$$base;
3319 int index = $mem$$index;
3320 int scale = $mem$$scale;
3321 int disp = $mem$$disp;
3322 FloatRegister src_reg = as_FloatRegister($src$$reg);
3324 if( index != 0 ) {
3325 if( Assembler::is_simm16(disp) ) {
3326 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3327 if (scale == 0) {
3328 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3329 } else {
3330 __ dsll(AT, as_Register(index), scale);
3331 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3332 }
3333 } else {
3334 if (scale == 0) {
3335 __ daddu(AT, as_Register(base), as_Register(index));
3336 } else {
3337 __ dsll(AT, as_Register(index), scale);
3338 __ daddu(AT, as_Register(base), AT);
3339 }
3340 __ sdc1(src_reg, AT, disp);
3341 }
3342 } else {
3343 if (scale == 0) {
3344 __ daddu(AT, as_Register(base), as_Register(index));
3345 } else {
3346 __ dsll(AT, as_Register(index), scale);
3347 __ daddu(AT, as_Register(base), AT);
3348 }
3349 __ move(T9, disp);
3350 if( UseLoongsonISA ) {
3351 __ gssdxc1(src_reg, AT, T9, 0);
3352 } else {
3353 __ addu(AT, AT, T9);
3354 __ sdc1(src_reg, AT, 0);
3355 }
3356 }
3357 } else {
3358 if( Assembler::is_simm16(disp) ) {
3359 __ sdc1(src_reg, as_Register(base), disp);
3360 } else {
3361 __ move(T9, disp);
3362 if( UseLoongsonISA ) {
3363 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3364 } else {
3365 __ addu(AT, as_Register(base), T9);
3366 __ sdc1(src_reg, AT, 0);
3367 }
3368 }
3369 }
3370 %}
  // Call from compiled Java code into the runtime (leaf or non-leaf).
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    // Materialize the runtime entry point in T9 (presumably a fixed-length
    // 48-bit set sequence so the call site is patchable -- confirm in
    // MacroAssembler::patchable_set48), then jump-and-link through it.
    __ patchable_set48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();    // branch delay slot
  %}
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Pick the relocation type: no resolved method means a runtime stub,
    // otherwise an optimized-virtual or a plain static call site.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    // Patchable materialization of the target, then call through T9.
    __ patchable_set48(T9, $meth$$method);
    __ jalr(T9);
    __ nop();    // branch delay slot
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
/*
 * Inline-cache dispatched call; the IC machinery lives in
 * MacroAssembler::ic_call(). [Ref: LIR_Assembler::ic_call() ]
 */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Convert the fast-lock/unlock result left in AT into a boolean-style
  // value in the flags register operand: 0 when AT == 0, otherwise
  // 0xFFFFFFFF.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);         // flags = 0 (R0 + R0)
    __ beq(AT, R0, L);              // AT == 0: leave flags at 0
    __ delayed()->nop();            // branch delay slot
    __ move(flags, 0xFFFFFFFF);     // AT != 0: flags = all ones
    __ bind(L);
  %}
  // Slow-path subtype check: result := 0 if sub is a subtype of super,
  // 1 otherwise. Delegates the walk of the secondary-supers array to
  // MacroAssembler::check_klass_subtype_slow_path; T9 is a scratch.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     *  47c   B40: #	B21 B41 <- B20  Freq: 0.155379
     *  47c   	partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     *  4bc   	mov   S2, NULL #@loadConP
     *  4c0   	beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    // Falls through on hit, branches to 'miss' on failure;
    // set_cond_codes leaves the comparison result for the caller.
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0);   // hit: result = 0
    __ b(done);
    __ nop();             // branch delay slot

    __ bind(miss);
    __ move(result, 1);   // miss: result = 1
    __ bind(done);
  %}
3459 %}
3462 //---------MIPS FRAME--------------------------------------------------------------
3463 // Definition of frame structure and management information.
3464 //
3465 // S T A C K L A Y O U T Allocators stack-slot number
3466 // | (to get allocators register number
3467 // G Owned by | | v add SharedInfo::stack0)
3468 // r CALLER | |
3469 // o | +--------+ pad to even-align allocators stack-slot
3470 // w V | pad0 | numbers; owned by CALLER
3471 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3472 // h ^ | in | 5
3473 // | | args | 4 Holes in incoming args owned by SELF
3474 // | | old | | 3
3475 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3476 // v | | ret | 3 return address
3477 // Owned by +--------+
3478 // Self | pad2 | 2 pad to align old SP
3479 // | +--------+ 1
3480 // | | locks | 0
3481 // | +--------+----> SharedInfo::stack0, even aligned
3482 // | | pad1 | 11 pad to align new SP
3483 // | +--------+
3484 // | | | 10
3485 // | | spills | 9 spills
3486 // V | | 8 (pad0 slot for callee)
3487 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3488 // ^ | out | 7
3489 // | | args | 6 Holes in outgoing args owned by CALLEE
3490 // Owned by new | |
3491 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3492 // | |
3493 //
3494 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3495 // known from SELF's arguments and the Java calling convention.
3496 // Region 6-7 is determined per call site.
3497 // Note 2: If the calling convention leaves holes in the incoming argument
3498 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE.  Holes should not be necessary in the
// incoming area, as the Java calling convention is completely under
// the control of the AD file.  Doubles can be sorted and packed to
// avoid holes.  Holes in the outgoing arguments may be necessary for
3503 // varargs C calling conventions.
3504 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3505 // even aligned with pad0 as needed.
3506 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3507 // region 6-11 is even aligned; it may be padded out more so that
3508 // the region from SP to FP meets the minimum stack alignment.
3509 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3510 // alignment. Region 11, pad1, may be dynamically extended so that
3511 // SP meets the minimum alignment.
// Frame description consumed by the matcher and register allocator.
frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information.

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
/*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
*/
  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.  Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this.
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information.
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    // Longs and pointers come back in V0 (with V0_H as the high half);
    // floats and doubles come back in F0.
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3629 //----------ATTRIBUTES---------------------------------------------------------
3630 //----------Operand Attributes-------------------------------------------------
3631 op_attrib op_cost(0); // Required cost attribute
3633 //----------Instruction Attributes---------------------------------------------
3634 ins_attrib ins_cost(100); // Required cost attribute
3635 ins_attrib ins_size(32); // Required size attribute (in bits)
3636 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3637 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3638 // non-matching short branch variant of some
3639 // long branch?
3640 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3641 // specifies the alignment that some part of the instruction (not
3642 // necessarily the start) requires. If > 1, a compute_padding()
3643 // function must be provided for the instruction
3645 //----------OPERANDS-----------------------------------------------------------
3646 // Operand definitions must precede instruction definitions for correct parsing
3647 // in the ADLC because operands constitute user defined types which are used in
3648 // instruction definitions.
3650 // Vectors
3651 operand vecD() %{
3652 constraint(ALLOC_IN_RC(dbl_reg));
3653 match(VecD);
3655 format %{ %}
3656 interface(REG_INTER);
3657 %}
3659 // Flags register, used as output of compare instructions
3660 operand FlagsReg() %{
3661 constraint(ALLOC_IN_RC(mips_flags));
3662 match(RegFlags);
3664 format %{ "EFLAGS" %}
3665 interface(REG_INTER);
3666 %}
3668 //----------Simple Operands----------------------------------------------------
3669 //TODO: Should we need to define some more special immediate number ?
3670 // Immediate Operands
3671 // Integer Immediate
3672 operand immI() %{
3673 match(ConI);
3674 //TODO: should not match immI8 here LEE
3675 match(immI8);
3677 op_cost(20);
3678 format %{ %}
3679 interface(CONST_INTER);
3680 %}
3682 // Long Immediate 8-bit
3683 operand immL8()
3684 %{
3685 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3686 match(ConL);
3688 op_cost(5);
3689 format %{ %}
3690 interface(CONST_INTER);
3691 %}
3693 // Constant for test vs zero
3694 operand immI0() %{
3695 predicate(n->get_int() == 0);
3696 match(ConI);
3698 op_cost(0);
3699 format %{ %}
3700 interface(CONST_INTER);
3701 %}
3703 // Constant for increment
3704 operand immI1() %{
3705 predicate(n->get_int() == 1);
3706 match(ConI);
3708 op_cost(0);
3709 format %{ %}
3710 interface(CONST_INTER);
3711 %}
3713 // Constant for decrement
3714 operand immI_M1() %{
3715 predicate(n->get_int() == -1);
3716 match(ConI);
3718 op_cost(0);
3719 format %{ %}
3720 interface(CONST_INTER);
3721 %}
3723 operand immI_MaxI() %{
3724 predicate(n->get_int() == 2147483647);
3725 match(ConI);
3727 op_cost(0);
3728 format %{ %}
3729 interface(CONST_INTER);
3730 %}
3732 // Valid scale values for addressing modes
3733 operand immI2() %{
3734 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3735 match(ConI);
3737 format %{ %}
3738 interface(CONST_INTER);
3739 %}
3741 operand immI8() %{
3742 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3743 match(ConI);
3745 op_cost(5);
3746 format %{ %}
3747 interface(CONST_INTER);
3748 %}
3750 operand immI16() %{
3751 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3752 match(ConI);
3754 op_cost(10);
3755 format %{ %}
3756 interface(CONST_INTER);
3757 %}
3759 // Constant for long shifts
3760 operand immI_32() %{
3761 predicate( n->get_int() == 32 );
3762 match(ConI);
3764 op_cost(0);
3765 format %{ %}
3766 interface(CONST_INTER);
3767 %}
3769 operand immI_63() %{
3770 predicate( n->get_int() == 63 );
3771 match(ConI);
3773 op_cost(0);
3774 format %{ %}
3775 interface(CONST_INTER);
3776 %}
3778 operand immI_0_31() %{
3779 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3780 match(ConI);
3782 op_cost(0);
3783 format %{ %}
3784 interface(CONST_INTER);
3785 %}
// Operand for non-negative integer mask
3788 operand immI_nonneg_mask() %{
3789 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3790 match(ConI);
3792 op_cost(0);
3793 format %{ %}
3794 interface(CONST_INTER);
3795 %}
3797 operand immI_32_63() %{
3798 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3799 match(ConI);
3800 op_cost(0);
3802 format %{ %}
3803 interface(CONST_INTER);
3804 %}
// Integer immediate in [-32767, 32768]: the range whose NEGATION fits a
// signed 16-bit field, presumably so a subtract of this constant can be
// encoded as an add-immediate of -value -- confirm against the matching
// rules that use this operand.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3815 operand immI_0_32767() %{
3816 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3817 match(ConI);
3818 op_cost(0);
3820 format %{ %}
3821 interface(CONST_INTER);
3822 %}
3824 operand immI_0_65535() %{
3825 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3826 match(ConI);
3827 op_cost(0);
3829 format %{ %}
3830 interface(CONST_INTER);
3831 %}
3833 operand immI_1() %{
3834 predicate( n->get_int() == 1 );
3835 match(ConI);
3837 op_cost(0);
3838 format %{ %}
3839 interface(CONST_INTER);
3840 %}
3842 operand immI_2() %{
3843 predicate( n->get_int() == 2 );
3844 match(ConI);
3846 op_cost(0);
3847 format %{ %}
3848 interface(CONST_INTER);
3849 %}
3851 operand immI_3() %{
3852 predicate( n->get_int() == 3 );
3853 match(ConI);
3855 op_cost(0);
3856 format %{ %}
3857 interface(CONST_INTER);
3858 %}
3860 operand immI_7() %{
3861 predicate( n->get_int() == 7 );
3862 match(ConI);
3864 format %{ %}
3865 interface(CONST_INTER);
3866 %}
3868 // Immediates for special shifts (sign extend)
3870 // Constants for increment
3871 operand immI_16() %{
3872 predicate( n->get_int() == 16 );
3873 match(ConI);
3875 format %{ %}
3876 interface(CONST_INTER);
3877 %}
3879 operand immI_24() %{
3880 predicate( n->get_int() == 24 );
3881 match(ConI);
3883 format %{ %}
3884 interface(CONST_INTER);
3885 %}
3887 // Constant for byte-wide masking
3888 operand immI_255() %{
3889 predicate( n->get_int() == 255 );
3890 match(ConI);
3892 op_cost(0);
3893 format %{ %}
3894 interface(CONST_INTER);
3895 %}
3897 operand immI_65535() %{
3898 predicate( n->get_int() == 65535 );
3899 match(ConI);
3901 op_cost(5);
3902 format %{ %}
3903 interface(CONST_INTER);
3904 %}
3906 operand immI_65536() %{
3907 predicate( n->get_int() == 65536 );
3908 match(ConI);
3910 op_cost(5);
3911 format %{ %}
3912 interface(CONST_INTER);
3913 %}
3915 operand immI_M65536() %{
3916 predicate( n->get_int() == -65536 );
3917 match(ConI);
3919 op_cost(5);
3920 format %{ %}
3921 interface(CONST_INTER);
3922 %}
3924 // Pointer Immediate
3925 operand immP() %{
3926 match(ConP);
3928 op_cost(10);
3929 format %{ %}
3930 interface(CONST_INTER);
3931 %}
3933 // NULL Pointer Immediate
3934 operand immP0() %{
3935 predicate( n->get_ptr() == 0 );
3936 match(ConP);
3937 op_cost(0);
3939 format %{ %}
3940 interface(CONST_INTER);
3941 %}
3943 // Pointer Immediate: 64-bit
3944 operand immP_set() %{
3945 match(ConP);
3947 op_cost(5);
3948 // formats are generated automatically for constants and base registers
3949 format %{ %}
3950 interface(CONST_INTER);
3951 %}
3953 // Pointer Immediate: 64-bit
3954 operand immP_load() %{
3955 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3956 match(ConP);
3958 op_cost(5);
3959 // formats are generated automatically for constants and base registers
3960 format %{ %}
3961 interface(CONST_INTER);
3962 %}
3964 // Pointer Immediate: 64-bit
3965 operand immP_no_oop_cheap() %{
3966 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3967 match(ConP);
3969 op_cost(5);
3970 // formats are generated automatically for constants and base registers
3971 format %{ %}
3972 interface(CONST_INTER);
3973 %}
3975 // Pointer for polling page
3976 operand immP_poll() %{
3977 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3978 match(ConP);
3979 op_cost(5);
3981 format %{ %}
3982 interface(CONST_INTER);
3983 %}
3985 // Pointer Immediate
3986 operand immN() %{
3987 match(ConN);
3989 op_cost(10);
3990 format %{ %}
3991 interface(CONST_INTER);
3992 %}
3994 operand immNKlass() %{
3995 match(ConNKlass);
3997 op_cost(10);
3998 format %{ %}
3999 interface(CONST_INTER);
4000 %}
4002 // NULL Pointer Immediate
4003 operand immN0() %{
4004 predicate(n->get_narrowcon() == 0);
4005 match(ConN);
4007 op_cost(5);
4008 format %{ %}
4009 interface(CONST_INTER);
4010 %}
4012 // Long Immediate
4013 operand immL() %{
4014 match(ConL);
4016 op_cost(20);
4017 format %{ %}
4018 interface(CONST_INTER);
4019 %}
4021 // Long Immediate zero
4022 operand immL0() %{
4023 predicate( n->get_long() == 0L );
4024 match(ConL);
4025 op_cost(0);
4027 format %{ %}
4028 interface(CONST_INTER);
4029 %}
4031 operand immL7() %{
4032 predicate( n->get_long() == 7L );
4033 match(ConL);
4034 op_cost(0);
4036 format %{ %}
4037 interface(CONST_INTER);
4038 %}
4040 operand immL_M1() %{
4041 predicate( n->get_long() == -1L );
4042 match(ConL);
4043 op_cost(0);
4045 format %{ %}
4046 interface(CONST_INTER);
4047 %}
4049 // bit 0..2 zero
4050 operand immL_M8() %{
4051 predicate( n->get_long() == -8L );
4052 match(ConL);
4053 op_cost(0);
4055 format %{ %}
4056 interface(CONST_INTER);
4057 %}
4059 // bit 2 zero
4060 operand immL_M5() %{
4061 predicate( n->get_long() == -5L );
4062 match(ConL);
4063 op_cost(0);
4065 format %{ %}
4066 interface(CONST_INTER);
4067 %}
4069 // bit 1..2 zero
4070 operand immL_M7() %{
4071 predicate( n->get_long() == -7L );
4072 match(ConL);
4073 op_cost(0);
4075 format %{ %}
4076 interface(CONST_INTER);
4077 %}
4079 // bit 0..1 zero
4080 operand immL_M4() %{
4081 predicate( n->get_long() == -4L );
4082 match(ConL);
4083 op_cost(0);
4085 format %{ %}
4086 interface(CONST_INTER);
4087 %}
4089 // bit 3..6 zero
4090 operand immL_M121() %{
4091 predicate( n->get_long() == -121L );
4092 match(ConL);
4093 op_cost(0);
4095 format %{ %}
4096 interface(CONST_INTER);
4097 %}
4099 // Long immediate from 0 to 127.
4100 // Used for a shorter form of long mul by 10.
4101 operand immL_127() %{
4102 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4103 match(ConL);
4104 op_cost(0);
4106 format %{ %}
4107 interface(CONST_INTER);
4108 %}
// Operand for non-negative long mask
4111 operand immL_nonneg_mask() %{
4112 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
4113 match(ConL);
4115 op_cost(0);
4116 format %{ %}
4117 interface(CONST_INTER);
4118 %}
4120 operand immL_0_65535() %{
4121 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4122 match(ConL);
4123 op_cost(0);
4125 format %{ %}
4126 interface(CONST_INTER);
4127 %}
// Long Immediate: cheap (materialize in <= 3 instructions).
// Partitions long constants with immL_expensive below, so matching
// rules can choose inline materialization vs. a constant-table load.
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: expensive (materialize in > 3 instructions).
// Exact complement of immL_cheap above.
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
4149 operand immL16() %{
4150 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4151 match(ConL);
4153 op_cost(10);
4154 format %{ %}
4155 interface(CONST_INTER);
4156 %}
// Long immediate in [-32767, 32768]: the range whose NEGATION fits a
// signed 16-bit field (long analogue of immI16_sub), presumably so a
// subtract of this constant can be encoded as daddiu of -value --
// confirm against the matching rules that use this operand.
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
4167 // Long Immediate: low 32-bit mask
4168 operand immL_32bits() %{
4169 predicate(n->get_long() == 0xFFFFFFFFL);
4170 match(ConL);
4171 op_cost(20);
4173 format %{ %}
4174 interface(CONST_INTER);
4175 %}
4177 // Long Immediate 32-bit signed
4178 operand immL32()
4179 %{
4180 predicate(n->get_long() == (int) (n->get_long()));
4181 match(ConL);
4183 op_cost(15);
4184 format %{ %}
4185 interface(CONST_INTER);
4186 %}
4189 //single-precision floating-point zero
4190 operand immF0() %{
4191 predicate(jint_cast(n->getf()) == 0);
4192 match(ConF);
4194 op_cost(5);
4195 format %{ %}
4196 interface(CONST_INTER);
4197 %}
4199 //single-precision floating-point immediate
4200 operand immF() %{
4201 match(ConF);
4203 op_cost(20);
4204 format %{ %}
4205 interface(CONST_INTER);
4206 %}
4208 //double-precision floating-point zero
4209 operand immD0() %{
4210 predicate(jlong_cast(n->getd()) == 0);
4211 match(ConD);
4213 op_cost(5);
4214 format %{ %}
4215 interface(CONST_INTER);
4216 %}
4218 //double-precision floating-point immediate
4219 operand immD() %{
4220 match(ConD);
4222 op_cost(20);
4223 format %{ %}
4224 interface(CONST_INTER);
4225 %}
4227 // Register Operands
4228 // Integer Register
4229 operand mRegI() %{
4230 constraint(ALLOC_IN_RC(int_reg));
4231 match(RegI);
4233 format %{ %}
4234 interface(REG_INTER);
4235 %}
4237 operand no_Ax_mRegI() %{
4238 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4239 match(RegI);
4240 match(mRegI);
4242 format %{ %}
4243 interface(REG_INTER);
4244 %}
4246 operand mS0RegI() %{
4247 constraint(ALLOC_IN_RC(s0_reg));
4248 match(RegI);
4249 match(mRegI);
4251 format %{ "S0" %}
4252 interface(REG_INTER);
4253 %}
4255 operand mS1RegI() %{
4256 constraint(ALLOC_IN_RC(s1_reg));
4257 match(RegI);
4258 match(mRegI);
4260 format %{ "S1" %}
4261 interface(REG_INTER);
4262 %}
4264 operand mS2RegI() %{
4265 constraint(ALLOC_IN_RC(s2_reg));
4266 match(RegI);
4267 match(mRegI);
4269 format %{ "S2" %}
4270 interface(REG_INTER);
4271 %}
4273 operand mS3RegI() %{
4274 constraint(ALLOC_IN_RC(s3_reg));
4275 match(RegI);
4276 match(mRegI);
4278 format %{ "S3" %}
4279 interface(REG_INTER);
4280 %}
4282 operand mS4RegI() %{
4283 constraint(ALLOC_IN_RC(s4_reg));
4284 match(RegI);
4285 match(mRegI);
4287 format %{ "S4" %}
4288 interface(REG_INTER);
4289 %}
4291 operand mS5RegI() %{
4292 constraint(ALLOC_IN_RC(s5_reg));
4293 match(RegI);
4294 match(mRegI);
4296 format %{ "S5" %}
4297 interface(REG_INTER);
4298 %}
4300 operand mS6RegI() %{
4301 constraint(ALLOC_IN_RC(s6_reg));
4302 match(RegI);
4303 match(mRegI);
4305 format %{ "S6" %}
4306 interface(REG_INTER);
4307 %}
4309 operand mS7RegI() %{
4310 constraint(ALLOC_IN_RC(s7_reg));
4311 match(RegI);
4312 match(mRegI);
4314 format %{ "S7" %}
4315 interface(REG_INTER);
4316 %}
4319 operand mT0RegI() %{
4320 constraint(ALLOC_IN_RC(t0_reg));
4321 match(RegI);
4322 match(mRegI);
4324 format %{ "T0" %}
4325 interface(REG_INTER);
4326 %}
4328 operand mT1RegI() %{
4329 constraint(ALLOC_IN_RC(t1_reg));
4330 match(RegI);
4331 match(mRegI);
4333 format %{ "T1" %}
4334 interface(REG_INTER);
4335 %}
4337 operand mT2RegI() %{
4338 constraint(ALLOC_IN_RC(t2_reg));
4339 match(RegI);
4340 match(mRegI);
4342 format %{ "T2" %}
4343 interface(REG_INTER);
4344 %}
4346 operand mT3RegI() %{
4347 constraint(ALLOC_IN_RC(t3_reg));
4348 match(RegI);
4349 match(mRegI);
4351 format %{ "T3" %}
4352 interface(REG_INTER);
4353 %}
4355 operand mT8RegI() %{
4356 constraint(ALLOC_IN_RC(t8_reg));
4357 match(RegI);
4358 match(mRegI);
4360 format %{ "T8" %}
4361 interface(REG_INTER);
4362 %}
4364 operand mT9RegI() %{
4365 constraint(ALLOC_IN_RC(t9_reg));
4366 match(RegI);
4367 match(mRegI);
4369 format %{ "T9" %}
4370 interface(REG_INTER);
4371 %}
4373 operand mA0RegI() %{
4374 constraint(ALLOC_IN_RC(a0_reg));
4375 match(RegI);
4376 match(mRegI);
4378 format %{ "A0" %}
4379 interface(REG_INTER);
4380 %}
4382 operand mA1RegI() %{
4383 constraint(ALLOC_IN_RC(a1_reg));
4384 match(RegI);
4385 match(mRegI);
4387 format %{ "A1" %}
4388 interface(REG_INTER);
4389 %}
4391 operand mA2RegI() %{
4392 constraint(ALLOC_IN_RC(a2_reg));
4393 match(RegI);
4394 match(mRegI);
4396 format %{ "A2" %}
4397 interface(REG_INTER);
4398 %}
4400 operand mA3RegI() %{
4401 constraint(ALLOC_IN_RC(a3_reg));
4402 match(RegI);
4403 match(mRegI);
4405 format %{ "A3" %}
4406 interface(REG_INTER);
4407 %}
4409 operand mA4RegI() %{
4410 constraint(ALLOC_IN_RC(a4_reg));
4411 match(RegI);
4412 match(mRegI);
4414 format %{ "A4" %}
4415 interface(REG_INTER);
4416 %}
4418 operand mA5RegI() %{
4419 constraint(ALLOC_IN_RC(a5_reg));
4420 match(RegI);
4421 match(mRegI);
4423 format %{ "A5" %}
4424 interface(REG_INTER);
4425 %}
4427 operand mA6RegI() %{
4428 constraint(ALLOC_IN_RC(a6_reg));
4429 match(RegI);
4430 match(mRegI);
4432 format %{ "A6" %}
4433 interface(REG_INTER);
4434 %}
4436 operand mA7RegI() %{
4437 constraint(ALLOC_IN_RC(a7_reg));
4438 match(RegI);
4439 match(mRegI);
4441 format %{ "A7" %}
4442 interface(REG_INTER);
4443 %}
4445 operand mV0RegI() %{
4446 constraint(ALLOC_IN_RC(v0_reg));
4447 match(RegI);
4448 match(mRegI);
4450 format %{ "V0" %}
4451 interface(REG_INTER);
4452 %}
4454 operand mV1RegI() %{
4455 constraint(ALLOC_IN_RC(v1_reg));
4456 match(RegI);
4457 match(mRegI);
4459 format %{ "V1" %}
4460 interface(REG_INTER);
4461 %}
4463 operand mRegN() %{
4464 constraint(ALLOC_IN_RC(int_reg));
4465 match(RegN);
4467 format %{ %}
4468 interface(REG_INTER);
4469 %}
4471 operand t0_RegN() %{
4472 constraint(ALLOC_IN_RC(t0_reg));
4473 match(RegN);
4474 match(mRegN);
4476 format %{ %}
4477 interface(REG_INTER);
4478 %}
4480 operand t1_RegN() %{
4481 constraint(ALLOC_IN_RC(t1_reg));
4482 match(RegN);
4483 match(mRegN);
4485 format %{ %}
4486 interface(REG_INTER);
4487 %}
4489 operand t2_RegN() %{
4490 constraint(ALLOC_IN_RC(t2_reg));
4491 match(RegN);
4492 match(mRegN);
4494 format %{ %}
4495 interface(REG_INTER);
4496 %}
4498 operand t3_RegN() %{
4499 constraint(ALLOC_IN_RC(t3_reg));
4500 match(RegN);
4501 match(mRegN);
4503 format %{ %}
4504 interface(REG_INTER);
4505 %}
4507 operand t8_RegN() %{
4508 constraint(ALLOC_IN_RC(t8_reg));
4509 match(RegN);
4510 match(mRegN);
4512 format %{ %}
4513 interface(REG_INTER);
4514 %}
4516 operand t9_RegN() %{
4517 constraint(ALLOC_IN_RC(t9_reg));
4518 match(RegN);
4519 match(mRegN);
4521 format %{ %}
4522 interface(REG_INTER);
4523 %}
4525 operand a0_RegN() %{
4526 constraint(ALLOC_IN_RC(a0_reg));
4527 match(RegN);
4528 match(mRegN);
4530 format %{ %}
4531 interface(REG_INTER);
4532 %}
4534 operand a1_RegN() %{
4535 constraint(ALLOC_IN_RC(a1_reg));
4536 match(RegN);
4537 match(mRegN);
4539 format %{ %}
4540 interface(REG_INTER);
4541 %}
4543 operand a2_RegN() %{
4544 constraint(ALLOC_IN_RC(a2_reg));
4545 match(RegN);
4546 match(mRegN);
4548 format %{ %}
4549 interface(REG_INTER);
4550 %}
4552 operand a3_RegN() %{
4553 constraint(ALLOC_IN_RC(a3_reg));
4554 match(RegN);
4555 match(mRegN);
4557 format %{ %}
4558 interface(REG_INTER);
4559 %}
4561 operand a4_RegN() %{
4562 constraint(ALLOC_IN_RC(a4_reg));
4563 match(RegN);
4564 match(mRegN);
4566 format %{ %}
4567 interface(REG_INTER);
4568 %}
4570 operand a5_RegN() %{
4571 constraint(ALLOC_IN_RC(a5_reg));
4572 match(RegN);
4573 match(mRegN);
4575 format %{ %}
4576 interface(REG_INTER);
4577 %}
4579 operand a6_RegN() %{
4580 constraint(ALLOC_IN_RC(a6_reg));
4581 match(RegN);
4582 match(mRegN);
4584 format %{ %}
4585 interface(REG_INTER);
4586 %}
4588 operand a7_RegN() %{
4589 constraint(ALLOC_IN_RC(a7_reg));
4590 match(RegN);
4591 match(mRegN);
4593 format %{ %}
4594 interface(REG_INTER);
4595 %}
4597 operand s0_RegN() %{
4598 constraint(ALLOC_IN_RC(s0_reg));
4599 match(RegN);
4600 match(mRegN);
4602 format %{ %}
4603 interface(REG_INTER);
4604 %}
4606 operand s1_RegN() %{
4607 constraint(ALLOC_IN_RC(s1_reg));
4608 match(RegN);
4609 match(mRegN);
4611 format %{ %}
4612 interface(REG_INTER);
4613 %}
4615 operand s2_RegN() %{
4616 constraint(ALLOC_IN_RC(s2_reg));
4617 match(RegN);
4618 match(mRegN);
4620 format %{ %}
4621 interface(REG_INTER);
4622 %}
4624 operand s3_RegN() %{
4625 constraint(ALLOC_IN_RC(s3_reg));
4626 match(RegN);
4627 match(mRegN);
4629 format %{ %}
4630 interface(REG_INTER);
4631 %}
4633 operand s4_RegN() %{
4634 constraint(ALLOC_IN_RC(s4_reg));
4635 match(RegN);
4636 match(mRegN);
4638 format %{ %}
4639 interface(REG_INTER);
4640 %}
4642 operand s5_RegN() %{
4643 constraint(ALLOC_IN_RC(s5_reg));
4644 match(RegN);
4645 match(mRegN);
4647 format %{ %}
4648 interface(REG_INTER);
4649 %}
4651 operand s6_RegN() %{
4652 constraint(ALLOC_IN_RC(s6_reg));
4653 match(RegN);
4654 match(mRegN);
4656 format %{ %}
4657 interface(REG_INTER);
4658 %}
4660 operand s7_RegN() %{
4661 constraint(ALLOC_IN_RC(s7_reg));
4662 match(RegN);
4663 match(mRegN);
4665 format %{ %}
4666 interface(REG_INTER);
4667 %}
4669 operand v0_RegN() %{
4670 constraint(ALLOC_IN_RC(v0_reg));
4671 match(RegN);
4672 match(mRegN);
4674 format %{ %}
4675 interface(REG_INTER);
4676 %}
4678 operand v1_RegN() %{
4679 constraint(ALLOC_IN_RC(v1_reg));
4680 match(RegN);
4681 match(mRegN);
4683 format %{ %}
4684 interface(REG_INTER);
4685 %}
4687 // Pointer Register
4688 operand mRegP() %{
4689 constraint(ALLOC_IN_RC(p_reg));
4690 match(RegP);
4692 format %{ %}
4693 interface(REG_INTER);
4694 %}
4696 operand no_T8_mRegP() %{
4697 constraint(ALLOC_IN_RC(no_T8_p_reg));
4698 match(RegP);
4699 match(mRegP);
4701 format %{ %}
4702 interface(REG_INTER);
4703 %}
4705 operand s0_RegP()
4706 %{
4707 constraint(ALLOC_IN_RC(s0_long_reg));
4708 match(RegP);
4709 match(mRegP);
4710 match(no_T8_mRegP);
4712 format %{ %}
4713 interface(REG_INTER);
4714 %}
4716 operand s1_RegP()
4717 %{
4718 constraint(ALLOC_IN_RC(s1_long_reg));
4719 match(RegP);
4720 match(mRegP);
4721 match(no_T8_mRegP);
4723 format %{ %}
4724 interface(REG_INTER);
4725 %}
4727 operand s2_RegP()
4728 %{
4729 constraint(ALLOC_IN_RC(s2_long_reg));
4730 match(RegP);
4731 match(mRegP);
4732 match(no_T8_mRegP);
4734 format %{ %}
4735 interface(REG_INTER);
4736 %}
4738 operand s3_RegP()
4739 %{
4740 constraint(ALLOC_IN_RC(s3_long_reg));
4741 match(RegP);
4742 match(mRegP);
4743 match(no_T8_mRegP);
4745 format %{ %}
4746 interface(REG_INTER);
4747 %}
4749 operand s4_RegP()
4750 %{
4751 constraint(ALLOC_IN_RC(s4_long_reg));
4752 match(RegP);
4753 match(mRegP);
4754 match(no_T8_mRegP);
4756 format %{ %}
4757 interface(REG_INTER);
4758 %}
4760 operand s5_RegP()
4761 %{
4762 constraint(ALLOC_IN_RC(s5_long_reg));
4763 match(RegP);
4764 match(mRegP);
4765 match(no_T8_mRegP);
4767 format %{ %}
4768 interface(REG_INTER);
4769 %}
4771 operand s6_RegP()
4772 %{
4773 constraint(ALLOC_IN_RC(s6_long_reg));
4774 match(RegP);
4775 match(mRegP);
4776 match(no_T8_mRegP);
4778 format %{ %}
4779 interface(REG_INTER);
4780 %}
4782 operand s7_RegP()
4783 %{
4784 constraint(ALLOC_IN_RC(s7_long_reg));
4785 match(RegP);
4786 match(mRegP);
4787 match(no_T8_mRegP);
4789 format %{ %}
4790 interface(REG_INTER);
4791 %}
4793 operand t0_RegP()
4794 %{
4795 constraint(ALLOC_IN_RC(t0_long_reg));
4796 match(RegP);
4797 match(mRegP);
4798 match(no_T8_mRegP);
4800 format %{ %}
4801 interface(REG_INTER);
4802 %}
4804 operand t1_RegP()
4805 %{
4806 constraint(ALLOC_IN_RC(t1_long_reg));
4807 match(RegP);
4808 match(mRegP);
4809 match(no_T8_mRegP);
4811 format %{ %}
4812 interface(REG_INTER);
4813 %}
4815 operand t2_RegP()
4816 %{
4817 constraint(ALLOC_IN_RC(t2_long_reg));
4818 match(RegP);
4819 match(mRegP);
4820 match(no_T8_mRegP);
4822 format %{ %}
4823 interface(REG_INTER);
4824 %}
4826 operand t3_RegP()
4827 %{
4828 constraint(ALLOC_IN_RC(t3_long_reg));
4829 match(RegP);
4830 match(mRegP);
4831 match(no_T8_mRegP);
4833 format %{ %}
4834 interface(REG_INTER);
4835 %}
4837 operand t8_RegP()
4838 %{
4839 constraint(ALLOC_IN_RC(t8_long_reg));
4840 match(RegP);
4841 match(mRegP);
4843 format %{ %}
4844 interface(REG_INTER);
4845 %}
4847 operand t9_RegP()
4848 %{
4849 constraint(ALLOC_IN_RC(t9_long_reg));
4850 match(RegP);
4851 match(mRegP);
4852 match(no_T8_mRegP);
4854 format %{ %}
4855 interface(REG_INTER);
4856 %}
4858 operand a0_RegP()
4859 %{
4860 constraint(ALLOC_IN_RC(a0_long_reg));
4861 match(RegP);
4862 match(mRegP);
4863 match(no_T8_mRegP);
4865 format %{ %}
4866 interface(REG_INTER);
4867 %}
4869 operand a1_RegP()
4870 %{
4871 constraint(ALLOC_IN_RC(a1_long_reg));
4872 match(RegP);
4873 match(mRegP);
4874 match(no_T8_mRegP);
4876 format %{ %}
4877 interface(REG_INTER);
4878 %}
4880 operand a2_RegP()
4881 %{
4882 constraint(ALLOC_IN_RC(a2_long_reg));
4883 match(RegP);
4884 match(mRegP);
4885 match(no_T8_mRegP);
4887 format %{ %}
4888 interface(REG_INTER);
4889 %}
4891 operand a3_RegP()
4892 %{
4893 constraint(ALLOC_IN_RC(a3_long_reg));
4894 match(RegP);
4895 match(mRegP);
4896 match(no_T8_mRegP);
4898 format %{ %}
4899 interface(REG_INTER);
4900 %}
4902 operand a4_RegP()
4903 %{
4904 constraint(ALLOC_IN_RC(a4_long_reg));
4905 match(RegP);
4906 match(mRegP);
4907 match(no_T8_mRegP);
4909 format %{ %}
4910 interface(REG_INTER);
4911 %}
4914 operand a5_RegP()
4915 %{
4916 constraint(ALLOC_IN_RC(a5_long_reg));
4917 match(RegP);
4918 match(mRegP);
4919 match(no_T8_mRegP);
4921 format %{ %}
4922 interface(REG_INTER);
4923 %}
4925 operand a6_RegP()
4926 %{
4927 constraint(ALLOC_IN_RC(a6_long_reg));
4928 match(RegP);
4929 match(mRegP);
4930 match(no_T8_mRegP);
4932 format %{ %}
4933 interface(REG_INTER);
4934 %}
4936 operand a7_RegP()
4937 %{
4938 constraint(ALLOC_IN_RC(a7_long_reg));
4939 match(RegP);
4940 match(mRegP);
4941 match(no_T8_mRegP);
4943 format %{ %}
4944 interface(REG_INTER);
4945 %}
4947 operand v0_RegP()
4948 %{
4949 constraint(ALLOC_IN_RC(v0_long_reg));
4950 match(RegP);
4951 match(mRegP);
4952 match(no_T8_mRegP);
4954 format %{ %}
4955 interface(REG_INTER);
4956 %}
4958 operand v1_RegP()
4959 %{
4960 constraint(ALLOC_IN_RC(v1_long_reg));
4961 match(RegP);
4962 match(mRegP);
4963 match(no_T8_mRegP);
4965 format %{ %}
4966 interface(REG_INTER);
4967 %}
4969 /*
4970 operand mSPRegP(mRegP reg) %{
4971 constraint(ALLOC_IN_RC(sp_reg));
4972 match(reg);
4974 format %{ "SP" %}
4975 interface(REG_INTER);
4976 %}
4978 operand mFPRegP(mRegP reg) %{
4979 constraint(ALLOC_IN_RC(fp_reg));
4980 match(reg);
4982 format %{ "FP" %}
4983 interface(REG_INTER);
4984 %}
4985 */
4987 operand mRegL() %{
4988 constraint(ALLOC_IN_RC(long_reg));
4989 match(RegL);
4991 format %{ %}
4992 interface(REG_INTER);
4993 %}
4995 operand v0RegL() %{
4996 constraint(ALLOC_IN_RC(v0_long_reg));
4997 match(RegL);
4998 match(mRegL);
5000 format %{ %}
5001 interface(REG_INTER);
5002 %}
5004 operand v1RegL() %{
5005 constraint(ALLOC_IN_RC(v1_long_reg));
5006 match(RegL);
5007 match(mRegL);
5009 format %{ %}
5010 interface(REG_INTER);
5011 %}
// Long (64-bit) register operand pinned to A0.
// NOTE(review): unlike every other aXRegL/tXRegL/sXRegL operand in this file,
// this one supplies a non-empty format string ("A0"). The format only affects
// debug/disassembly printing, so it is harmless — but confirm whether the
// inconsistency is intentional before normalizing it.
5013 operand a0RegL() %{
5014 constraint(ALLOC_IN_RC(a0_long_reg));
5015 match(RegL);
5016 match(mRegL);
5018 format %{ "A0" %}
5019 interface(REG_INTER);
5020 %}
5022 operand a1RegL() %{
5023 constraint(ALLOC_IN_RC(a1_long_reg));
5024 match(RegL);
5025 match(mRegL);
5027 format %{ %}
5028 interface(REG_INTER);
5029 %}
5031 operand a2RegL() %{
5032 constraint(ALLOC_IN_RC(a2_long_reg));
5033 match(RegL);
5034 match(mRegL);
5036 format %{ %}
5037 interface(REG_INTER);
5038 %}
5040 operand a3RegL() %{
5041 constraint(ALLOC_IN_RC(a3_long_reg));
5042 match(RegL);
5043 match(mRegL);
5045 format %{ %}
5046 interface(REG_INTER);
5047 %}
5049 operand t0RegL() %{
5050 constraint(ALLOC_IN_RC(t0_long_reg));
5051 match(RegL);
5052 match(mRegL);
5054 format %{ %}
5055 interface(REG_INTER);
5056 %}
5058 operand t1RegL() %{
5059 constraint(ALLOC_IN_RC(t1_long_reg));
5060 match(RegL);
5061 match(mRegL);
5063 format %{ %}
5064 interface(REG_INTER);
5065 %}
5067 operand t2RegL() %{
5068 constraint(ALLOC_IN_RC(t2_long_reg));
5069 match(RegL);
5070 match(mRegL);
5072 format %{ %}
5073 interface(REG_INTER);
5074 %}
5076 operand t3RegL() %{
5077 constraint(ALLOC_IN_RC(t3_long_reg));
5078 match(RegL);
5079 match(mRegL);
5081 format %{ %}
5082 interface(REG_INTER);
5083 %}
5085 operand t8RegL() %{
5086 constraint(ALLOC_IN_RC(t8_long_reg));
5087 match(RegL);
5088 match(mRegL);
5090 format %{ %}
5091 interface(REG_INTER);
5092 %}
5094 operand a4RegL() %{
5095 constraint(ALLOC_IN_RC(a4_long_reg));
5096 match(RegL);
5097 match(mRegL);
5099 format %{ %}
5100 interface(REG_INTER);
5101 %}
5103 operand a5RegL() %{
5104 constraint(ALLOC_IN_RC(a5_long_reg));
5105 match(RegL);
5106 match(mRegL);
5108 format %{ %}
5109 interface(REG_INTER);
5110 %}
5112 operand a6RegL() %{
5113 constraint(ALLOC_IN_RC(a6_long_reg));
5114 match(RegL);
5115 match(mRegL);
5117 format %{ %}
5118 interface(REG_INTER);
5119 %}
5121 operand a7RegL() %{
5122 constraint(ALLOC_IN_RC(a7_long_reg));
5123 match(RegL);
5124 match(mRegL);
5126 format %{ %}
5127 interface(REG_INTER);
5128 %}
5130 operand s0RegL() %{
5131 constraint(ALLOC_IN_RC(s0_long_reg));
5132 match(RegL);
5133 match(mRegL);
5135 format %{ %}
5136 interface(REG_INTER);
5137 %}
5139 operand s1RegL() %{
5140 constraint(ALLOC_IN_RC(s1_long_reg));
5141 match(RegL);
5142 match(mRegL);
5144 format %{ %}
5145 interface(REG_INTER);
5146 %}
5148 operand s2RegL() %{
5149 constraint(ALLOC_IN_RC(s2_long_reg));
5150 match(RegL);
5151 match(mRegL);
5153 format %{ %}
5154 interface(REG_INTER);
5155 %}
5157 operand s3RegL() %{
5158 constraint(ALLOC_IN_RC(s3_long_reg));
5159 match(RegL);
5160 match(mRegL);
5162 format %{ %}
5163 interface(REG_INTER);
5164 %}
5166 operand s4RegL() %{
5167 constraint(ALLOC_IN_RC(s4_long_reg));
5168 match(RegL);
5169 match(mRegL);
5171 format %{ %}
5172 interface(REG_INTER);
5173 %}
5175 operand s7RegL() %{
5176 constraint(ALLOC_IN_RC(s7_long_reg));
5177 match(RegL);
5178 match(mRegL);
5180 format %{ %}
5181 interface(REG_INTER);
5182 %}
5184 // Floating register operands
5185 operand regF() %{
5186 constraint(ALLOC_IN_RC(flt_reg));
5187 match(RegF);
5189 format %{ %}
5190 interface(REG_INTER);
5191 %}
5193 //Double Precision Floating register operands
5194 operand regD() %{
5195 constraint(ALLOC_IN_RC(dbl_reg));
5196 match(RegD);
5198 format %{ %}
5199 interface(REG_INTER);
5200 %}
5202 //----------Memory Operands----------------------------------------------------
5203 // Indirect Memory Operand
// Plain register-indirect addressing mode: a single pointer register supplies
// the whole effective address (no index register, no scale, no displacement).
5204 operand indirect(mRegP reg) %{
5205 constraint(ALLOC_IN_RC(p_reg));
5206 match(reg);
5208 format %{ "[$reg] @ indirect" %}
5209 interface(MEMORY_INTER) %{
// MEMORY_INTER fields tell the matcher how to decompose this address:
5210 base($reg);
5211 index(0x0); /* NO_INDEX */
5212 scale(0x0);
5213 disp(0x0);
5214 %}
5215 %}
5217 // Indirect Memory Plus Short Offset Operand
5218 operand indOffset8(mRegP reg, immL8 off)
5219 %{
5220 constraint(ALLOC_IN_RC(p_reg));
5221 match(AddP reg off);
5223 op_cost(10);
5224 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5225 interface(MEMORY_INTER) %{
5226 base($reg);
5227 index(0x0); /* NO_INDEX */
5228 scale(0x0);
5229 disp($off);
5230 %}
5231 %}
5233 // Indirect Memory Times Scale Plus Index Register
5234 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5235 %{
5236 constraint(ALLOC_IN_RC(p_reg));
5237 match(AddP reg (LShiftL lreg scale));
5239 op_cost(10);
5240 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5241 interface(MEMORY_INTER) %{
5242 base($reg);
5243 index($lreg);
5244 scale($scale);
5245 disp(0x0);
5246 %}
5247 %}
5250 // [base + index + offset]
5251 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5252 %{
5253 constraint(ALLOC_IN_RC(p_reg));
5254 op_cost(5);
5255 match(AddP (AddP base index) off);
5257 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5258 interface(MEMORY_INTER) %{
5259 base($base);
5260 index($index);
5261 scale(0x0);
5262 disp($off);
5263 %}
5264 %}
5266 // [base + index + offset]
5267 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5268 %{
5269 constraint(ALLOC_IN_RC(p_reg));
5270 op_cost(5);
5271 match(AddP (AddP base (ConvI2L index)) off);
5273 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5274 interface(MEMORY_INTER) %{
5275 base($base);
5276 index($index);
5277 scale(0x0);
5278 disp($off);
5279 %}
5280 %}
5282 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5283 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5284 %{
5285 constraint(ALLOC_IN_RC(p_reg));
5286 match(AddP (AddP reg (LShiftL lreg scale)) off);
5288 op_cost(10);
5289 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5290 interface(MEMORY_INTER) %{
5291 base($reg);
5292 index($lreg);
5293 scale($scale);
5294 disp($off);
5295 %}
5296 %}
5298 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5299 %{
5300 constraint(ALLOC_IN_RC(p_reg));
5301 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5303 op_cost(10);
5304 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5305 interface(MEMORY_INTER) %{
5306 base($reg);
5307 index($ireg);
5308 scale($scale);
5309 disp($off);
5310 %}
5311 %}
5313 // [base + index<<scale + offset]
5314 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5315 %{
5316 constraint(ALLOC_IN_RC(p_reg));
5317 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5318 op_cost(10);
5319 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5321 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5322 interface(MEMORY_INTER) %{
5323 base($base);
5324 index($index);
5325 scale($scale);
5326 disp($off);
5327 %}
5328 %}
5330 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Compressed-oop base + scaled index + 8-bit offset:
//   [narrow_base + (lreg << scale) + off]
// The predicate restricts this to narrow_oop_shift() == 0, i.e. the mode in
// which DecodeN does not need to shift the compressed value, so the narrow
// register can feed the address computation directly.
// NOTE(review): folding DecodeN into the address like this presumably also
// assumes the heap base is handled elsewhere (or is null) in this mode —
// confirm against the DecodeN/encoding rules before relying on it.
5331 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5332 %{
5333 predicate(Universe::narrow_oop_shift() == 0);
5334 constraint(ALLOC_IN_RC(p_reg));
5335 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5337 op_cost(10);
5338 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5339 interface(MEMORY_INTER) %{
5340 base($reg);
5341 index($lreg);
5342 scale($scale);
5343 disp($off);
5344 %}
5345 %}
5347 // [base + index<<scale + offset] for compressed Oops
5348 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5349 %{
5350 constraint(ALLOC_IN_RC(p_reg));
5351 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5352 predicate(Universe::narrow_oop_shift() == 0);
5353 op_cost(10);
5354 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5356 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5357 interface(MEMORY_INTER) %{
5358 base($base);
5359 index($index);
5360 scale($scale);
5361 disp($off);
5362 %}
5363 %}
5365 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5366 // Indirect Memory Plus Long Offset Operand
5367 operand indOffset32(mRegP reg, immL32 off) %{
5368 constraint(ALLOC_IN_RC(p_reg));
5369 op_cost(20);
5370 match(AddP reg off);
5372 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5373 interface(MEMORY_INTER) %{
5374 base($reg);
5375 index(0x0); /* NO_INDEX */
5376 scale(0x0);
5377 disp($off);
5378 %}
5379 %}
5381 // Indirect Memory Plus Index Register
5382 operand indIndex(mRegP addr, mRegL index) %{
5383 constraint(ALLOC_IN_RC(p_reg));
5384 match(AddP addr index);
5386 op_cost(20);
5387 format %{"[$addr + $index] @ indIndex" %}
5388 interface(MEMORY_INTER) %{
5389 base($addr);
5390 index($index);
5391 scale(0x0);
5392 disp(0x0);
5393 %}
5394 %}
5396 operand indirectNarrowKlass(mRegN reg)
5397 %{
5398 predicate(Universe::narrow_klass_shift() == 0);
5399 constraint(ALLOC_IN_RC(p_reg));
5400 op_cost(10);
5401 match(DecodeNKlass reg);
5403 format %{ "[$reg] @ indirectNarrowKlass" %}
5404 interface(MEMORY_INTER) %{
5405 base($reg);
5406 index(0x0);
5407 scale(0x0);
5408 disp(0x0);
5409 %}
5410 %}
5412 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5413 %{
5414 predicate(Universe::narrow_klass_shift() == 0);
5415 constraint(ALLOC_IN_RC(p_reg));
5416 op_cost(10);
5417 match(AddP (DecodeNKlass reg) off);
5419 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5420 interface(MEMORY_INTER) %{
5421 base($reg);
5422 index(0x0);
5423 scale(0x0);
5424 disp($off);
5425 %}
5426 %}
5428 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5429 %{
5430 predicate(Universe::narrow_klass_shift() == 0);
5431 constraint(ALLOC_IN_RC(p_reg));
5432 op_cost(10);
5433 match(AddP (DecodeNKlass reg) off);
5435 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5436 interface(MEMORY_INTER) %{
5437 base($reg);
5438 index(0x0);
5439 scale(0x0);
5440 disp($off);
5441 %}
5442 %}
5444 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5445 %{
5446 predicate(Universe::narrow_klass_shift() == 0);
5447 constraint(ALLOC_IN_RC(p_reg));
5448 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5450 op_cost(10);
5451 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5452 interface(MEMORY_INTER) %{
5453 base($reg);
5454 index($lreg);
5455 scale(0x0);
5456 disp($off);
5457 %}
5458 %}
5460 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5461 %{
5462 predicate(Universe::narrow_klass_shift() == 0);
5463 constraint(ALLOC_IN_RC(p_reg));
5464 match(AddP (DecodeNKlass reg) lreg);
5466 op_cost(10);
5467 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5468 interface(MEMORY_INTER) %{
5469 base($reg);
5470 index($lreg);
5471 scale(0x0);
5472 disp(0x0);
5473 %}
5474 %}
5476 // Indirect Memory Operand
// Register-indirect through a compressed oop: matches DecodeN of a narrow
// register and uses the narrow register itself as the address base.
// Restricted by the predicate to narrow_oop_shift() == 0, where the decode
// needs no shift.
5477 operand indirectNarrow(mRegN reg)
5478 %{
5479 predicate(Universe::narrow_oop_shift() == 0);
5480 constraint(ALLOC_IN_RC(p_reg));
5481 op_cost(10);
5482 match(DecodeN reg);
5484 format %{ "[$reg] @ indirectNarrow" %}
5485 interface(MEMORY_INTER) %{
5486 base($reg);
5487 index(0x0);
5488 scale(0x0);
5489 disp(0x0);
5490 %}
5491 %}
5493 // Indirect Memory Plus Short Offset Operand
5494 operand indOffset8Narrow(mRegN reg, immL8 off)
5495 %{
5496 predicate(Universe::narrow_oop_shift() == 0);
5497 constraint(ALLOC_IN_RC(p_reg));
5498 op_cost(10);
5499 match(AddP (DecodeN reg) off);
5501 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5502 interface(MEMORY_INTER) %{
5503 base($reg);
5504 index(0x0);
5505 scale(0x0);
5506 disp($off);
5507 %}
5508 %}
5510 // Indirect Memory Plus Index Register Plus Offset Operand
5511 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5512 %{
5513 predicate(Universe::narrow_oop_shift() == 0);
5514 constraint(ALLOC_IN_RC(p_reg));
5515 match(AddP (AddP (DecodeN reg) lreg) off);
5517 op_cost(10);
5518 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5519 interface(MEMORY_INTER) %{
5520 base($reg);
5521 index($lreg);
5522 scale(0x0);
5523 disp($off);
5524 %}
5525 %}
5527 //----------Load Long Memory Operands------------------------------------------
5528 // The load-long idiom will use its address expression again after loading
5529 // the first word of the long. If the load-long destination overlaps with
5530 // registers used in the addressing expression, the 2nd half will be loaded
5531 // from a clobbered address. Fix this by requiring that load-long use
5532 // address registers that do not overlap with the load-long target.
5534 // load-long support
5535 operand load_long_RegP() %{
5536 constraint(ALLOC_IN_RC(p_reg));
5537 match(RegP);
5538 match(mRegP);
5539 op_cost(100);
5540 format %{ %}
5541 interface(REG_INTER);
5542 %}
5544 // Indirect Memory Operand Long
5545 operand load_long_indirect(load_long_RegP reg) %{
5546 constraint(ALLOC_IN_RC(p_reg));
5547 match(reg);
5549 format %{ "[$reg]" %}
5550 interface(MEMORY_INTER) %{
5551 base($reg);
5552 index(0x0);
5553 scale(0x0);
5554 disp(0x0);
5555 %}
5556 %}
5558 // Indirect Memory Plus Long Offset Operand
5559 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5560 match(AddP reg off);
5562 format %{ "[$reg + $off]" %}
5563 interface(MEMORY_INTER) %{
5564 base($reg);
5565 index(0x0);
5566 scale(0x0);
5567 disp($off);
5568 %}
5569 %}
5571 //----------Conditional Branch Operands----------------------------------------
5572 // Comparison Op - This is the operation of the comparison, and is limited to
5573 // the following set of codes:
5574 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5575 //
5576 // Other attributes of the comparison, such as unsignedness, are specified
5577 // by the comparison instruction that sets a condition code flags register.
5578 // That result is represented by a flags operand whose subtype is appropriate
5579 // to the unsignedness (etc.) of the comparison.
5580 //
5581 // Later, the instruction which matches both the Comparison Op (a Bool) and
5582 // the flags (produced by the Cmp) specifies the coding of the comparison op
5583 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5585 // Comparison Code
// Condition-code operand: matches a Bool node and maps each comparison
// relation to a small integer selector. The values below are arbitrary
// encodings consumed by the instruct encodings that take a cmpOp; they are
// not hardware condition codes.
5586 operand cmpOp() %{
5587 match(Bool);
5589 format %{ "" %}
5590 interface(COND_INTER) %{
5591 equal(0x01);
5592 not_equal(0x02);
5593 greater(0x03);
5594 greater_equal(0x04);
5595 less(0x05);
5596 less_equal(0x06);
5597 overflow(0x7);
5598 no_overflow(0x8);
5599 %}
5600 %}
5603 // Comparison Code
5604 // Comparison Code, unsigned compare. Used by FP also, with
5605 // C2 (unordered) turned into GT or LT already. The other bits
5606 // C0 and C3 are turned into Carry & Zero flags.
5607 operand cmpOpU() %{
5608 match(Bool);
5610 format %{ "" %}
5611 interface(COND_INTER) %{
5612 equal(0x01);
5613 not_equal(0x02);
5614 greater(0x03);
5615 greater_equal(0x04);
5616 less(0x05);
5617 less_equal(0x06);
5618 overflow(0x7);
5619 no_overflow(0x8);
5620 %}
5621 %}
5623 /*
5624 // Comparison Code, unsigned compare. Used by FP also, with
5625 // C2 (unordered) turned into GT or LT already. The other bits
5626 // C0 and C3 are turned into Carry & Zero flags.
5627 operand cmpOpU() %{
5628 match(Bool);
5630 format %{ "" %}
5631 interface(COND_INTER) %{
5632 equal(0x4);
5633 not_equal(0x5);
5634 less(0x2);
5635 greater_equal(0x3);
5636 less_equal(0x6);
5637 greater(0x7);
5638 %}
5639 %}
5640 */
5641 /*
5642 // Comparison Code for FP conditional move
5643 operand cmpOp_fcmov() %{
5644 match(Bool);
5646 format %{ "" %}
5647 interface(COND_INTER) %{
5648 equal (0x01);
5649 not_equal (0x02);
5650 greater (0x03);
5651 greater_equal(0x04);
5652 less (0x05);
5653 less_equal (0x06);
5654 %}
5655 %}
5657 // Comparison Code used in long compares
5658 operand cmpOp_commute() %{
5659 match(Bool);
5661 format %{ "" %}
5662 interface(COND_INTER) %{
5663 equal(0x4);
5664 not_equal(0x5);
5665 less(0xF);
5666 greater_equal(0xE);
5667 less_equal(0xD);
5668 greater(0xC);
5669 %}
5670 %}
5671 */
5673 //----------Special Memory Operands--------------------------------------------
5674 // Stack Slot Operand - This operand is used for loading and storing temporary
5675 // values on the stack where a match requires a value to
5676 // flow through memory.
5677 operand stackSlotP(sRegP reg) %{
5678 constraint(ALLOC_IN_RC(stack_slots));
5679 // No match rule because this operand is only generated in matching
5680 op_cost(50);
5681 format %{ "[$reg]" %}
5682 interface(MEMORY_INTER) %{
5683 base(0x1d); // SP
5684 index(0x0); // No Index
5685 scale(0x0); // No Scale
5686 disp($reg); // Stack Offset
5687 %}
5688 %}
5690 operand stackSlotI(sRegI reg) %{
5691 constraint(ALLOC_IN_RC(stack_slots));
5692 // No match rule because this operand is only generated in matching
5693 op_cost(50);
5694 format %{ "[$reg]" %}
5695 interface(MEMORY_INTER) %{
5696 base(0x1d); // SP
5697 index(0x0); // No Index
5698 scale(0x0); // No Scale
5699 disp($reg); // Stack Offset
5700 %}
5701 %}
5703 operand stackSlotF(sRegF reg) %{
5704 constraint(ALLOC_IN_RC(stack_slots));
5705 // No match rule because this operand is only generated in matching
5706 op_cost(50);
5707 format %{ "[$reg]" %}
5708 interface(MEMORY_INTER) %{
5709 base(0x1d); // SP
5710 index(0x0); // No Index
5711 scale(0x0); // No Scale
5712 disp($reg); // Stack Offset
5713 %}
5714 %}
5716 operand stackSlotD(sRegD reg) %{
5717 constraint(ALLOC_IN_RC(stack_slots));
5718 // No match rule because this operand is only generated in matching
5719 op_cost(50);
5720 format %{ "[$reg]" %}
5721 interface(MEMORY_INTER) %{
5722 base(0x1d); // SP
5723 index(0x0); // No Index
5724 scale(0x0); // No Scale
5725 disp($reg); // Stack Offset
5726 %}
5727 %}
5729 operand stackSlotL(sRegL reg) %{
5730 constraint(ALLOC_IN_RC(stack_slots));
5731 // No match rule because this operand is only generated in matching
5732 op_cost(50);
5733 format %{ "[$reg]" %}
5734 interface(MEMORY_INTER) %{
5735 base(0x1d); // SP
5736 index(0x0); // No Index
5737 scale(0x0); // No Scale
5738 disp($reg); // Stack Offset
5739 %}
5740 %}
5743 //------------------------OPERAND CLASSES--------------------------------------
5744 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// The "memory" opclass unions every addressing-mode operand defined above so
// that a single instruct rule can match any of them as its memory input.
// It includes both the plain-pointer forms and the compressed-oop (Narrow)
// forms; the NarrowKlass forms are deliberately not listed here.
5745 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5748 //----------PIPELINE-----------------------------------------------------------
5749 // Rules which define the behavior of the target architectures pipeline.
5751 pipeline %{
5753 //----------ATTRIBUTES---------------------------------------------------------
5754 attributes %{
5755 fixed_size_instructions; // Fixed size instructions
5756 branch_has_delay_slot; // branch have delay slot in gs2
5757 max_instructions_per_bundle = 1; // 1 instruction per bundle
5758 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5759 bundle_unit_size=4;
5760 instruction_unit_size = 4; // An instruction is 4 bytes long
5761 instruction_fetch_unit_size = 16; // The processor fetches one line
5762 instruction_fetch_units = 1; // of 16 bytes
5764 // List of nop instructions
5765 nops( MachNop );
5766 %}
5768 //----------RESOURCES----------------------------------------------------------
5769 // Resources are the functional units available to the machine
5771 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5773 //----------PIPELINE DESCRIPTION-----------------------------------------------
5774 // Pipeline Description specifies the stages in the machine's pipeline
5776 // IF: fetch
5777 // ID: decode
5778 // RD: read
5779 // CA: calculate
5780 // WB: write back
5781 // CM: commit
5783 pipe_desc(IF, ID, RD, CA, WB, CM);
5786 //----------PIPELINE CLASSES---------------------------------------------------
5787 // Pipeline Classes describe the stages in which input and output are
5788 // referenced by the hardware pipeline.
5790 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
// Stage names (ID, RD, CA, WB) refer to the pipe_desc above:
// sources are read in RD, the ALU works in CA, and the result is available
// one cycle after WB (the "+1").
5791 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5792 single_instruction;
5793 src1 : RD(read);
5794 src2 : RD(read);
5795 dst : WB(write)+1;
5796 DECODE : ID;
5797 ALU : CA;
5798 %}
5800 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5801 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5802 src1 : RD(read);
5803 src2 : RD(read);
5804 dst : WB(write)+5;
5805 DECODE : ID;
5806 ALU2 : CA;
5807 %}
5809 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5810 src1 : RD(read);
5811 src2 : RD(read);
5812 dst : WB(write)+10;
5813 DECODE : ID;
5814 ALU2 : CA;
5815 %}
5817 //No.19 Integer div operation : dst <-- reg1 div reg2
5818 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5819 src1 : RD(read);
5820 src2 : RD(read);
5821 dst : WB(write)+10;
5822 DECODE : ID;
5823 ALU2 : CA;
5824 %}
5826 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5827 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5828 instruction_count(2);
5829 src1 : RD(read);
5830 src2 : RD(read);
5831 dst : WB(write)+10;
5832 DECODE : ID;
5833 ALU2 : CA;
5834 %}
5836 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5837 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5838 instruction_count(2);
5839 src1 : RD(read);
5840 src2 : RD(read);
5841 dst : WB(write);
5842 DECODE : ID;
5843 ALU : CA;
5844 %}
5846 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5847 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5848 instruction_count(2);
5849 src : RD(read);
5850 dst : WB(write);
5851 DECODE : ID;
5852 ALU : CA;
5853 %}
5855 //no.16 load Long from memory :
5856 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5857 instruction_count(2);
5858 mem : RD(read);
5859 dst : WB(write)+5;
5860 DECODE : ID;
5861 MEM : RD;
5862 %}
5864 //No.17 Store Long to Memory :
5865 pipe_class ialu_storeL(mRegL src, memory mem) %{
5866 instruction_count(2);
5867 mem : RD(read);
5868 src : RD(read);
5869 DECODE : ID;
5870 MEM : RD;
5871 %}
5873 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5874 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5875 single_instruction;
5876 src : RD(read);
5877 dst : WB(write);
5878 DECODE : ID;
5879 ALU : CA;
5880 %}
5882 //No.3 Integer move operation : dst <-- reg
5883 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5884 src : RD(read);
5885 dst : WB(write);
5886 DECODE : ID;
5887 ALU : CA;
5888 %}
5890 //No.4 No instructions : do nothing
5891 pipe_class empty( ) %{
5892 instruction_count(0);
5893 %}
5895 //No.5 UnConditional branch :
5896 pipe_class pipe_jump( label labl ) %{
5897 multiple_bundles;
5898 DECODE : ID;
5899 BR : RD;
5900 %}
5902 //No.6 ALU Conditional branch :
5903 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5904 multiple_bundles;
5905 src1 : RD(read);
5906 src2 : RD(read);
5907 DECODE : ID;
5908 BR : RD;
5909 %}
5911 //no.7 load integer from memory :
5912 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5913 mem : RD(read);
5914 dst : WB(write)+3;
5915 DECODE : ID;
5916 MEM : RD;
5917 %}
5919 //No.8 Store Integer to Memory :
5920 pipe_class ialu_storeI(mRegI src, memory mem) %{
5921 mem : RD(read);
5922 src : RD(read);
5923 DECODE : ID;
5924 MEM : RD;
5925 %}
5928 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5929 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5930 src1 : RD(read);
5931 src2 : RD(read);
5932 dst : WB(write);
5933 DECODE : ID;
5934 FPU : CA;
5935 %}
5937 //No.22 Floating div operation : dst <-- reg1 div reg2
5938 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5939 src1 : RD(read);
5940 src2 : RD(read);
5941 dst : WB(write);
5942 DECODE : ID;
5943 FPU2 : CA;
5944 %}
5946 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5947 src : RD(read);
5948 dst : WB(write);
5949 DECODE : ID;
5950 FPU1 : CA;
5951 %}
5953 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5954 src : RD(read);
5955 dst : WB(write);
5956 DECODE : ID;
5957 FPU1 : CA;
5958 %}
5960 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5961 src : RD(read);
5962 dst : WB(write);
5963 DECODE : ID;
5964 MEM : RD;
5965 %}
5967 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5968 src : RD(read);
5969 dst : WB(write);
5970 DECODE : ID;
5971 MEM : RD(5);
5972 %}
5974 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5975 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5976 multiple_bundles;
5977 src1 : RD(read);
5978 src2 : RD(read);
5979 dst : WB(write);
5980 DECODE : ID;
5981 FPU2 : CA;
5982 %}
5984 //No.11 Load Floating from Memory :
5985 pipe_class fpu_loadF(regF dst, memory mem) %{
5986 instruction_count(1);
5987 mem : RD(read);
5988 dst : WB(write)+3;
5989 DECODE : ID;
5990 MEM : RD;
5991 %}
5993 //No.12 Store Floating to Memory :
5994 pipe_class fpu_storeF(regF src, memory mem) %{
5995 instruction_count(1);
5996 mem : RD(read);
5997 src : RD(read);
5998 DECODE : ID;
5999 MEM : RD;
6000 %}
6002 //No.13 FPU Conditional branch :
6003 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
6004 multiple_bundles;
6005 src1 : RD(read);
6006 src2 : RD(read);
6007 DECODE : ID;
6008 BR : RD;
6009 %}
6011 //No.14 Floating FPU reg operation : dst <-- op reg
6012 pipe_class fpu1_regF(regF dst, regF src) %{
6013 src : RD(read);
6014 dst : WB(write);
6015 DECODE : ID;
6016 FPU : CA;
6017 %}
6019 pipe_class long_memory_op() %{
6020 instruction_count(10); multiple_bundles; force_serialization;
6021 fixed_latency(30);
6022 %}
6024 pipe_class simple_call() %{
6025 instruction_count(10); multiple_bundles; force_serialization;
6026 fixed_latency(200);
6027 BR : RD;
6028 %}
6030 pipe_class call() %{
6031 instruction_count(10); multiple_bundles; force_serialization;
6032 fixed_latency(200);
6033 %}
//FIXME:
//No.9 Pipe slow : for multi-instruction sequences
6037 pipe_class pipe_slow( ) %{
6038 instruction_count(20);
6039 force_serialization;
6040 multiple_bundles;
6041 fixed_latency(50);
6042 %}
6044 %}
6048 //----------INSTRUCTIONS-------------------------------------------------------
6049 //
6050 // match -- States which machine-independent subtree may be replaced
6051 // by this instruction.
6052 // ins_cost -- The estimated cost of this instruction is used by instruction
6053 // selection to identify a minimum cost tree of machine
6054 // instructions that matches a tree of machine-independent
6055 // instructions.
6056 // format -- A string providing the disassembly for this instruction.
6057 // The value of an instruction's operand may be inserted
6058 // by referring to it with a '$' prefix.
6059 // opcode -- Three instruction opcodes may be provided. These are referred
6060 // to within an encode class as $primary, $secondary, and $tertiary
6061 // respectively. The primary opcode is commonly used to
6062 // indicate the type of machine instruction, while secondary
6063 // and tertiary are often used for prefix options or addressing
6064 // modes.
6065 // ins_encode -- A list of encode classes with parameters. The encode class
6066 // name must have been defined in an 'enc_class' specification
6067 // in the encode section of the architecture description.
6070 // Load Integer
6071 instruct loadI(mRegI dst, memory mem) %{
6072 match(Set dst (LoadI mem));
6074 ins_cost(125);
6075 format %{ "lw $dst, $mem #@loadI" %}
6076 ins_encode (load_I_enc(dst, mem));
6077 ins_pipe( ialu_loadI );
6078 %}
6080 instruct loadI_convI2L(mRegL dst, memory mem) %{
6081 match(Set dst (ConvI2L (LoadI mem)));
6083 ins_cost(125);
6084 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6085 ins_encode (load_I_enc(dst, mem));
6086 ins_pipe( ialu_loadI );
6087 %}
6089 // Load Integer (32 bit signed) to Byte (8 bit signed)
6090 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6091 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6093 ins_cost(125);
6094 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6095 ins_encode(load_B_enc(dst, mem));
6096 ins_pipe(ialu_loadI);
6097 %}
6099 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
6100 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6101 match(Set dst (AndI (LoadI mem) mask));
6103 ins_cost(125);
6104 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6105 ins_encode(load_UB_enc(dst, mem));
6106 ins_pipe(ialu_loadI);
6107 %}
6109 // Load Integer (32 bit signed) to Short (16 bit signed)
6110 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6111 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6113 ins_cost(125);
6114 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6115 ins_encode(load_S_enc(dst, mem));
6116 ins_pipe(ialu_loadI);
6117 %}
6119 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
6120 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6121 match(Set dst (AndI (LoadI mem) mask));
6123 ins_cost(125);
6124 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6125 ins_encode(load_C_enc(dst, mem));
6126 ins_pipe(ialu_loadI);
6127 %}
6129 // Load Long.
6130 instruct loadL(mRegL dst, memory mem) %{
6131 // predicate(!((LoadLNode*)n)->require_atomic_access());
6132 match(Set dst (LoadL mem));
6134 ins_cost(250);
6135 format %{ "ld $dst, $mem #@loadL" %}
6136 ins_encode(load_L_enc(dst, mem));
6137 ins_pipe( ialu_loadL );
6138 %}
6140 // Load Long - UNaligned
6141 instruct loadL_unaligned(mRegL dst, memory mem) %{
6142 match(Set dst (LoadL_unaligned mem));
6144 // FIXME: Jin: Need more effective ldl/ldr
6145 ins_cost(450);
6146 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6147 ins_encode(load_L_enc(dst, mem));
6148 ins_pipe( ialu_loadL );
6149 %}
6151 // Store Long
6152 instruct storeL_reg(memory mem, mRegL src) %{
6153 match(Set mem (StoreL mem src));
6155 ins_cost(200);
6156 format %{ "sd $mem, $src #@storeL_reg\n" %}
6157 ins_encode(store_L_reg_enc(mem, src));
6158 ins_pipe( ialu_storeL );
6159 %}
6162 instruct storeL_immL0(memory mem, immL0 zero) %{
6163 match(Set mem (StoreL mem zero));
6165 ins_cost(180);
6166 format %{ "sd $mem, zero #@storeL_immL0" %}
6167 ins_encode(store_L_immL0_enc(mem, zero));
6168 ins_pipe( ialu_storeL );
6169 %}
// Load Compressed Pointer
// Loads a 32-bit narrow oop with lwu (zero-extending load); the value
// stays in compressed (mRegN) form for a later DecodeN.
instruct loadN(mRegN dst, memory mem)
%{
   match(Set dst (LoadN mem));

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# compressed ptr @ loadN" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}

// Fused load + decode of a compressed pointer into a full 64-bit oop.
// Only legal for zero-based, unshifted compressed oops: with no heap
// base to add and no shift to apply, the zero-extended lwu result IS
// already the uncompressed pointer, so DecodeN needs no extra code.
instruct loadN2P(mRegP dst, memory mem)
%{
   match(Set dst (DecodeN (LoadN mem)));
   // Guard: narrow oop value == real pointer value.
   predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# @ loadN2P" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
6193 // Load Pointer
6194 instruct loadP(mRegP dst, memory mem) %{
6195 match(Set dst (LoadP mem));
6197 ins_cost(125);
6198 format %{ "ld $dst, $mem #@loadP" %}
6199 ins_encode (load_P_enc(dst, mem));
6200 ins_pipe( ialu_loadI );
6201 %}
6203 // Load Klass Pointer
6204 instruct loadKlass(mRegP dst, memory mem) %{
6205 match(Set dst (LoadKlass mem));
6207 ins_cost(125);
6208 format %{ "MOV $dst,$mem @ loadKlass" %}
6209 ins_encode (load_P_enc(dst, mem));
6210 ins_pipe( ialu_loadI );
6211 %}
// Load narrow Klass Pointer
// Loads a 32-bit compressed klass pointer with lwu (zero-extending);
// the value stays in compressed (mRegN) form.
instruct loadNKlass(mRegN dst, memory mem)
%{
   match(Set dst (LoadNKlass mem));

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}

// Fused load + decode of a compressed klass pointer. Only legal when
// compressed class pointers use no base and no shift, so the raw lwu
// already yields the full 64-bit Klass*.
instruct loadN2PKlass(mRegP dst, memory mem)
%{
   match(Set dst (DecodeNKlass (LoadNKlass mem)));
   // Guard: narrow klass value == real pointer value.
   predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
6235 // Load Constant
6236 instruct loadConI(mRegI dst, immI src) %{
6237 match(Set dst src);
6239 ins_cost(150);
6240 format %{ "mov $dst, $src #@loadConI" %}
6241 ins_encode %{
6242 Register dst = $dst$$Register;
6243 int value = $src$$constant;
6244 __ move(dst, value);
6245 %}
6246 ins_pipe( ialu_regI_regI );
6247 %}
6250 instruct loadConL_set64(mRegL dst, immL src) %{
6251 match(Set dst src);
6252 ins_cost(120);
6253 format %{ "li $dst, $src @ loadConL_set64" %}
6254 ins_encode %{
6255 __ set64($dst$$Register, $src$$constant);
6256 %}
6257 ins_pipe(ialu_regL_regL);
6258 %}
6260 /*
6261 // Load long value from constant table (predicated by immL_expensive).
6262 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6263 match(Set dst src);
6264 ins_cost(150);
6265 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6266 ins_encode %{
6267 int con_offset = $constantoffset($src);
6269 if (Assembler::is_simm16(con_offset)) {
6270 __ ld($dst$$Register, $constanttablebase, con_offset);
6271 } else {
6272 __ set64(AT, con_offset);
6273 if (UseLoongsonISA) {
6274 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6275 } else {
6276 __ daddu(AT, $constanttablebase, AT);
6277 __ ld($dst$$Register, AT, 0);
6278 }
6279 }
6280 %}
6281 ins_pipe(ialu_loadI);
6282 %}
6283 */
6285 instruct loadConL16(mRegL dst, immL16 src) %{
6286 match(Set dst src);
6287 ins_cost(105);
6288 format %{ "mov $dst, $src #@loadConL16" %}
6289 ins_encode %{
6290 Register dst_reg = as_Register($dst$$reg);
6291 int value = $src$$constant;
6292 __ daddiu(dst_reg, R0, value);
6293 %}
6294 ins_pipe( ialu_regL_regL );
6295 %}
6298 instruct loadConL0(mRegL dst, immL0 src) %{
6299 match(Set dst src);
6300 ins_cost(100);
6301 format %{ "mov $dst, zero #@loadConL0" %}
6302 ins_encode %{
6303 Register dst_reg = as_Register($dst$$reg);
6304 __ daddu(dst_reg, R0, R0);
6305 %}
6306 ins_pipe( ialu_regL_regL );
6307 %}
6309 // Load Range
6310 instruct loadRange(mRegI dst, memory mem) %{
6311 match(Set dst (LoadRange mem));
6313 ins_cost(125);
6314 format %{ "MOV $dst,$mem @ loadRange" %}
6315 ins_encode(load_I_enc(dst, mem));
6316 ins_pipe( ialu_loadI );
6317 %}
6320 instruct storeP(memory mem, mRegP src ) %{
6321 match(Set mem (StoreP mem src));
6323 ins_cost(125);
6324 format %{ "sd $src, $mem #@storeP" %}
6325 ins_encode(store_P_reg_enc(mem, src));
6326 ins_pipe( ialu_storeI );
6327 %}
6329 // Store NULL Pointer, mark word, or other simple pointer constant.
6330 instruct storeImmP0(memory mem, immP0 zero) %{
6331 match(Set mem (StoreP mem zero));
6333 ins_cost(125);
6334 format %{ "mov $mem, $zero #@storeImmP0" %}
6335 ins_encode(store_P_immP0_enc(mem));
6336 ins_pipe( ialu_storeI );
6337 %}
6339 // Store Byte Immediate
6340 instruct storeImmB(memory mem, immI8 src) %{
6341 match(Set mem (StoreB mem src));
6343 ins_cost(150);
6344 format %{ "movb $mem, $src #@storeImmB" %}
6345 ins_encode(store_B_immI_enc(mem, src));
6346 ins_pipe( ialu_storeI );
6347 %}
// Store Compressed Pointer
// Stores an already-compressed narrow oop as a 32-bit word (sw).
instruct storeN(memory mem, mRegN src)
%{
   match(Set mem (StoreN mem src));

   ins_cost(125); // XXX
   format %{ "sw    $mem, $src\t# compressed ptr @ storeN" %}
   ins_encode(store_N_reg_enc(mem, src));
   ins_pipe( ialu_storeI );
%}

// Fused encode + store: with zero-based, unshifted compressed oops the
// low 32 bits of the full pointer ARE the narrow oop, so EncodeP needs
// no arithmetic and a plain 32-bit sw of the pointer register suffices.
instruct storeP2N(memory mem, mRegP src)
%{
   match(Set mem (StoreN mem (EncodeP src)));
   // Guard: narrow oop value == real pointer value.
   predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

   ins_cost(125); // XXX
   format %{ "sw    $mem, $src\t# @ storeP2N" %}
   ins_encode(store_N_reg_enc(mem, src));
   ins_pipe( ialu_storeI );
%}
// Store a compressed klass pointer as a 32-bit word (sw).
instruct storeNKlass(memory mem, mRegN src)
%{
   match(Set mem (StoreNKlass mem src));

   ins_cost(125); // XXX
   format %{ "sw    $mem, $src\t# compressed klass ptr @ storeNKlass" %}
   ins_encode(store_N_reg_enc(mem, src));
   ins_pipe( ialu_storeI );
%}

// Fused encode + store for klass pointers; legal only when compressed
// class pointers use no base and no shift, so the low 32 bits of the
// Klass* are the narrow klass value.
instruct storeP2NKlass(memory mem, mRegP src)
%{
   match(Set mem (StoreNKlass mem (EncodePKlass src)));
   // Guard: narrow klass value == real pointer value.
   predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

   ins_cost(125); // XXX
   format %{ "sw    $mem, $src\t# @ storeP2NKlass" %}
   ins_encode(store_N_reg_enc(mem, src));
   ins_pipe( ialu_storeI );
%}
6392 instruct storeImmN0(memory mem, immN0 zero)
6393 %{
6394 match(Set mem (StoreN mem zero));
6396 ins_cost(125); // XXX
6397 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6398 ins_encode(storeImmN0_enc(mem, zero));
6399 ins_pipe( ialu_storeI );
6400 %}
6402 // Store Byte
6403 instruct storeB(memory mem, mRegI src) %{
6404 match(Set mem (StoreB mem src));
6406 ins_cost(125);
6407 format %{ "sb $src, $mem #@storeB" %}
6408 ins_encode(store_B_reg_enc(mem, src));
6409 ins_pipe( ialu_storeI );
6410 %}
6412 instruct storeB_convL2I(memory mem, mRegL src) %{
6413 match(Set mem (StoreB mem (ConvL2I src)));
6415 ins_cost(125);
6416 format %{ "sb $src, $mem #@storeB_convL2I" %}
6417 ins_encode(store_B_reg_enc(mem, src));
6418 ins_pipe( ialu_storeI );
6419 %}
6421 // Load Byte (8bit signed)
6422 instruct loadB(mRegI dst, memory mem) %{
6423 match(Set dst (LoadB mem));
6425 ins_cost(125);
6426 format %{ "lb $dst, $mem #@loadB" %}
6427 ins_encode(load_B_enc(dst, mem));
6428 ins_pipe( ialu_loadI );
6429 %}
6431 instruct loadB_convI2L(mRegL dst, memory mem) %{
6432 match(Set dst (ConvI2L (LoadB mem)));
6434 ins_cost(125);
6435 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6436 ins_encode(load_B_enc(dst, mem));
6437 ins_pipe( ialu_loadI );
6438 %}
6440 // Load Byte (8bit UNsigned)
6441 instruct loadUB(mRegI dst, memory mem) %{
6442 match(Set dst (LoadUB mem));
6444 ins_cost(125);
6445 format %{ "lbu $dst, $mem #@loadUB" %}
6446 ins_encode(load_UB_enc(dst, mem));
6447 ins_pipe( ialu_loadI );
6448 %}
6450 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6451 match(Set dst (ConvI2L (LoadUB mem)));
6453 ins_cost(125);
6454 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6455 ins_encode(load_UB_enc(dst, mem));
6456 ins_pipe( ialu_loadI );
6457 %}
6459 // Load Short (16bit signed)
6460 instruct loadS(mRegI dst, memory mem) %{
6461 match(Set dst (LoadS mem));
6463 ins_cost(125);
6464 format %{ "lh $dst, $mem #@loadS" %}
6465 ins_encode(load_S_enc(dst, mem));
6466 ins_pipe( ialu_loadI );
6467 %}
6469 // Load Short (16 bit signed) to Byte (8 bit signed)
6470 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6471 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6473 ins_cost(125);
6474 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6475 ins_encode(load_B_enc(dst, mem));
6476 ins_pipe(ialu_loadI);
6477 %}
6479 instruct loadS_convI2L(mRegL dst, memory mem) %{
6480 match(Set dst (ConvI2L (LoadS mem)));
6482 ins_cost(125);
6483 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6484 ins_encode(load_S_enc(dst, mem));
6485 ins_pipe( ialu_loadI );
6486 %}
6488 // Store Integer Immediate
6489 instruct storeImmI(memory mem, immI src) %{
6490 match(Set mem (StoreI mem src));
6492 ins_cost(150);
6493 format %{ "mov $mem, $src #@storeImmI" %}
6494 ins_encode(store_I_immI_enc(mem, src));
6495 ins_pipe( ialu_storeI );
6496 %}
6498 // Store Integer
6499 instruct storeI(memory mem, mRegI src) %{
6500 match(Set mem (StoreI mem src));
6502 ins_cost(125);
6503 format %{ "sw $mem, $src #@storeI" %}
6504 ins_encode(store_I_reg_enc(mem, src));
6505 ins_pipe( ialu_storeI );
6506 %}
6508 instruct storeI_convL2I(memory mem, mRegL src) %{
6509 match(Set mem (StoreI mem (ConvL2I src)));
6511 ins_cost(125);
6512 format %{ "sw $mem, $src #@storeI_convL2I" %}
6513 ins_encode(store_I_reg_enc(mem, src));
6514 ins_pipe( ialu_storeI );
6515 %}
6517 // Load Float
6518 instruct loadF(regF dst, memory mem) %{
6519 match(Set dst (LoadF mem));
6521 ins_cost(150);
6522 format %{ "loadF $dst, $mem #@loadF" %}
6523 ins_encode(load_F_enc(dst, mem));
6524 ins_pipe( ialu_loadI );
6525 %}
// Load a general pointer constant, with the relocation record the GC /
// class unloading machinery needs to find and patch the embedded value.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li   $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      // Klass* constant: record a metadata relocation, then emit a
      // patchable 48-bit immediate so the reloc code can rewrite it.
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      // Embedded oop: record an oop relocation so the GC can update
      // the immediate when the object moves.
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      // Plain constant, no relocation needed; non-patchable sequence.
      __ set64(dst, (long)value);
    }
    // NOTE(review): any other relocation type falls through and emits
    // NO code, leaving $dst with a stale value -- confirm those types
    // are unreachable here, or add a ShouldNotReachHere() guard.
  %}

  ins_pipe( ialu_regI_regI );
%}
6557 /*
6558 instruct loadConP_load(mRegP dst, immP_load src) %{
6559 match(Set dst src);
6561 ins_cost(100);
6562 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6564 ins_encode %{
6566 int con_offset = $constantoffset($src);
6568 if (Assembler::is_simm16(con_offset)) {
6569 __ ld($dst$$Register, $constanttablebase, con_offset);
6570 } else {
6571 __ set64(AT, con_offset);
6572 if (UseLoongsonISA) {
6573 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6574 } else {
6575 __ daddu(AT, $constanttablebase, AT);
6576 __ ld($dst$$Register, AT, 0);
6577 }
6578 }
6579 %}
6581 ins_pipe(ialu_loadI);
6582 %}
6583 */
6585 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6586 match(Set dst src);
6588 ins_cost(80);
6589 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6591 ins_encode %{
6592 __ set64($dst$$Register, $src$$constant);
6593 %}
6595 ins_pipe(ialu_regI_regI);
6596 %}
6599 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6600 match(Set dst src);
6602 ins_cost(50);
6603 format %{ "li $dst, $src #@loadConP_poll" %}
6605 ins_encode %{
6606 Register dst = $dst$$Register;
6607 intptr_t value = (intptr_t)$src$$constant;
6609 __ set64(dst, (jlong)value);
6610 %}
6612 ins_pipe( ialu_regI_regI );
6613 %}
6615 instruct loadConP0(mRegP dst, immP0 src)
6616 %{
6617 match(Set dst src);
6619 ins_cost(50);
6620 format %{ "mov $dst, R0\t# ptr" %}
6621 ins_encode %{
6622 Register dst_reg = $dst$$Register;
6623 __ daddu(dst_reg, R0, R0);
6624 %}
6625 ins_pipe( ialu_regI_regI );
6626 %}
// Load compressed NULL: the narrow encoding of NULL is 0, so a plain
// register move from R0 suffices and no relocation is needed.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move    $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load a compressed oop constant. set_narrow_oop records the oop in
// the oop recorder and emits a patchable immediate so the GC can
// rewrite it if the object moves.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li    $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}

// Load a compressed klass constant; set_narrow_klass emits the
// patchable immediate with the matching metadata relocation.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li    $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
6661 //FIXME
6662 // Tail Call; Jump from runtime stub to Java code.
6663 // Also known as an 'interprocedural jump'.
6664 // Target of jump will eventually return to caller.
6665 // TailJump below removes the return address.
6666 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6667 match(TailCall jump_target method_oop );
6668 ins_cost(300);
6669 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6671 ins_encode %{
6672 Register target = $jump_target$$Register;
6673 Register oop = $method_oop$$Register;
6675 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6676 __ push(RA);
6678 __ move(S3, oop);
6679 __ jr(target);
6680 __ nop();
6681 %}
6683 ins_pipe( pipe_jump );
6684 %}
6686 // Create exception oop: created by stack-crawling runtime code.
6687 // Created exception is now available to this handler, and is setup
6688 // just prior to jumping to this handler. No code emitted.
6689 instruct CreateException( a0_RegP ex_oop )
6690 %{
6691 match(Set ex_oop (CreateEx));
6693 // use the following format syntax
6694 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6695 ins_encode %{
6696 /* Jin: X86 leaves this function empty */
6697 __ block_comment("CreateException is empty in X86/MIPS");
6698 %}
6699 ins_pipe( empty );
6700 // ins_pipe( pipe_jump );
6701 %}
6704 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6706 - Common try/catch:
6707 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6708 |- V0, V1 are created
6709 |- T9 <= SharedRuntime::exception_handler_for_return_address
6710 `- jr T9
6711 `- the caller's exception_handler
6712 `- jr OptoRuntime::exception_blob
6713 `- here
6714 - Rethrow(e.g. 'unwind'):
6715 * The callee:
6716 |- an exception is triggered during execution
6717 `- exits the callee method through RethrowException node
6718 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6719 `- The callee jumps to OptoRuntime::rethrow_stub()
6720 * In OptoRuntime::rethrow_stub:
6721 |- The VM calls _rethrow_Java to determine the return address in the caller method
6722 `- exits the stub with tailjmpInd
6723 |- pops exception_oop(V0) and exception_pc(V1)
6724 `- jumps to the return address(usually an exception_handler)
6725 * The caller:
6726 `- continues processing the exception_blob with V0/V1
6727 */
6729 /*
6730 Disassembling OptoRuntime::rethrow_stub()
6732 ; locals
6733 0x2d3bf320: addiu sp, sp, 0xfffffff8
6734 0x2d3bf324: sw ra, 0x4(sp)
6735 0x2d3bf328: sw fp, 0x0(sp)
6736 0x2d3bf32c: addu fp, sp, zero
6737 0x2d3bf330: addiu sp, sp, 0xfffffff0
6738 0x2d3bf334: sw ra, 0x8(sp)
6739 0x2d3bf338: sw t0, 0x4(sp)
6740 0x2d3bf33c: sw sp, 0x0(sp)
6742 ; get_thread(S2)
6743 0x2d3bf340: addu s2, sp, zero
6744 0x2d3bf344: srl s2, s2, 12
6745 0x2d3bf348: sll s2, s2, 2
6746 0x2d3bf34c: lui at, 0x2c85
6747 0x2d3bf350: addu at, at, s2
6748 0x2d3bf354: lw s2, 0xffffcc80(at)
6750 0x2d3bf358: lw s0, 0x0(sp)
  0x2d3bf35c: sw s0, 0x118(s2)        // last_sp -> thread
6752 0x2d3bf360: sw s2, 0xc(sp)
6754 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6755 0x2d3bf364: lw a0, 0x4(sp)
6756 0x2d3bf368: lw a1, 0xc(sp)
6757 0x2d3bf36c: lw a2, 0x8(sp)
6758 ;; Java_To_Runtime
6759 0x2d3bf370: lui t9, 0x2c34
6760 0x2d3bf374: addiu t9, t9, 0xffff8a48
6761 0x2d3bf378: jalr t9
6762 0x2d3bf37c: nop
6764 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6766 0x2d3bf384: lw s0, 0xc(sp)
6767 0x2d3bf388: sw zero, 0x118(s0)
6768 0x2d3bf38c: sw zero, 0x11c(s0)
6769 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6770 0x2d3bf394: addu s2, s0, zero
6771 0x2d3bf398: sw zero, 0x144(s2)
6772 0x2d3bf39c: lw s0, 0x4(s2)
6773 0x2d3bf3a0: addiu s4, zero, 0x0
6774 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6775 0x2d3bf3a8: nop
6776 0x2d3bf3ac: addiu sp, sp, 0x10
6777 0x2d3bf3b0: addiu sp, sp, 0x8
6778 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6779 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6780 0x2d3bf3bc: lui at, 0x2b48
6781 0x2d3bf3c0: lw at, 0x100(at)
6783 ; tailjmpInd: Restores exception_oop & exception_pc
6784 0x2d3bf3c4: addu v1, ra, zero
6785 0x2d3bf3c8: addu v0, s1, zero
6786 0x2d3bf3cc: jr s3
6787 0x2d3bf3d0: nop
6788 ; Exception:
6789 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6790 0x2d3bf3d8: addiu s1, s1, 0x40
6791 0x2d3bf3dc: addiu s2, zero, 0x0
6792 0x2d3bf3e0: addiu sp, sp, 0x10
6793 0x2d3bf3e4: addiu sp, sp, 0x8
6794 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6795 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6796 0x2d3bf3f0: lui at, 0x2b48
6797 0x2d3bf3f4: lw at, 0x100(at)
6798 ; TailCalljmpInd
6799 __ push(RA); ; to be used in generate_forward_exception()
6800 0x2d3bf3f8: addu t7, s2, zero
6801 0x2d3bf3fc: jr s1
6802 0x2d3bf400: nop
6803 */
6804 // Rethrow exception:
6805 // The exception oop will come in the first argument position.
6806 // Then JUMP (not call) to the rethrow stub code.
6807 instruct RethrowException()
6808 %{
6809 match(Rethrow);
6811 // use the following format syntax
6812 format %{ "JMP rethrow_stub #@RethrowException" %}
6813 ins_encode %{
6814 __ block_comment("@ RethrowException");
6816 cbuf.set_insts_mark();
6817 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6819 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6820 __ patchable_set48(T9, (jlong)OptoRuntime::rethrow_stub());
6821 __ jr(T9);
6822 __ nop();
6823 %}
6824 ins_pipe( pipe_jump );
6825 %}
// Conditional branch: pointer compared against NULL (R0).
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp   $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Keep the label as a pointer and test it BEFORE dereferencing.
    // The previous code computed 'Label &L = *($labl$$label);' first and
    // then tested 'if (&L)': the address of a reference is never NULL,
    // so the guard was dead code, and when the label pointer really was
    // NULL the dereference had already happened (undefined behavior).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);  // not-yet-bound target: emit offset 0, patched later
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      /*
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      */
      default:
        Unimplemented();
    }
    __ nop();  // MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch: decoded narrow oop compared against NULL.
// With zero-based, unshifted compressed oops the narrow value is NULL
// iff the decoded pointer is NULL, so we can compare the 32-bit narrow
// register against R0 directly and skip the DecodeN.
instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp   $op1, R0, $labl #@branchConN2P_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Test the label pointer before dereferencing it (the old
    // 'Label &L = *(...); if (&L)' pattern dereferenced first and the
    // guard could never fail).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);  // unbound target: offset patched later
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch: unsigned pointer-vs-pointer comparison.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
  // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp   $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    // Test the label pointer before dereferencing it (the old
    // 'Label &L = *(...); if (&L)' pattern dereferenced first and the
    // guard could never fail).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);  // unbound target: offset patched later
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // Unsigned compare via sltu: AT = (op2 < op1)
        __ sltu(AT, op2, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // MIPS branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch: compressed pointer compared against compressed
// NULL (which encodes as 0, so the comparison is against R0).
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP    $op1,0\t! compressed ptr\n\t"
            "BP$cmp   $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Test the label pointer before dereferencing it (the old
    // 'Label &L = *(...); if (&L)' pattern dereferenced first and the
    // guard could never fail).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);  // unbound target: offset patched later
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // MIPS branch delay slot
  %}
//TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch: compressed pointer vs. compressed pointer.
// Narrow oops compare correctly as 32-bit unsigned values, so no
// decode is needed before the comparison.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP    $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp   $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    // Test the label pointer before dereferencing it (the old
    // 'Label &L = *(...); if (&L)' pattern dereferenced first and the
    // guard could never fail).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1_reg, op2_reg, *L);
        else
          __ beq(op1_reg, op2_reg, (int)0);  // unbound target: offset patched later
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1_reg, op2_reg, *L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        // Unsigned compare via sltu: AT = (op2 < op1)
        __ sltu(AT, op2_reg, op1_reg);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // MIPS branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned 32-bit register-register compare-and-branch.
// Equality maps directly to beq/bne; the ordered cases materialize the
// unsigned compare result in AT via sltu first.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // Keep the label as a pointer; the old '*(ptr)' + 'if (&L)' pattern was
    // UB on a NULL label and its else arm was unreachable.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: // above
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: // above_equal
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: // below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned 32-bit register-immediate compare-and-branch.
// The immediate is first materialized into AT; for the ordered cases AT is
// then reused as the destination of sltu (clobbering the immediate copy).
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // Keep the label as a pointer; 'if (&L)' after dereferencing a possibly
    // NULL pointer was UB and always true.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ move(AT, val);
    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: // above
        __ sltu(AT, AT, op1);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: // above_equal
        __ sltu(AT, op1, AT);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // below
        __ sltu(AT, op1, AT);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: // below_equal
        __ sltu(AT, AT, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 32-bit register-register compare-and-branch.
// Ordered cases use signed set-on-less-than (slt) into AT.
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: // greater
        __ slt(AT, op2, op1);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: // greater_equal
        __ slt(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // less
        __ slt(AT, op1, op2);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: // less_equal
        __ slt(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 32-bit compare against zero and branch.
// Uses MIPS compare-with-zero branches (bgtz/bgez/bltz/blez) directly,
// so no scratch register is needed. Lower cost than the generic
// register-immediate form below.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, R0, *L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: // greater
        if (L != NULL)
          __ bgtz(op1, *L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: // greater_equal
        if (L != NULL)
          __ bgez(op1, *L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: // less
        if (L != NULL)
          __ bltz(op1, *L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: // less_equal
        if (L != NULL)
          __ blez(op1, *L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 32-bit register-immediate compare-and-branch (general immediate).
// The immediate is materialized into AT; ordered cases then overwrite AT
// with the slt result.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ move(AT, val);
    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: // greater
        __ slt(AT, AT, op1);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: // greater_equal
        __ slt(AT, op1, AT);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // less
        __ slt(AT, op1, AT);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: // less_equal
        __ slt(AT, AT, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned 32-bit compare against zero and branch.
// Degenerate unsigned cases are resolved statically:
//   above_equal (x >= 0u) is always true  -> unconditional branch
//   below       (x <  0u) is always false -> emit nothing
//   below_equal (x <= 0u) reduces to x == 0
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, R0, *L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: // above
        if (L != NULL)
          __ bne(R0, op1, *L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: // above_equal: always taken
        if (L != NULL)
          __ beq(R0, R0, *L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: // below: never taken, emit nothing (skips the trailing nop too)
        return;
        break;
      case 0x06: // below_equal: same as equal-to-zero
        if (L != NULL)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned 32-bit compare-and-branch against a 16-bit immediate.
// above_equal/below use sltiu directly (immediate fits 16 bits); the other
// ordered cases and equality still materialize the value into AT.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        __ move(AT, val);
        if (L != NULL)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: // not_equal
        __ move(AT, val);
        if (L != NULL)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: // above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: // above_equal
        __ sltiu(AT, op1, val);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // below
        __ sltiu(AT, op1, val);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: // below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit register-register compare-and-branch.
// Each case fills its own branch delay slot via delayed()->nop().
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (target != NULL)
          __ beq(opr1_reg, opr2_reg, *target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: // not_equal
        if (target != NULL)
          __ bne(opr1_reg, opr2_reg, *target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: // greater
        __ slt(AT, opr2_reg, opr1_reg);
        if (target != NULL)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: // greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if (target != NULL)
          __ beq(AT, R0, *target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: // less
        __ slt(AT, opr1_reg, opr2_reg);
        if (target != NULL)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: // less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if (target != NULL)
          __ beq(AT, R0, *target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare against an immediate whose negation fits 16 bits:
// compute (src1 - val) into AT with daddiu, then branch on AT's sign/zero.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);
    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(R0, AT, *L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: // greater
        if (L != NULL)
          __ bgtz(AT, *L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: // greater_equal
        if (L != NULL)
          __ bgez(AT, *L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: // less
        if (L != NULL)
          __ bltz(AT, *L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: // less_equal
        if (L != NULL)
          __ blez(AT, *L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 32-bit compare against an immediate whose negation fits 16 bits:
// compute (src1 - val) into AT with addiu32, then branch on AT's sign/zero.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);
    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(R0, AT, *L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: // greater
        if (L != NULL)
          __ bgtz(AT, *L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: // greater_equal
        if (L != NULL)
          __ bgez(AT, *L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: // less
        if (L != NULL)
          __ bltz(AT, *L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: // less_equal
        if (L != NULL)
          __ blez(AT, *L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed 64-bit compare against zero and branch.
// Uses the MIPS compare-with-zero branches where one exists; 'less' goes
// through slt into AT. The shared delay slot is filled after the switch.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (target != NULL)
          __ beq(opr1_reg, R0, *target);
        else
          __ beq(opr1_reg, R0, (int)0);
        break;

      case 0x02: // not_equal
        if (target != NULL)
          __ bne(opr1_reg, R0, *target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: // greater
        if (target != NULL)
          __ bgtz(opr1_reg, *target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: // greater_equal
        if (target != NULL)
          __ bgez(opr1_reg, *target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: // less
        __ slt(AT, opr1_reg, R0);
        if (target != NULL)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: // less_equal
        if (target != NULL)
          __ blez(opr1_reg, *target);
        else
          __ blez(opr1_reg, (int)0);
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Single-precision float compare-and-branch. Sets the FP condition bit with
// c_*_s and branches on it with bc1t/bc1f. Ordered cases use the unordered
// compares (c_ule_s / c_ult_s) so that a NaN operand makes the ordered
// relation false (branch falls through).
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: // not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: // greater
        __ c_ule_s(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: // greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: // less
        __ c_ult_s(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: // less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Double-precision float compare-and-branch; mirrors branchConF_reg_reg
// with the double-precision compares.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // Keep the label as a pointer; the old reference-based null test was UB.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        __ c_eq_d(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: // not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs. Use the ordered c_eq_d and invert with bc1f.
        __ c_eq_d(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: // greater
        __ c_ule_d(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: // greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: // less
        __ c_ult_d(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: // less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if (L != NULL)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime; encoding is shared via Java_To_Runtime.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
7962 //------------------------MemBar Instructions-------------------------------
7963 //Memory barrier flavors
// Acquire barrier: no instruction emitted -- acquire semantics are provided
// by the preceding load/CAS sequences on this port.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emit a full MIPS sync.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync(); // full hardware memory barrier
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with lock entry: empty encoding, because the CAS
// in the prior FastLock already provides the acquire.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: no instruction emitted on this port.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emit a full MIPS sync.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync(); // full hardware memory barrier
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with lock exit: empty encoding, because the
// FastUnlock sequence already provides the release.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Full volatile barrier: a sync instruction, but only on multiprocessor
// systems -- a uniprocessor needs no hardware ordering.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if (os::is_MP()) {
      __ sync();
    }
  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier proven redundant by the matcher's store-load analysis:
// matched only when post_store_load_barrier says no fence is required.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// Store-store barrier: empty encoding on this port.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
8066 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long as a pointer. Registers are the same width,
// so this is at most a register move (elided when src == dst).
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register from = $src$$Register;
    Register to   = $dst$$Register;

    if (from != to) {
      __ move(to, from);
    }
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a long; a plain register move at most.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register from = $src$$Register;
    Register to   = $dst$$Register;

    if (from != to) {
      __ move(to, from);
    }
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: bit-copy a 32-bit float register into a GPR (mfc1).
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register      gpr = as_Register($dst$$reg);
    FloatRegister fpr = as_FloatRegister($src$$reg);

    __ mfc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: bit-copy a GPR into a 32-bit float register (mtc1).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register      gpr = as_Register($src$$reg);
    FloatRegister fpr = as_FloatRegister($dst$$reg);

    __ mtc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: bit-copy a 64-bit double register into a GPR (dmfc1).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register      gpr = as_Register($dst$$reg);
    FloatRegister fpr = as_FloatRegister($src$$reg);

    __ dmfc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: bit-copy a GPR into a 64-bit double register (dmtc1).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister fpr = as_FloatRegister($dst$$reg);
    Register      gpr = as_Register($src$$reg);

    __ dmtc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
8151 //----------Conditional Move---------------------------------------------------
8152 // Conditional move
// Conditional move of an int selected by a signed int compare.
// The compare result is materialized in AT (subu32 for equality, slt for
// order) and the move is performed branch-free with movz/movn.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register left  = $tmp1$$Register;
    Register right = $tmp2$$Register;
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;
    int cond       = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal: AT == 0 iff left == right
        __ subu32(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x02: // not_equal
        __ subu32(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x03: // great
        __ slt(AT, right, left);
        __ movn(dst, src, AT);
        break;

      case 0x04: // great_equal
        __ slt(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x05: // less
        __ slt(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x06: // less_equal
        __ slt(AT, right, left);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned pointer compare.
// Uses subu/sltu into AT, then branch-free movz/movn.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register left  = $tmp1$$Register;
    Register right = $tmp2$$Register;
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;
    int cond       = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal
        __ subu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x02: // not_equal
        __ subu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x03: // above
        __ sltu(AT, right, left);
        __ movn(dst, src, AT);
        break;

      case 0x04: // above_equal
        __ sltu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x05: // below
        __ sltu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x06: // below_equal
        __ sltu(AT, right, left);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned narrow-oop compare.
// Narrow oops are 32-bit, hence subu32 for the equality test.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register left  = $tmp1$$Register;
    Register right = $tmp2$$Register;
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;
    int cond       = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal
        __ subu32(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x02: // not_equal
        __ subu32(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x03: // above
        __ sltu(AT, right, left);
        __ movn(dst, src, AT);
        break;

      case 0x04: // above_equal
        __ sltu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x05: // below
        __ sltu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x06: // below_equal
        __ sltu(AT, right, left);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned narrow-oop compare.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register left  = $tmp1$$Register;
    Register right = $tmp2$$Register;
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;
    int cond       = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal
        __ subu32(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x02: // not_equal
        __ subu32(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x03: // above
        __ sltu(AT, right, left);
        __ movn(dst, src, AT);
        break;

      case 0x04: // above_equal
        __ sltu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x05: // below
        __ sltu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x06: // below_equal
        __ sltu(AT, right, left);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned pointer compare.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register left  = $tmp1$$Register;
    Register right = $tmp2$$Register;
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;
    int cond       = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal
        __ subu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x02: // not_equal
        __ subu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x03: // above
        __ sltu(AT, right, left);
        __ movn(dst, src, AT);
        break;

      case 0x04: // above_equal
        __ sltu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x05: // below
        __ sltu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x06: // below_equal
        __ sltu(AT, right, left);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a double compare.
// Sets the FP condition bit with c_*_d and moves with movt/movf.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister fleft  = as_FloatRegister($tmp1$$reg);
    FloatRegister fright = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int cond = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal
        __ c_eq_d(fleft, fright);
        __ movt(dst, src);
        break;
      case 0x02: // not_equal
        __ c_eq_d(fleft, fright);
        __ movf(dst, src);
        break;
      case 0x03: // greater
        __ c_ole_d(fleft, fright);
        __ movf(dst, src);
        break;
      case 0x04: // greater_equal
        __ c_olt_d(fleft, fright);
        __ movf(dst, src);
        break;
      case 0x05: // less
        __ c_ult_d(fleft, fright);
        __ movt(dst, src);
        break;
      case 0x06: // less_equal
        __ c_ule_d(fleft, fright);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by an unsigned narrow-oop compare.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register left  = $tmp1$$Register;
    Register right = $tmp2$$Register;
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;
    int cond       = $cop$$cmpcode;

    switch (cond) {
      case 0x01: // equal
        __ subu32(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x02: // not_equal
        __ subu32(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x03: // above
        __ sltu(AT, right, left);
        __ movn(dst, src, AT);
        break;

      case 0x04: // above_equal
        __ sltu(AT, left, right);
        __ movz(dst, src, AT);
        break;

      case 0x05: // below
        __ sltu(AT, left, right);
        __ movn(dst, src, AT);
        break;

      case 0x06: // below_equal
        __ sltu(AT, right, left);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int: dst = src iff (tmp1 <cop> tmp2),
// where the condition is an UNSIGNED int compare (cmpOpU).
// AT is scratch; movz/movn move src into dst on AT==0 / AT!=0.
8529 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8530 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8531 ins_cost(80);
8532 format %{
8533 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8534 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8535 %}
8536 ins_encode %{
8537 Register op1 = $tmp1$$Register;
8538 Register op2 = $tmp2$$Register;
8539 Register dst = $dst$$Register;
8540 Register src = $src$$Register;
8541 int flag = $cop$$cmpcode;
8543 switch(flag)
8544 {
8545 case 0x01: //equal
// NOTE(review): in this port `subu` is the 64-bit subtract macro while
// `subu32` is the 32-bit one -- confirm; equality of properly
// sign-extended int registers is preserved either way.
8546 __ subu(AT, op1, op2);
8547 __ movz(dst, src, AT);
8548 break;
8550 case 0x02: //not_equal
8551 __ subu(AT, op1, op2);
8552 __ movn(dst, src, AT);
8553 break;
8555 case 0x03: //above
8556 __ sltu(AT, op2, op1);
8557 __ movn(dst, src, AT);
8558 break;
8560 case 0x04: //above_equal
8561 __ sltu(AT, op1, op2);
8562 __ movz(dst, src, AT);
8563 break;
8565 case 0x05: //below
8566 __ sltu(AT, op1, op2);
8567 __ movn(dst, src, AT);
8568 break;
8570 case 0x06: //below_equal
8571 __ sltu(AT, op2, op1);
8572 __ movz(dst, src, AT);
8573 break;
8575 default:
8576 Unimplemented();
8577 }
8578 %}
8580 ins_pipe( pipe_slow );
8581 %}
// Conditional move of an int guarded by a SIGNED long compare:
// dst = src iff (tmp1 <cop> tmp2). AT is scratch; slt does the signed
// 64-bit compare, movz/movn do the conditional move.
8583 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8584 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8585 ins_cost(80);
8586 format %{
8587 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8588 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8589 %}
8590 ins_encode %{
8591 Register opr1 = as_Register($tmp1$$reg);
8592 Register opr2 = as_Register($tmp2$$reg);
8593 Register dst = $dst$$Register;
8594 Register src = $src$$Register;
8595 int flag = $cop$$cmpcode;
8597 switch(flag)
8598 {
8599 case 0x01: //equal
// NOTE(review): operands are 64-bit longs; assumes `subu` maps to the
// 64-bit subtract in this port's MacroAssembler -- confirm.
8600 __ subu(AT, opr1, opr2);
8601 __ movz(dst, src, AT);
8602 break;
8604 case 0x02: //not_equal
8605 __ subu(AT, opr1, opr2);
8606 __ movn(dst, src, AT);
8607 break;
8609 case 0x03: //greater
8610 __ slt(AT, opr2, opr1);
8611 __ movn(dst, src, AT);
8612 break;
8614 case 0x04: //greater_equal
8615 __ slt(AT, opr1, opr2);
8616 __ movz(dst, src, AT);
8617 break;
8619 case 0x05: //less
8620 __ slt(AT, opr1, opr2);
8621 __ movn(dst, src, AT);
8622 break;
8624 case 0x06: //less_equal
8625 __ slt(AT, opr2, opr1);
8626 __ movz(dst, src, AT);
8627 break;
8629 default:
8630 Unimplemented();
8631 }
8632 %}
8634 ins_pipe( pipe_slow );
8635 %}
// Conditional move of a pointer guarded by a SIGNED long compare:
// dst = src iff (tmp1 <cop> tmp2). Same encoding scheme as
// cmovI_cmpL_reg_reg; only the moved operands are pointers.
8637 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8638 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8639 ins_cost(80);
8640 format %{
8641 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8642 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8643 %}
8644 ins_encode %{
8645 Register opr1 = as_Register($tmp1$$reg);
8646 Register opr2 = as_Register($tmp2$$reg);
8647 Register dst = $dst$$Register;
8648 Register src = $src$$Register;
8649 int flag = $cop$$cmpcode;
8651 switch(flag)
8652 {
8653 case 0x01: //equal
8654 __ subu(AT, opr1, opr2);
8655 __ movz(dst, src, AT);
8656 break;
8658 case 0x02: //not_equal
8659 __ subu(AT, opr1, opr2);
8660 __ movn(dst, src, AT);
8661 break;
8663 case 0x03: //greater
8664 __ slt(AT, opr2, opr1);
8665 __ movn(dst, src, AT);
8666 break;
8668 case 0x04: //greater_equal
8669 __ slt(AT, opr1, opr2);
8670 __ movz(dst, src, AT);
8671 break;
8673 case 0x05: //less
8674 __ slt(AT, opr1, opr2);
8675 __ movn(dst, src, AT);
8676 break;
8678 case 0x06: //less_equal
8679 __ slt(AT, opr2, opr1);
8680 __ movz(dst, src, AT);
8681 break;
8683 default:
8684 Unimplemented();
8685 }
8686 %}
8688 ins_pipe( pipe_slow );
8689 %}
// Conditional move of an int guarded by a double compare:
// dst = src iff (tmp1 <cop> tmp2). The c_*_d instructions set the FP
// condition flag; movt/movf move src into dst when the flag is
// set/clear. NOTE(review): for NaN operands the movf-based cases
// (not_equal/greater/greater_equal) also perform the move -- confirm
// this matches the IR's unordered semantics.
8691 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8692 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8693 ins_cost(80);
8694 format %{
8695 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8696 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8697 %}
8698 ins_encode %{
8699 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8700 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8701 Register dst = as_Register($dst$$reg);
8702 Register src = as_Register($src$$reg);
8704 int flag = $cop$$cmpcode;
8706 switch(flag)
8707 {
8708 case 0x01: //equal
8709 __ c_eq_d(reg_op1, reg_op2);
8710 __ movt(dst, src);
8711 break;
8712 case 0x02: //not_equal
8713 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8714 __ c_eq_d(reg_op1, reg_op2);
8715 __ movf(dst, src);
8716 break;
8717 case 0x03: //greater
// greater is encoded as NOT(ordered-less-or-equal).
8718 __ c_ole_d(reg_op1, reg_op2);
8719 __ movf(dst, src);
8720 break;
8721 case 0x04: //greater_equal
8722 __ c_olt_d(reg_op1, reg_op2);
8723 __ movf(dst, src);
8724 break;
8725 case 0x05: //less
// unordered-or-less: NaN also triggers the move here.
8726 __ c_ult_d(reg_op1, reg_op2);
8727 __ movt(dst, src);
8728 break;
8729 case 0x06: //less_equal
8730 __ c_ule_d(reg_op1, reg_op2);
8731 __ movt(dst, src);
8732 break;
8733 default:
8734 Unimplemented();
8735 }
8736 %}
8738 ins_pipe( pipe_slow );
8739 %}
// Conditional move of a pointer guarded by an UNSIGNED pointer compare:
// dst = src iff (tmp1 <cop> tmp2). Pointers compare unsigned (sltu).
8742 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8743 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8744 ins_cost(80);
8745 format %{
8746 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8747 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8748 %}
8749 ins_encode %{
8750 Register op1 = $tmp1$$Register;
8751 Register op2 = $tmp2$$Register;
8752 Register dst = $dst$$Register;
8753 Register src = $src$$Register;
8754 int flag = $cop$$cmpcode;
8756 switch(flag)
8757 {
8758 case 0x01: //equal
8759 __ subu(AT, op1, op2);
8760 __ movz(dst, src, AT);
8761 break;
8763 case 0x02: //not_equal
8764 __ subu(AT, op1, op2);
8765 __ movn(dst, src, AT);
8766 break;
8768 case 0x03: //above
8769 __ sltu(AT, op2, op1);
8770 __ movn(dst, src, AT);
8771 break;
8773 case 0x04: //above_equal
8774 __ sltu(AT, op1, op2);
8775 __ movz(dst, src, AT);
8776 break;
8778 case 0x05: //below
8779 __ sltu(AT, op1, op2);
8780 __ movn(dst, src, AT);
8781 break;
8783 case 0x06: //below_equal
8784 __ sltu(AT, op2, op1);
8785 __ movz(dst, src, AT);
8786 break;
8788 default:
8789 Unimplemented();
8790 }
8791 %}
8793 ins_pipe( pipe_slow );
8794 %}
// Conditional move of a pointer guarded by a SIGNED int compare:
// dst = src iff (tmp1 <cop> tmp2). Note: the condition is signed
// (cmpOp/slt); the original "above/below" case labels were unsigned
// terminology and have been corrected to greater/less below.
8796 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8797 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8798 ins_cost(80);
8799 format %{
8800 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8801 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8802 %}
8803 ins_encode %{
8804 Register op1 = $tmp1$$Register;
8805 Register op2 = $tmp2$$Register;
8806 Register dst = $dst$$Register;
8807 Register src = $src$$Register;
8808 int flag = $cop$$cmpcode;
8810 switch(flag)
8811 {
8812 case 0x01: //equal
8813 __ subu32(AT, op1, op2);
8814 __ movz(dst, src, AT);
8815 break;
8817 case 0x02: //not_equal
8818 __ subu32(AT, op1, op2);
8819 __ movn(dst, src, AT);
8820 break;
8822 case 0x03: //greater (signed)
8823 __ slt(AT, op2, op1);
8824 __ movn(dst, src, AT);
8825 break;
8827 case 0x04: //greater_equal (signed)
8828 __ slt(AT, op1, op2);
8829 __ movz(dst, src, AT);
8830 break;
8832 case 0x05: //less (signed)
8833 __ slt(AT, op1, op2);
8834 __ movn(dst, src, AT);
8835 break;
8837 case 0x06: //less_equal (signed)
8838 __ slt(AT, op2, op1);
8839 __ movz(dst, src, AT);
8840 break;
8842 default:
8843 Unimplemented();
8844 }
8845 %}
8847 ins_pipe( pipe_slow );
8848 %}
// Conditional move of a narrow oop guarded by a SIGNED int compare:
// dst = src iff (tmp1 <cop> tmp2). The original "above/below" case
// labels were unsigned terminology and have been corrected to
// greater/less below (the code uses the signed slt).
8850 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8851 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8852 ins_cost(80);
8853 format %{
8854 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
8855 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
8856 %}
8857 ins_encode %{
8858 Register op1 = $tmp1$$Register;
8859 Register op2 = $tmp2$$Register;
8860 Register dst = $dst$$Register;
8861 Register src = $src$$Register;
8862 int flag = $cop$$cmpcode;
8864 switch(flag)
8865 {
8866 case 0x01: //equal
8867 __ subu32(AT, op1, op2);
8868 __ movz(dst, src, AT);
8869 break;
8871 case 0x02: //not_equal
8872 __ subu32(AT, op1, op2);
8873 __ movn(dst, src, AT);
8874 break;
8876 case 0x03: //greater (signed)
8877 __ slt(AT, op2, op1);
8878 __ movn(dst, src, AT);
8879 break;
8881 case 0x04: //greater_equal (signed)
8882 __ slt(AT, op1, op2);
8883 __ movz(dst, src, AT);
8884 break;
8886 case 0x05: //less (signed)
8887 __ slt(AT, op1, op2);
8888 __ movn(dst, src, AT);
8889 break;
8891 case 0x06: //less_equal (signed)
8892 __ slt(AT, op2, op1);
8893 __ movz(dst, src, AT);
8894 break;
8896 default:
8897 Unimplemented();
8898 }
8899 %}
8901 ins_pipe( pipe_slow );
8902 %}
// Conditional move of a long guarded by a SIGNED int compare:
// dst = src iff (tmp1 <cop> tmp2). AT is scratch.
8905 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8906 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8907 ins_cost(80);
8908 format %{
8909 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
8910 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
8911 %}
8913 ins_encode %{
8914 Register op1 = $tmp1$$Register;
8915 Register op2 = $tmp2$$Register;
8916 Register dst = as_Register($dst$$reg);
8917 Register src = as_Register($src$$reg);
8918 int flag = $cop$$cmpcode;
8920 switch(flag)
8921 {
8922 case 0x01: //equal
8923 __ subu32(AT, op1, op2);
8924 __ movz(dst, src, AT);
8925 break;
8927 case 0x02: //not_equal
8928 __ subu32(AT, op1, op2);
8929 __ movn(dst, src, AT);
8930 break;
8932 case 0x03: //greater
8933 __ slt(AT, op2, op1);
8934 __ movn(dst, src, AT);
8935 break;
8937 case 0x04: //greater_equal
8938 __ slt(AT, op1, op2);
8939 __ movz(dst, src, AT);
8940 break;
8942 case 0x05: //less
8943 __ slt(AT, op1, op2);
8944 __ movn(dst, src, AT);
8945 break;
8947 case 0x06: //less_equal
8948 __ slt(AT, op2, op1);
8949 __ movz(dst, src, AT);
8950 break;
8952 default:
8953 Unimplemented();
8954 }
8955 %}
8957 ins_pipe( pipe_slow );
8958 %}
// Conditional move of a long guarded by a SIGNED long compare:
// dst = src iff (tmp1 <cop> tmp2). AT is scratch; slt performs the
// signed 64-bit compare.
8960 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8961 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8962 ins_cost(80);
8963 format %{
8964 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8965 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8966 %}
8967 ins_encode %{
8968 Register opr1 = as_Register($tmp1$$reg);
8969 Register opr2 = as_Register($tmp2$$reg);
8970 Register dst = as_Register($dst$$reg);
8971 Register src = as_Register($src$$reg);
8972 int flag = $cop$$cmpcode;
8974 switch(flag)
8975 {
8976 case 0x01: //equal
8977 __ subu(AT, opr1, opr2);
8978 __ movz(dst, src, AT);
8979 break;
8981 case 0x02: //not_equal
8982 __ subu(AT, opr1, opr2);
8983 __ movn(dst, src, AT);
8984 break;
8986 case 0x03: //greater
8987 __ slt(AT, opr2, opr1);
8988 __ movn(dst, src, AT);
8989 break;
8991 case 0x04: //greater_equal
8992 __ slt(AT, opr1, opr2);
8993 __ movz(dst, src, AT);
8994 break;
8996 case 0x05: //less
8997 __ slt(AT, opr1, opr2);
8998 __ movn(dst, src, AT);
8999 break;
9001 case 0x06: //less_equal
9002 __ slt(AT, opr2, opr1);
9003 __ movz(dst, src, AT);
9004 break;
9006 default:
9007 Unimplemented();
9008 }
9009 %}
9011 ins_pipe( pipe_slow );
9012 %}
// Conditional move of a long guarded by an UNSIGNED narrow-oop compare:
// dst = src iff (tmp1 <cop> tmp2). Narrow oops are 32-bit, hence subu32
// for the equality test and sltu for the unsigned orderings.
9014 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
9015 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
9016 ins_cost(80);
9017 format %{
9018 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
9019 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
9020 %}
9021 ins_encode %{
9022 Register op1 = $tmp1$$Register;
9023 Register op2 = $tmp2$$Register;
9024 Register dst = $dst$$Register;
9025 Register src = $src$$Register;
9026 int flag = $cop$$cmpcode;
9028 switch(flag)
9029 {
9030 case 0x01: //equal
9031 __ subu32(AT, op1, op2);
9032 __ movz(dst, src, AT);
9033 break;
9035 case 0x02: //not_equal
9036 __ subu32(AT, op1, op2);
9037 __ movn(dst, src, AT);
9038 break;
9040 case 0x03: //above
9041 __ sltu(AT, op2, op1);
9042 __ movn(dst, src, AT);
9043 break;
9045 case 0x04: //above_equal
9046 __ sltu(AT, op1, op2);
9047 __ movz(dst, src, AT);
9048 break;
9050 case 0x05: //below
9051 __ sltu(AT, op1, op2);
9052 __ movn(dst, src, AT);
9053 break;
9055 case 0x06: //below_equal
9056 __ sltu(AT, op2, op1);
9057 __ movz(dst, src, AT);
9058 break;
9060 default:
9061 Unimplemented();
9062 }
9063 %}
9065 ins_pipe( pipe_slow );
9066 %}
// Conditional move of a long guarded by a double compare:
// dst = src iff (tmp1 <cop> tmp2). c_*_d sets the FP condition flag;
// movt/movf move src into dst when the flag is set/clear.
// NOTE(review): NaN operands trigger the movf-based cases
// (not_equal/greater/greater_equal) -- confirm against the IR's
// unordered semantics.
9069 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
9070 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9071 ins_cost(80);
9072 format %{
9073 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
9074 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
9075 %}
9076 ins_encode %{
9077 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9078 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9079 Register dst = as_Register($dst$$reg);
9080 Register src = as_Register($src$$reg);
9082 int flag = $cop$$cmpcode;
9084 switch(flag)
9085 {
9086 case 0x01: //equal
9087 __ c_eq_d(reg_op1, reg_op2);
9088 __ movt(dst, src);
9089 break;
9090 case 0x02: //not_equal
9091 __ c_eq_d(reg_op1, reg_op2);
9092 __ movf(dst, src);
9093 break;
9094 case 0x03: //greater
9095 __ c_ole_d(reg_op1, reg_op2);
9096 __ movf(dst, src);
9097 break;
9098 case 0x04: //greater_equal
9099 __ c_olt_d(reg_op1, reg_op2);
9100 __ movf(dst, src);
9101 break;
9102 case 0x05: //less
9103 __ c_ult_d(reg_op1, reg_op2);
9104 __ movt(dst, src);
9105 break;
9106 case 0x06: //less_equal
9107 __ c_ule_d(reg_op1, reg_op2);
9108 __ movt(dst, src);
9109 break;
9110 default:
9111 Unimplemented();
9112 }
9113 %}
9115 ins_pipe( pipe_slow );
9116 %}
// Conditional move of a double guarded by a double compare:
// dst = src iff (tmp1 <cop> tmp2). There is no conditional FP move for
// FP condition codes here, so each case branches around a mov_d:
// the bc1f/bc1t branch skips the move when the condition fails, and
// the nop fills the branch delay slot.
9118 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
9119 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9120 ins_cost(200);
9121 format %{
9122 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
9123 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
9124 %}
9125 ins_encode %{
9126 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9127 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9128 FloatRegister dst = as_FloatRegister($dst$$reg);
9129 FloatRegister src = as_FloatRegister($src$$reg);
9131 int flag = $cop$$cmpcode;
9133 Label L;
9135 switch(flag)
9136 {
9137 case 0x01: //equal
9138 __ c_eq_d(reg_op1, reg_op2);
9139 __ bc1f(L);
9140 __ nop();
9141 __ mov_d(dst, src);
9142 __ bind(L);
9143 break;
9144 case 0x02: //not_equal
9145 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
9146 __ c_eq_d(reg_op1, reg_op2);
9147 __ bc1t(L);
9148 __ nop();
9149 __ mov_d(dst, src);
9150 __ bind(L);
9151 break;
9152 case 0x03: //greater
9153 __ c_ole_d(reg_op1, reg_op2);
9154 __ bc1t(L);
9155 __ nop();
9156 __ mov_d(dst, src);
9157 __ bind(L);
9158 break;
9159 case 0x04: //greater_equal
9160 __ c_olt_d(reg_op1, reg_op2);
9161 __ bc1t(L);
9162 __ nop();
9163 __ mov_d(dst, src);
9164 __ bind(L);
9165 break;
9166 case 0x05: //less
9167 __ c_ult_d(reg_op1, reg_op2);
9168 __ bc1f(L);
9169 __ nop();
9170 __ mov_d(dst, src);
9171 __ bind(L);
9172 break;
9173 case 0x06: //less_equal
9174 __ c_ule_d(reg_op1, reg_op2);
9175 __ bc1f(L);
9176 __ nop();
9177 __ mov_d(dst, src);
9178 __ bind(L);
9179 break;
9180 default:
9181 Unimplemented();
9182 }
9183 %}
9185 ins_pipe( pipe_slow );
9186 %}
// Conditional move of a float guarded by a SIGNED int compare:
// dst = src iff (tmp1 <cop> tmp2). Each case branches around a mov_s
// when the condition fails; nop fills the branch delay slot.
9188 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9189 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9190 ins_cost(200);
9191 format %{
9192 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
9193 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
9194 %}
9196 ins_encode %{
9197 Register op1 = $tmp1$$Register;
9198 Register op2 = $tmp2$$Register;
9199 FloatRegister dst = as_FloatRegister($dst$$reg);
9200 FloatRegister src = as_FloatRegister($src$$reg);
9201 int flag = $cop$$cmpcode;
9202 Label L;
9204 switch(flag)
9205 {
9206 case 0x01: //equal
9207 __ bne(op1, op2, L);
9208 __ nop();
9209 __ mov_s(dst, src);
9210 __ bind(L);
9211 break;
9212 case 0x02: //not_equal
9213 __ beq(op1, op2, L);
9214 __ nop();
9215 __ mov_s(dst, src);
9216 __ bind(L);
9217 break;
9218 case 0x03: //greater
9219 __ slt(AT, op2, op1);
9220 __ beq(AT, R0, L);
9221 __ nop();
9222 __ mov_s(dst, src);
9223 __ bind(L);
9224 break;
9225 case 0x04: //greater_equal
9226 __ slt(AT, op1, op2);
9227 __ bne(AT, R0, L);
9228 __ nop();
9229 __ mov_s(dst, src);
9230 __ bind(L);
9231 break;
9232 case 0x05: //less
9233 __ slt(AT, op1, op2);
9234 __ beq(AT, R0, L);
9235 __ nop();
9236 __ mov_s(dst, src);
9237 __ bind(L);
9238 break;
9239 case 0x06: //less_equal
9240 __ slt(AT, op2, op1);
9241 __ bne(AT, R0, L);
9242 __ nop();
9243 __ mov_s(dst, src);
9244 __ bind(L);
9245 break;
9246 default:
9247 Unimplemented();
9248 }
9249 %}
9251 ins_pipe( pipe_slow );
9252 %}
// Conditional move of a double guarded by a SIGNED int compare:
// dst = src iff (tmp1 <cop> tmp2). Each case branches around a mov_d
// when the condition fails; nop fills the branch delay slot.
9254 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9255 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9256 ins_cost(200);
9257 format %{
9258 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
9259 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
9260 %}
9262 ins_encode %{
9263 Register op1 = $tmp1$$Register;
9264 Register op2 = $tmp2$$Register;
9265 FloatRegister dst = as_FloatRegister($dst$$reg);
9266 FloatRegister src = as_FloatRegister($src$$reg);
9267 int flag = $cop$$cmpcode;
9268 Label L;
9270 switch(flag)
9271 {
9272 case 0x01: //equal
9273 __ bne(op1, op2, L);
9274 __ nop();
9275 __ mov_d(dst, src);
9276 __ bind(L);
9277 break;
9278 case 0x02: //not_equal
9279 __ beq(op1, op2, L);
9280 __ nop();
9281 __ mov_d(dst, src);
9282 __ bind(L);
9283 break;
9284 case 0x03: //greater
9285 __ slt(AT, op2, op1);
9286 __ beq(AT, R0, L);
9287 __ nop();
9288 __ mov_d(dst, src);
9289 __ bind(L);
9290 break;
9291 case 0x04: //greater_equal
9292 __ slt(AT, op1, op2);
9293 __ bne(AT, R0, L);
9294 __ nop();
9295 __ mov_d(dst, src);
9296 __ bind(L);
9297 break;
9298 case 0x05: //less
9299 __ slt(AT, op1, op2);
9300 __ beq(AT, R0, L);
9301 __ nop();
9302 __ mov_d(dst, src);
9303 __ bind(L);
9304 break;
9305 case 0x06: //less_equal
9306 __ slt(AT, op2, op1);
9307 __ bne(AT, R0, L);
9308 __ nop();
9309 __ mov_d(dst, src);
9310 __ bind(L);
9311 break;
9312 default:
9313 Unimplemented();
9314 }
9315 %}
9317 ins_pipe( pipe_slow );
9318 %}
// Conditional move of a double guarded by a pointer compare:
// dst = src iff (tmp1 <cop> tmp2). Each case branches around a mov_d
// when the condition fails; nop fills the branch delay slot.
// NOTE(review): ordered cases use the SIGNED slt although pointers
// normally compare unsigned (cf. cmovP_cmpP_reg_reg's sltu) -- in
// practice C2 should only emit eq/ne pointer compares here; confirm.
9320 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9321 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9322 ins_cost(200);
9323 format %{
9324 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9325 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9326 %}
9328 ins_encode %{
9329 Register op1 = $tmp1$$Register;
9330 Register op2 = $tmp2$$Register;
9331 FloatRegister dst = as_FloatRegister($dst$$reg);
9332 FloatRegister src = as_FloatRegister($src$$reg);
9333 int flag = $cop$$cmpcode;
9334 Label L;
9336 switch(flag)
9337 {
9338 case 0x01: //equal
9339 __ bne(op1, op2, L);
9340 __ nop();
9341 __ mov_d(dst, src);
9342 __ bind(L);
9343 break;
9344 case 0x02: //not_equal
9345 __ beq(op1, op2, L);
9346 __ nop();
9347 __ mov_d(dst, src);
9348 __ bind(L);
9349 break;
9350 case 0x03: //greater
9351 __ slt(AT, op2, op1);
9352 __ beq(AT, R0, L);
9353 __ nop();
9354 __ mov_d(dst, src);
9355 __ bind(L);
9356 break;
9357 case 0x04: //greater_equal
9358 __ slt(AT, op1, op2);
9359 __ bne(AT, R0, L);
9360 __ nop();
9361 __ mov_d(dst, src);
9362 __ bind(L);
9363 break;
9364 case 0x05: //less
9365 __ slt(AT, op1, op2);
9366 __ beq(AT, R0, L);
9367 __ nop();
9368 __ mov_d(dst, src);
9369 __ bind(L);
9370 break;
9371 case 0x06: //less_equal
9372 __ slt(AT, op2, op1);
9373 __ bne(AT, R0, L);
9374 __ nop();
9375 __ mov_d(dst, src);
9376 __ bind(L);
9377 break;
9378 default:
9379 Unimplemented();
9380 }
9381 %}
9383 ins_pipe( pipe_slow );
9384 %}
9386 //FIXME
// Conditional move of an int guarded by a float compare:
// dst = src iff (tmp1 <cop> tmp2). c_*_s sets the FP condition flag;
// movt/movf move src into dst when the flag is set/clear.
// NOTE(review): NaN operands trigger the movf-based cases
// (not_equal/greater/greater_equal) -- confirm against the IR's
// unordered semantics (likely what the FIXME above refers to).
9387 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9388 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9389 ins_cost(80);
9390 format %{
9391 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9392 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9393 %}
9395 ins_encode %{
9396 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9397 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9398 Register dst = $dst$$Register;
9399 Register src = $src$$Register;
9400 int flag = $cop$$cmpcode;
9402 switch(flag)
9403 {
9404 case 0x01: //equal
9405 __ c_eq_s(reg_op1, reg_op2);
9406 __ movt(dst, src);
9407 break;
9408 case 0x02: //not_equal
9409 __ c_eq_s(reg_op1, reg_op2);
9410 __ movf(dst, src);
9411 break;
9412 case 0x03: //greater
9413 __ c_ole_s(reg_op1, reg_op2);
9414 __ movf(dst, src);
9415 break;
9416 case 0x04: //greater_equal
9417 __ c_olt_s(reg_op1, reg_op2);
9418 __ movf(dst, src);
9419 break;
9420 case 0x05: //less
9421 __ c_ult_s(reg_op1, reg_op2);
9422 __ movt(dst, src);
9423 break;
9424 case 0x06: //less_equal
9425 __ c_ule_s(reg_op1, reg_op2);
9426 __ movt(dst, src);
9427 break;
9428 default:
9429 Unimplemented();
9430 }
9431 %}
9432 ins_pipe( pipe_slow );
9433 %}
// Conditional move of a float guarded by a float compare:
// dst = src iff (tmp1 <cop> tmp2). c_*_s sets the FP condition flag;
// bc1f/bc1t branch around the mov_s when the condition fails, with a
// nop in the branch delay slot.
9435 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9436 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9437 ins_cost(200);
9438 format %{
9439 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9440 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9441 %}
9443 ins_encode %{
9444 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9445 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9446 FloatRegister dst = $dst$$FloatRegister;
9447 FloatRegister src = $src$$FloatRegister;
9448 Label L;
9449 int flag = $cop$$cmpcode;
9451 switch(flag)
9452 {
9453 case 0x01: //equal
9454 __ c_eq_s(reg_op1, reg_op2);
9455 __ bc1f(L);
9456 __ nop();
9457 __ mov_s(dst, src);
9458 __ bind(L);
9459 break;
9460 case 0x02: //not_equal
9461 __ c_eq_s(reg_op1, reg_op2);
9462 __ bc1t(L);
9463 __ nop();
9464 __ mov_s(dst, src);
9465 __ bind(L);
9466 break;
9467 case 0x03: //greater
9468 __ c_ole_s(reg_op1, reg_op2);
9469 __ bc1t(L);
9470 __ nop();
9471 __ mov_s(dst, src);
9472 __ bind(L);
9473 break;
9474 case 0x04: //greater_equal
9475 __ c_olt_s(reg_op1, reg_op2);
9476 __ bc1t(L);
9477 __ nop();
9478 __ mov_s(dst, src);
9479 __ bind(L);
9480 break;
9481 case 0x05: //less
9482 __ c_ult_s(reg_op1, reg_op2);
9483 __ bc1f(L);
9484 __ nop();
9485 __ mov_s(dst, src);
9486 __ bind(L);
9487 break;
9488 case 0x06: //less_equal
9489 __ c_ule_s(reg_op1, reg_op2);
9490 __ bc1f(L);
9491 __ nop();
9492 __ mov_s(dst, src);
9493 __ bind(L);
9494 break;
9495 default:
9496 Unimplemented();
9497 }
9498 %}
9499 ins_pipe( pipe_slow );
9500 %}
9502 // Manifest a CmpL result in an integer register. Very painful.
9503 // This is the test to avoid.
// Produces dst = -1 / 0 / +1 for src1 < / == / > src2 (signed long
// three-way compare). The daddiu in the bltz delay slot preloads -1,
// which is the final result when the branch to Done is taken.
9504 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9505 match(Set dst (CmpL3 src1 src2));
9506 ins_cost(1000);
9507 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9508 ins_encode %{
9509 Register opr1 = as_Register($src1$$reg);
9510 Register opr2 = as_Register($src2$$reg);
9511 Register dst = as_Register($dst$$reg);
9513 Label Done;
9515 __ subu(AT, opr1, opr2);
9516 __ bltz(AT, Done);
9517 __ delayed()->daddiu(dst, R0, -1);
// Fall-through: difference is >= 0, so result is +1, corrected to 0
// when the difference is exactly zero.
9519 __ move(dst, 1);
9520 __ movz(dst, R0, AT);
9522 __ bind(Done);
9523 %}
9524 ins_pipe( pipe_slow );
9525 %}
9527 //
9528 // less_result = -1
9529 // greater_result = 1
9530 // equal_result = 0
9531 // nan_result = -1
9532 //
// Float three-way compare: dst = -1 / 0 / +1 for src1 < / == / > src2,
// with NaN mapped to -1 (c_ult_s is true for unordered operands, so the
// branch with the -1 delay-slot preload is taken).
9533 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9534 match(Set dst (CmpF3 src1 src2));
9535 ins_cost(1000);
9536 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9537 ins_encode %{
9538 FloatRegister src1 = as_FloatRegister($src1$$reg);
9539 FloatRegister src2 = as_FloatRegister($src2$$reg);
9540 Register dst = as_Register($dst$$reg);
9542 Label Done;
9544 __ c_ult_s(src1, src2);
9545 __ bc1t(Done);
9546 __ delayed()->daddiu(dst, R0, -1);
// Fall-through: not (unordered or less), so result is +1, corrected to
// 0 when the operands compare equal.
9548 __ c_eq_s(src1, src2);
9549 __ move(dst, 1);
9550 __ movt(dst, R0);
9552 __ bind(Done);
9553 %}
9554 ins_pipe( pipe_slow );
9555 %}
// Double three-way compare: dst = -1 / 0 / +1 for src1 < / == / > src2,
// with NaN mapped to -1 (c_ult_d is true for unordered operands).
// Same scheme as cmpF3_reg_reg, using the double-precision compares.
9557 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9558 match(Set dst (CmpD3 src1 src2));
9559 ins_cost(1000);
9560 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9561 ins_encode %{
9562 FloatRegister src1 = as_FloatRegister($src1$$reg);
9563 FloatRegister src2 = as_FloatRegister($src2$$reg);
9564 Register dst = as_Register($dst$$reg);
9566 Label Done;
9568 __ c_ult_d(src1, src2);
9569 __ bc1t(Done);
9570 __ delayed()->daddiu(dst, R0, -1);
9572 __ c_eq_d(src1, src2);
9573 __ move(dst, 1);
9574 __ movt(dst, R0);
9576 __ bind(Done);
9577 %}
9578 ins_pipe( pipe_slow );
9579 %}
// Zero-fill a block of memory: stores $cnt doublewords of zero starting
// at $base. Uses T9 as the remaining-count and AT as the running
// address; both are scratch here.
9581 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9582 match(Set dummy (ClearArray cnt base));
9583 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9584 ins_encode %{
//$cnt is the number of DOUBLEWORDS (8 bytes each) to clear, matching
//the x86 ClearArray convention (see the 2012/9/21 note below); base
//points to the starting address of the array.
9587 Register base = $base$$Register;
9588 Register num = $cnt$$Register;
9589 Label Loop, done;
9591 /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
9592 __ move(T9, num); /* T9 = words */
9593 __ beq(T9, R0, done);
9594 __ nop();
9595 __ move(AT, base);
9597 __ bind(Loop);
9598 __ sd(R0, Address(AT, 0));
9599 __ daddi(AT, AT, wordSize);
9600 __ daddi(T9, T9, -1);
9601 __ bne(T9, R0, Loop);
9602 __ delayed()->nop();
9603 __ bind(done);
9604 %}
9605 ins_pipe( pipe_slow );
9606 %}
// Java String.compareTo intrinsic: lexicographic compare of two UTF-16
// char sequences. str1/str2 point at the first characters, cnt1/cnt2
// are the lengths; all four input registers are clobbered (USE_KILL).
// Result: difference of the first differing chars, or the length
// difference if one string is a prefix of the other.
9608 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9609 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9610 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9612 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9613 ins_encode %{
9614 // Get the first character position in both strings
9615 // [8] char array, [12] offset, [16] count
9616 Register str1 = $str1$$Register;
9617 Register str2 = $str2$$Register;
9618 Register cnt1 = $cnt1$$Register;
9619 Register cnt2 = $cnt2$$Register;
9620 Register result = $result$$Register;
9622 Label L, Loop, haveResult, done;
9624 // compute the difference of lengths (in result)
9625 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9627 // compute the shorter length (in cnt1)
9628 __ slt(AT, cnt2, cnt1);
9629 __ movn(cnt1, cnt2, AT);
9631 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
// Main loop: compare one char (16 bits) per iteration. The lhu of the
// next str1 char sits in the beq delay slot, so AT is loaded even on
// the exit path (harmless). Pointer bumps and the count decrement are
// likewise placed in delay slots.
9632 __ bind(Loop); // Loop begin
9633 __ beq(cnt1, R0, done);
9634 __ delayed()->lhu(AT, str1, 0);;
9636 // compare current character
9637 __ lhu(cnt2, str2, 0);
9638 __ bne(AT, cnt2, haveResult);
9639 __ delayed()->addi(str1, str1, 2);
9640 __ addi(str2, str2, 2);
9641 __ b(Loop);
9642 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
// Mismatch found: result is the difference of the differing chars.
9644 __ bind(haveResult);
9645 __ subu(result, AT, cnt2);
9647 __ bind(done);
9648 %}
9650 ins_pipe( pipe_slow );
9651 %}
9653 // intrinsic optimization
// Java String.equals intrinsic: returns 1 in $result if the two char
// sequences of length $cnt are identical (or the same array), else 0.
// str1/str2/cnt are clobbered (USE_KILL); temp is a scratch register.
// NOTE(review): the bare instructions after beq/b occupy branch delay
// slots (no explicit delayed() call) -- confirm this port's assembler
// does not auto-insert nops for the plain branch forms.
9654 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9655 match(Set result (StrEquals (Binary str1 str2) cnt));
9656 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9658 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9659 ins_encode %{
9660 // Get the first character position in both strings
9661 // [8] char array, [12] offset, [16] count
9662 Register str1 = $str1$$Register;
9663 Register str2 = $str2$$Register;
9664 Register cnt = $cnt$$Register;
9665 Register tmp = $temp$$Register;
9666 Register result = $result$$Register;
9668 Label Loop, done;
9671 __ beq(str1, str2, done); // same char[] ?
9672 __ daddiu(result, R0, 1);
9674 __ bind(Loop); // Loop begin
9675 __ beq(cnt, R0, done);
9676 __ daddiu(result, R0, 1); // count == 0
9678 // compare current character
9679 __ lhu(AT, str1, 0);;
9680 __ lhu(tmp, str2, 0);
9681 __ bne(AT, tmp, done);
9682 __ delayed()->daddi(result, R0, 0);
9683 __ addi(str1, str1, 2);
9684 __ addi(str2, str2, 2);
9685 __ b(Loop);
9686 __ delayed()->addi(cnt, cnt, -1); // Loop end
9688 __ bind(done);
9689 %}
9691 ins_pipe( pipe_slow );
9692 %}
9694 //----------Arithmetic Instructions-------------------------------------------
9695 //----------Addition Instructions---------------------------------------------
// Integer add: dst = src1 + src2 (32-bit add via addu32).
9696 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9697 match(Set dst (AddI src1 src2));
9699 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9700 ins_encode %{
9701 Register dst = $dst$$Register;
9702 Register src1 = $src1$$Register;
9703 Register src2 = $src2$$Register;
9704 __ addu32(dst, src1, src2);
9705 %}
9706 ins_pipe( ialu_regI_regI );
9707 %}
// Integer add with immediate: dst = src1 + imm. Uses addiu32 when the
// immediate fits in a signed 16-bit field; otherwise materializes the
// immediate into the AT scratch register first.
9709 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9710 match(Set dst (AddI src1 src2));
9712 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9713 ins_encode %{
9714 Register dst = $dst$$Register;
9715 Register src1 = $src1$$Register;
9716 int imm = $src2$$constant;
9718 if(Assembler::is_simm16(imm)) {
9719 __ addiu32(dst, src1, imm);
9720 } else {
9721 __ move(AT, imm);
9722 __ addu32(dst, src1, AT);
9723 }
9724 %}
9725 ins_pipe( ialu_regI_regI );
9726 %}
// Pointer add: dst = src1 + src2 (64-bit add, daddu).
9728 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9729 match(Set dst (AddP src1 src2));
9731 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9733 ins_encode %{
9734 Register dst = $dst$$Register;
9735 Register src1 = $src1$$Register;
9736 Register src2 = $src2$$Register;
9737 __ daddu(dst, src1, src2);
9738 %}
9740 ins_pipe( ialu_regI_regI );
9741 %}
// Pointer add with an int offset widened to long: dst = src1 + (long)src2.
// The ConvI2L is free because int values are kept sign-extended in
// 64-bit registers, so a plain daddu suffices -- NOTE(review): assumes
// the sign-extended-register invariant holds; confirm for this port.
9743 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9744 match(Set dst (AddP src1 (ConvI2L src2)));
9746 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9748 ins_encode %{
9749 Register dst = $dst$$Register;
9750 Register src1 = $src1$$Register;
9751 Register src2 = $src2$$Register;
9752 __ daddu(dst, src1, src2);
9753 %}
9755 ins_pipe( ialu_regI_regI );
9756 %}
// Pointer add with a long immediate: dst = src1 + imm. Uses daddiu for
// signed 16-bit immediates; otherwise loads the full 64-bit constant
// into AT with set64 and adds.
9758 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9759 match(Set dst (AddP src1 src2));
9761 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9762 ins_encode %{
9763 Register src1 = $src1$$Register;
9764 long src2 = $src2$$constant;
9765 Register dst = $dst$$Register;
9767 if(Assembler::is_simm16(src2)) {
9768 __ daddiu(dst, src1, src2);
9769 } else {
9770 __ set64(AT, src2);
9771 __ daddu(dst, src1, AT);
9772 }
9773 %}
9774 ins_pipe( ialu_regI_imm16 );
9775 %}
9777 // Add Long Register with Register
// Long add: dst = src1 + src2 (64-bit daddu).
9778 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9779 match(Set dst (AddL src1 src2));
9780 ins_cost(200);
9781 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9783 ins_encode %{
9784 Register dst_reg = as_Register($dst$$reg);
9785 Register src1_reg = as_Register($src1$$reg);
9786 Register src2_reg = as_Register($src2$$reg);
9788 __ daddu(dst_reg, src1_reg, src2_reg);
9789 %}
9791 ins_pipe( ialu_regL_regL );
9792 %}
// Long add with a 16-bit immediate: dst = src1 + imm. The immL16
// operand guarantees the constant fits daddiu's signed 16-bit field.
9794 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9795 %{
9796 match(Set dst (AddL src1 src2));
9798 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9799 ins_encode %{
9800 Register dst_reg = as_Register($dst$$reg);
9801 Register src1_reg = as_Register($src1$$reg);
9802 int src2_imm = $src2$$constant;
9804 __ daddiu(dst_reg, src1_reg, src2_imm);
9805 %}
9807 ins_pipe( ialu_regL_regL );
9808 %}
// Long add of a widened int and a 16-bit immediate:
// dst = (long)src1 + imm. The ConvI2L is folded away -- NOTE(review):
// assumes int registers are kept sign-extended on MIPS64; confirm.
9810 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9811 %{
9812 match(Set dst (AddL (ConvI2L src1) src2));
9814 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9815 ins_encode %{
9816 Register dst_reg = as_Register($dst$$reg);
9817 Register src1_reg = as_Register($src1$$reg);
9818 int src2_imm = $src2$$constant;
9820 __ daddiu(dst_reg, src1_reg, src2_imm);
9821 %}
9823 ins_pipe( ialu_regL_regL );
9824 %}
// Long add of a widened int and a long: dst = (long)src1 + src2.
// ConvI2L folded away (sign-extended int registers assumed).
9826 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9827 match(Set dst (AddL (ConvI2L src1) src2));
9828 ins_cost(200);
9829 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9831 ins_encode %{
9832 Register dst_reg = as_Register($dst$$reg);
9833 Register src1_reg = as_Register($src1$$reg);
9834 Register src2_reg = as_Register($src2$$reg);
9836 __ daddu(dst_reg, src1_reg, src2_reg);
9837 %}
9839 ins_pipe( ialu_regL_regL );
9840 %}
9842 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9843 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9844 ins_cost(200);
9845 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9847 ins_encode %{
9848 Register dst_reg = as_Register($dst$$reg);
9849 Register src1_reg = as_Register($src1$$reg);
9850 Register src2_reg = as_Register($src2$$reg);
9852 __ daddu(dst_reg, src1_reg, src2_reg);
9853 %}
9855 ins_pipe( ialu_regL_regL );
9856 %}
9858 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9859 match(Set dst (AddL src1 (ConvI2L src2)));
9860 ins_cost(200);
9861 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9863 ins_encode %{
9864 Register dst_reg = as_Register($dst$$reg);
9865 Register src1_reg = as_Register($src1$$reg);
9866 Register src2_reg = as_Register($src2$$reg);
9868 __ daddu(dst_reg, src1_reg, src2_reg);
9869 %}
9871 ins_pipe( ialu_regL_regL );
9872 %}
9874 //----------Subtraction Instructions-------------------------------------------
9875 // Integer Subtraction Instructions
// 32-bit subtract (subu32 keeps the result properly sign-extended).
9876 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9877 match(Set dst (SubI src1 src2));
9878 ins_cost(100);
9880 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9881 ins_encode %{
9882 Register dst = $dst$$Register;
9883 Register src1 = $src1$$Register;
9884 Register src2 = $src2$$Register;
9885 __ subu32(dst, src1, src2);
9886 %}
9887 ins_pipe( ialu_regI_regI );
9888 %}
// Subtract-immediate folded to add of the negated constant. immI16_sub
// guarantees -src2 still fits the addiu immediate range.
9890 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9891 match(Set dst (SubI src1 src2));
9892 ins_cost(80);
9894 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9895 ins_encode %{
9896 Register dst = $dst$$Register;
9897 Register src1 = $src1$$Register;
9898 __ addiu32(dst, src1, -1 * $src2$$constant);
9899 %}
9900 ins_pipe( ialu_regI_regI );
9901 %}
// Integer negate: 0 - src, emitted as subtract from hardware zero R0.
9903 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9904 match(Set dst (SubI zero src));
9905 ins_cost(80);
9907 format %{ "neg $dst, $src #@negI_Reg" %}
9908 ins_encode %{
9909 Register dst = $dst$$Register;
9910 Register src = $src$$Register;
9911 __ subu32(dst, R0, src);
9912 %}
9913 ins_pipe( ialu_regI_regI );
9914 %}
// Long negate: 0 - src via 64-bit subu from R0.
9916 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9917 match(Set dst (SubL zero src));
9918 ins_cost(80);
9920 format %{ "neg $dst, $src #@negL_Reg" %}
9921 ins_encode %{
9922 Register dst = $dst$$Register;
9923 Register src = $src$$Register;
9924 __ subu(dst, R0, src);
9925 %}
9926 ins_pipe( ialu_regI_regI );
9927 %}
// Long subtract-immediate, folded to daddiu of the negated constant.
9929 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9930 match(Set dst (SubL src1 src2));
9931 ins_cost(80);
9933 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9934 ins_encode %{
9935 Register dst = $dst$$Register;
9936 Register src1 = $src1$$Register;
9937 __ daddiu(dst, src1, -1 * $src2$$constant);
9938 %}
9939 ins_pipe( ialu_regI_regI );
9940 %}
9942 // Subtract Long Register with Register.
9943 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9944 match(Set dst (SubL src1 src2));
9945 ins_cost(100);
9946 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9947 ins_encode %{
9948 Register dst = as_Register($dst$$reg);
9949 Register src1 = as_Register($src1$$reg);
9950 Register src2 = as_Register($src2$$reg);
9952 __ subu(dst, src1, src2);
9953 %}
9954 ins_pipe( ialu_regL_regL );
9955 %}
// SubL with an int operand: ConvI2L is free because int registers are
// kept sign-extended to 64 bits on MIPS64.
9957 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9958 match(Set dst (SubL src1 (ConvI2L src2)));
9959 ins_cost(100);
9960 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9961 ins_encode %{
9962 Register dst = as_Register($dst$$reg);
9963 Register src1 = as_Register($src1$$reg);
9964 Register src2 = as_Register($src2$$reg);
9966 __ subu(dst, src1, src2);
9967 %}
9968 ins_pipe( ialu_regL_regL );
9969 %}
// (ConvI2L int) - long.
9971 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9972 match(Set dst (SubL (ConvI2L src1) src2));
9973 ins_cost(200);
9974 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9975 ins_encode %{
9976 Register dst = as_Register($dst$$reg);
9977 Register src1 = as_Register($src1$$reg);
9978 Register src2 = as_Register($src2$$reg);
9980 __ subu(dst, src1, src2);
9981 %}
9982 ins_pipe( ialu_regL_regL );
9983 %}
// (ConvI2L int) - (ConvI2L int).
9985 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9986 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9987 ins_cost(200);
9988 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9989 ins_encode %{
9990 Register dst = as_Register($dst$$reg);
9991 Register src1 = as_Register($src1$$reg);
9992 Register src2 = as_Register($src2$$reg);
9994 __ subu(dst, src1, src2);
9995 %}
9996 ins_pipe( ialu_regL_regL );
9997 %}
9999 // Integer MOD with Register
// 32-bit remainder: classic div + mfhi (remainder lands in HI).
// The Loongson gsmod path was measured slower and is disabled below.
10000 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10001 match(Set dst (ModI src1 src2));
10002 ins_cost(300);
10003 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
10004 ins_encode %{
10005 Register dst = $dst$$Register;
10006 Register src1 = $src1$$Register;
10007 Register src2 = $src2$$Register;
10009 //if (UseLoongsonISA) {
// Deliberately dead branch — kept to document the disabled gsmod path.
10010 if (0) {
10011 // 2016.08.10
10012 // Experiments show that gsmod is slower that div+mfhi.
10013 // So I just disable it here.
10014 __ gsmod(dst, src1, src2);
10015 } else {
10016 __ div(src1, src2);
10017 __ mfhi(dst);
10018 }
10019 %}
10021 //ins_pipe( ialu_mod );
10022 ins_pipe( ialu_regI_regI );
10023 %}
// 64-bit remainder: single-instruction gsdmod on Loongson, else ddiv + mfhi.
10025 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10026 match(Set dst (ModL src1 src2));
10027 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
10029 ins_encode %{
10030 Register dst = as_Register($dst$$reg);
10031 Register op1 = as_Register($src1$$reg);
10032 Register op2 = as_Register($src2$$reg);
10034 if (UseLoongsonISA) {
10035 __ gsdmod(dst, op1, op2);
10036 } else {
10037 __ ddiv(op1, op2);
10038 __ mfhi(dst);
10039 }
10040 %}
10041 ins_pipe( pipe_slow );
10042 %}
// 32-bit multiply: three-operand mul writes the low word directly.
10044 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10045 match(Set dst (MulI src1 src2));
10047 ins_cost(300);
10048 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10049 ins_encode %{
10050 Register src1 = $src1$$Register;
10051 Register src2 = $src2$$Register;
10052 Register dst = $dst$$Register;
10054 __ mul(dst, src1, src2);
10055 %}
10056 ins_pipe( ialu_mult );
10057 %}
// Multiply-add fused via the HI/LO accumulator: seed LO with src3,
// madd accumulates src1*src2 into HI/LO, result read back from LO.
// ins_cost(999) discourages selection unless it really wins.
10059 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
10060 match(Set dst (AddI (MulI src1 src2) src3));
10062 ins_cost(999);
10063 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
10064 ins_encode %{
10065 Register src1 = $src1$$Register;
10066 Register src2 = $src2$$Register;
10067 Register src3 = $src3$$Register;
10068 Register dst = $dst$$Register;
10070 __ mtlo(src3);
10071 __ madd(src1, src2);
10072 __ mflo(dst);
10073 %}
10074 ins_pipe( ialu_mult );
10075 %}
// 32-bit divide. MIPS div never traps on divide-by-zero, so a teq
// (trap-if-equal against R0) raises the exception manually first.
10077 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10078 match(Set dst (DivI src1 src2));
10080 ins_cost(300);
10081 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10082 ins_encode %{
10083 Register src1 = $src1$$Register;
10084 Register src2 = $src2$$Register;
10085 Register dst = $dst$$Register;
10087 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10088 We must trap an exception manually. */
10089 __ teq(R0, src2, 0x7);
10091 if (UseLoongsonISA) {
10092 __ gsdiv(dst, src1, src2);
10093 } else {
10094 __ div(src1, src2);
// nops cover the HI/LO result latency before mflo reads the quotient.
10096 __ nop();
10097 __ nop();
10098 __ mflo(dst);
10099 }
10100 %}
10101 ins_pipe( ialu_mod );
10102 %}
// Single-precision FP divide. FP division raises IEEE exceptions via
// the FPU itself, so no manual trap is emitted (see open question below).
10104 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10105 match(Set dst (DivF src1 src2));
10107 ins_cost(300);
10108 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10109 ins_encode %{
10110 FloatRegister src1 = $src1$$FloatRegister;
10111 FloatRegister src2 = $src2$$FloatRegister;
10112 FloatRegister dst = $dst$$FloatRegister;
10114 /* Here do we need to trap an exception manually ? */
10115 __ div_s(dst, src1, src2);
10116 %}
10117 ins_pipe( pipe_slow );
10118 %}
// Double-precision FP divide (same exception question as divF).
10120 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10121 match(Set dst (DivD src1 src2));
10123 ins_cost(300);
10124 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10125 ins_encode %{
10126 FloatRegister src1 = $src1$$FloatRegister;
10127 FloatRegister src2 = $src2$$FloatRegister;
10128 FloatRegister dst = $dst$$FloatRegister;
10130 /* Here do we need to trap an exception manually ? */
10131 __ div_d(dst, src1, src2);
10132 %}
10133 ins_pipe( pipe_slow );
10134 %}
// 64-bit multiply: single gsdmult on Loongson, else dmult + mflo
// (low 64 bits of the 128-bit product).
10136 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10137 match(Set dst (MulL src1 src2));
10138 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10139 ins_encode %{
10140 Register dst = as_Register($dst$$reg);
10141 Register op1 = as_Register($src1$$reg);
10142 Register op2 = as_Register($src2$$reg);
10144 if (UseLoongsonISA) {
10145 __ gsdmult(dst, op1, op2);
10146 } else {
10147 __ dmult(op1, op2);
10148 __ mflo(dst);
10149 }
10150 %}
10151 ins_pipe( pipe_slow );
10152 %}
// MulL with an int operand; ConvI2L is free (sign-extension invariant).
10154 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10155 match(Set dst (MulL src1 (ConvI2L src2)));
10156 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10157 ins_encode %{
10158 Register dst = as_Register($dst$$reg);
10159 Register op1 = as_Register($src1$$reg);
10160 Register op2 = as_Register($src2$$reg);
10162 if (UseLoongsonISA) {
10163 __ gsdmult(dst, op1, op2);
10164 } else {
10165 __ dmult(op1, op2);
10166 __ mflo(dst);
10167 }
10168 %}
10169 ins_pipe( pipe_slow );
10170 %}
// 64-bit divide: gsddiv on Loongson, else ddiv + mflo for the quotient.
// NOTE(review): unlike divI above, no explicit teq divide-by-zero trap
// is emitted here — presumably handled elsewhere; verify.
10172 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10173 match(Set dst (DivL src1 src2));
10174 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10176 ins_encode %{
10177 Register dst = as_Register($dst$$reg);
10178 Register op1 = as_Register($src1$$reg);
10179 Register op2 = as_Register($src2$$reg);
10181 if (UseLoongsonISA) {
10182 __ gsddiv(dst, op1, op2);
10183 } else {
10184 __ ddiv(op1, op2);
10185 __ mflo(dst);
10186 }
10187 %}
10188 ins_pipe( pipe_slow );
10189 %}
// Single-precision FP add.
10191 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10192 match(Set dst (AddF src1 src2));
10193 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10194 ins_encode %{
10195 FloatRegister src1 = as_FloatRegister($src1$$reg);
10196 FloatRegister src2 = as_FloatRegister($src2$$reg);
10197 FloatRegister dst = as_FloatRegister($dst$$reg);
10199 __ add_s(dst, src1, src2);
10200 %}
10201 ins_pipe( fpu_regF_regF );
10202 %}
// Single-precision FP subtract.
10204 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10205 match(Set dst (SubF src1 src2));
10206 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10207 ins_encode %{
10208 FloatRegister src1 = as_FloatRegister($src1$$reg);
10209 FloatRegister src2 = as_FloatRegister($src2$$reg);
10210 FloatRegister dst = as_FloatRegister($dst$$reg);
10212 __ sub_s(dst, src1, src2);
10213 %}
10214 ins_pipe( fpu_regF_regF );
10215 %}
// Double-precision FP add.
10216 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10217 match(Set dst (AddD src1 src2));
10218 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10219 ins_encode %{
10220 FloatRegister src1 = as_FloatRegister($src1$$reg);
10221 FloatRegister src2 = as_FloatRegister($src2$$reg);
10222 FloatRegister dst = as_FloatRegister($dst$$reg);
10224 __ add_d(dst, src1, src2);
10225 %}
10226 ins_pipe( fpu_regF_regF );
10227 %}
// Double-precision FP subtract.
10229 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10230 match(Set dst (SubD src1 src2));
10231 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10232 ins_encode %{
10233 FloatRegister src1 = as_FloatRegister($src1$$reg);
10234 FloatRegister src2 = as_FloatRegister($src2$$reg);
10235 FloatRegister dst = as_FloatRegister($dst$$reg);
10237 __ sub_d(dst, src1, src2);
10238 %}
10239 ins_pipe( fpu_regF_regF );
10240 %}
// Single-precision FP negate (flips the sign bit via neg.s).
10242 instruct negF_reg(regF dst, regF src) %{
10243 match(Set dst (NegF src));
10244 format %{ "negF $dst, $src @negF_reg" %}
10245 ins_encode %{
10246 FloatRegister src = as_FloatRegister($src$$reg);
10247 FloatRegister dst = as_FloatRegister($dst$$reg);
10249 __ neg_s(dst, src);
10250 %}
10251 ins_pipe( fpu_regF_regF );
10252 %}
// Double-precision FP negate.
10254 instruct negD_reg(regD dst, regD src) %{
10255 match(Set dst (NegD src));
10256 format %{ "negD $dst, $src @negD_reg" %}
10257 ins_encode %{
10258 FloatRegister src = as_FloatRegister($src$$reg);
10259 FloatRegister dst = as_FloatRegister($dst$$reg);
10261 __ neg_d(dst, src);
10262 %}
10263 ins_pipe( fpu_regF_regF );
10264 %}
// Single-precision FP multiply.
10267 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10268 match(Set dst (MulF src1 src2));
10269 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10270 ins_encode %{
10271 FloatRegister src1 = $src1$$FloatRegister;
10272 FloatRegister src2 = $src2$$FloatRegister;
10273 FloatRegister dst = $dst$$FloatRegister;
10275 __ mul_s(dst, src1, src2);
10276 %}
10277 ins_pipe( fpu_regF_regF );
10278 %}
// Fused multiply-add, single precision. The huge ins_cost effectively
// disables selection (fused rounding differs from separate mul+add,
// which breaks strict Java FP semantics on some Loongson parts).
10280 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10281 match(Set dst (AddF (MulF src1 src2) src3));
10282 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10283 ins_cost(44444);
10284 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10285 ins_encode %{
10286 FloatRegister src1 = $src1$$FloatRegister;
10287 FloatRegister src2 = $src2$$FloatRegister;
10288 FloatRegister src3 = $src3$$FloatRegister;
10289 FloatRegister dst = $dst$$FloatRegister;
10291 __ madd_s(dst, src1, src2, src3);
10292 %}
10293 ins_pipe( fpu_regF_regF );
10294 %}
10296 // Mul two double precision floating piont number
10297 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10298 match(Set dst (MulD src1 src2));
10299 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10300 ins_encode %{
10301 FloatRegister src1 = $src1$$FloatRegister;
10302 FloatRegister src2 = $src2$$FloatRegister;
10303 FloatRegister dst = $dst$$FloatRegister;
10305 __ mul_d(dst, src1, src2);
10306 %}
10307 ins_pipe( fpu_regF_regF );
10308 %}
// Fused multiply-add, double precision; disabled via ins_cost like maddF.
10310 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10311 match(Set dst (AddD (MulD src1 src2) src3));
10312 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10313 ins_cost(44444);
10314 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10315 ins_encode %{
10316 FloatRegister src1 = $src1$$FloatRegister;
10317 FloatRegister src2 = $src2$$FloatRegister;
10318 FloatRegister src3 = $src3$$FloatRegister;
10319 FloatRegister dst = $dst$$FloatRegister;
10321 __ madd_d(dst, src1, src2, src3);
10322 %}
10323 ins_pipe( fpu_regF_regF );
10324 %}
// Single-precision absolute value (clears the sign bit via abs.s).
10326 instruct absF_reg(regF dst, regF src) %{
10327 match(Set dst (AbsF src));
10328 ins_cost(100);
10329 format %{ "absF $dst, $src @absF_reg" %}
10330 ins_encode %{
10331 FloatRegister src = as_FloatRegister($src$$reg);
10332 FloatRegister dst = as_FloatRegister($dst$$reg);
10334 __ abs_s(dst, src);
10335 %}
10336 ins_pipe( fpu_regF_regF );
10337 %}
10340 // intrinsics for math_native.
10341 // AbsD SqrtD CosD SinD TanD LogD Log10D
// Double-precision absolute value.
10343 instruct absD_reg(regD dst, regD src) %{
10344 match(Set dst (AbsD src));
10345 ins_cost(100);
10346 format %{ "absD $dst, $src @absD_reg" %}
10347 ins_encode %{
10348 FloatRegister src = as_FloatRegister($src$$reg);
10349 FloatRegister dst = as_FloatRegister($dst$$reg);
10351 __ abs_d(dst, src);
10352 %}
10353 ins_pipe( fpu_regF_regF );
10354 %}
// Double-precision square root (hardware sqrt.d).
10356 instruct sqrtD_reg(regD dst, regD src) %{
10357 match(Set dst (SqrtD src));
10358 ins_cost(100);
10359 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10360 ins_encode %{
10361 FloatRegister src = as_FloatRegister($src$$reg);
10362 FloatRegister dst = as_FloatRegister($dst$$reg);
10364 __ sqrt_d(dst, src);
10365 %}
10366 ins_pipe( fpu_regF_regF );
10367 %}
// Float sqrt idiom: (float)sqrt((double)f) collapses to one sqrt.s.
// Matching the whole ConvD2F(SqrtD(ConvF2D)) tree avoids two conversions.
10369 instruct sqrtF_reg(regF dst, regF src) %{
10370 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10371 ins_cost(100);
10372 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10373 ins_encode %{
10374 FloatRegister src = as_FloatRegister($src$$reg);
10375 FloatRegister dst = as_FloatRegister($dst$$reg);
10377 __ sqrt_s(dst, src);
10378 %}
10379 ins_pipe( fpu_regF_regF );
10380 %}
10381 //----------------------------------Logical Instructions----------------------
10382 //__________________________________Integer Logical Instructions-------------
10384 //And Instuctions
10385 // And Register with Immediate
// General AndI with an arbitrary immediate: materialize into AT, then and.
// (The cheaper zero-extended-immediate form below is preferred when it fits.)
10386 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10387 match(Set dst (AndI src1 src2));
10389 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10390 ins_encode %{
10391 Register dst = $dst$$Register;
10392 Register src = $src1$$Register;
10393 int val = $src2$$constant;
10395 __ move(AT, val);
10396 __ andr(dst, src, AT);
10397 %}
10398 ins_pipe( ialu_regI_regI );
10399 %}
// AndI with an immediate in [0, 65535]: single andi (zero-extended imm16).
10401 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10402 match(Set dst (AndI src1 src2));
10403 ins_cost(60);
10405 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10406 ins_encode %{
10407 Register dst = $dst$$Register;
10408 Register src = $src1$$Register;
10409 int val = $src2$$constant;
10411 __ andi(dst, src, val);
10412 %}
10413 ins_pipe( ialu_regI_regI );
10414 %}
// AndI with a low-bit mask (2^k - 1): one ext (bit-field extract of the
// low k bits). is_int_mask returns the mask width k.
10416 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10417 match(Set dst (AndI src1 mask));
10418 ins_cost(60);
10420 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10421 ins_encode %{
10422 Register dst = $dst$$Register;
10423 Register src = $src1$$Register;
10424 int size = Assembler::is_int_mask($mask$$constant);
10426 __ ext(dst, src, 0, size);
10427 %}
10428 ins_pipe( ialu_regI_regI );
10429 %}
// 64-bit variant: AndL with a low-bit mask via dext.
10431 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10432 match(Set dst (AndL src1 mask));
10433 ins_cost(60);
10435 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10436 ins_encode %{
10437 Register dst = $dst$$Register;
10438 Register src = $src1$$Register;
10439 int size = Assembler::is_jlong_mask($mask$$constant);
10441 __ dext(dst, src, 0, size);
10442 %}
10443 ins_pipe( ialu_regI_regI );
10444 %}
// XorI with an immediate in [0, 65535]: single xori.
10446 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10447 match(Set dst (XorI src1 src2));
10448 ins_cost(60);
10450 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10451 ins_encode %{
10452 Register dst = $dst$$Register;
10453 Register src = $src1$$Register;
10454 int val = $src2$$constant;
10456 __ xori(dst, src, val);
10457 %}
10458 ins_pipe( ialu_regI_regI );
10459 %}
// x ^ -1 == ~x, emitted as Loongson gsorn (or-not): ~(R0 | src) == ~src.
// Gated on Loongson 3A2000+ where gsorn exists.
10461 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10462 match(Set dst (XorI src1 M1));
10463 predicate(UseLoongsonISA && Use3A2000);
10464 ins_cost(60);
10466 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10467 ins_encode %{
10468 Register dst = $dst$$Register;
10469 Register src = $src1$$Register;
10471 __ gsorn(dst, R0, src);
10472 %}
10473 ins_pipe( ialu_regI_regI );
10474 %}
// Same not-idiom applied to a long truncated to int (ConvL2I is a no-op
// at the register level; the int result is the complemented low bits).
10476 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10477 match(Set dst (XorI (ConvL2I src1) M1));
10478 predicate(UseLoongsonISA && Use3A2000);
10479 ins_cost(60);
10481 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10482 ins_encode %{
10483 Register dst = $dst$$Register;
10484 Register src = $src1$$Register;
10486 __ gsorn(dst, R0, src);
10487 %}
10488 ins_pipe( ialu_regI_regI );
10489 %}
// XorL with a [0, 65535] immediate: xori zero-extends, safe for 64-bit.
10491 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10492 match(Set dst (XorL src1 src2));
10493 ins_cost(60);
10495 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10496 ins_encode %{
10497 Register dst = $dst$$Register;
10498 Register src = $src1$$Register;
10499 int val = $src2$$constant;
10501 __ xori(dst, src, val);
10502 %}
10503 ins_pipe( ialu_regI_regI );
10504 %}
10506 /*
10507 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10508 match(Set dst (XorL src1 M1));
10509 predicate(UseLoongsonISA);
10510 ins_cost(60);
10512 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10513 ins_encode %{
10514 Register dst = $dst$$Register;
10515 Register src = $src1$$Register;
10517 __ gsorn(dst, R0, src);
10518 %}
10519 ins_pipe( ialu_regI_regI );
10520 %}
10521 */
// Fold (255 & LoadB mem) into a single unsigned-byte load: masking a
// signed byte load with 0xFF is exactly lbu. Matches the mask-on-the-left
// shape; lbu_and_rmask below handles the mirrored operand order.
// Fix: the debug format said "lhu" (halfword) but load_UB_enc emits a
// BYTE load — corrected to "lbu" so -XX:+PrintOptoAssembly output is accurate.
10523 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10524 match(Set dst (AndI mask (LoadB mem)));
10525 ins_cost(60);
10527 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10528 ins_encode(load_UB_enc(dst, mem));
10529 ins_pipe( ialu_loadI );
10530 %}
// Mirror of lbu_and_lmask: fold (LoadB mem & 255) into one lbu.
// Fix: debug format corrected from "lhu" (halfword) to "lbu" — the
// encoding (load_UB_enc) performs an unsigned BYTE load.
10532 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10533 match(Set dst (AndI (LoadB mem) mask));
10534 ins_cost(60);
10536 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10537 ins_encode(load_UB_enc(dst, mem));
10538 ins_pipe( ialu_loadI );
10539 %}
// 32-bit bitwise AND of two registers.
10541 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10542 match(Set dst (AndI src1 src2));
10544 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10545 ins_encode %{
10546 Register dst = $dst$$Register;
10547 Register src1 = $src1$$Register;
10548 Register src2 = $src2$$Register;
10549 __ andr(dst, src1, src2);
10550 %}
10551 ins_pipe( ialu_regI_regI );
10552 %}
// src1 & ~src2 (and-not): the XorI-with--1 complement is folded into a
// single Loongson gsandn. Requires 3A2000+.
10554 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10555 match(Set dst (AndI src1 (XorI src2 M1)));
10556 predicate(UseLoongsonISA && Use3A2000);
10558 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10559 ins_encode %{
10560 Register dst = $dst$$Register;
10561 Register src1 = $src1$$Register;
10562 Register src2 = $src2$$Register;
10564 __ gsandn(dst, src1, src2);
10565 %}
10566 ins_pipe( ialu_regI_regI );
10567 %}
// src1 | ~src2 (or-not) folded into gsorn.
10569 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10570 match(Set dst (OrI src1 (XorI src2 M1)));
10571 predicate(UseLoongsonISA && Use3A2000);
10573 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10574 ins_encode %{
10575 Register dst = $dst$$Register;
10576 Register src1 = $src1$$Register;
10577 Register src2 = $src2$$Register;
10579 __ gsorn(dst, src1, src2);
10580 %}
10581 ins_pipe( ialu_regI_regI );
10582 %}
// ~src1 & src2: commuted match; operands swapped into gsandn(dst, src2, src1).
10584 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10585 match(Set dst (AndI (XorI src1 M1) src2));
10586 predicate(UseLoongsonISA && Use3A2000);
10588 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10589 ins_encode %{
10590 Register dst = $dst$$Register;
10591 Register src1 = $src1$$Register;
10592 Register src2 = $src2$$Register;
10594 __ gsandn(dst, src2, src1);
10595 %}
10596 ins_pipe( ialu_regI_regI );
10597 %}
// ~src1 | src2: commuted or-not form.
10599 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10600 match(Set dst (OrI (XorI src1 M1) src2));
10601 predicate(UseLoongsonISA && Use3A2000);
10603 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10604 ins_encode %{
10605 Register dst = $dst$$Register;
10606 Register src1 = $src1$$Register;
10607 Register src2 = $src2$$Register;
10609 __ gsorn(dst, src2, src1);
10610 %}
10611 ins_pipe( ialu_regI_regI );
10612 %}
10614 // And Long Register with Register
10615 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10616 match(Set dst (AndL src1 src2));
10617 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10618 ins_encode %{
10619 Register dst_reg = as_Register($dst$$reg);
10620 Register src1_reg = as_Register($src1$$reg);
10621 Register src2_reg = as_Register($src2$$reg);
10623 __ andr(dst_reg, src1_reg, src2_reg);
10624 %}
10625 ins_pipe( ialu_regL_regL );
10626 %}
// AndL with a ConvI2L operand; conversion is free (sign-extension invariant).
10628 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10629 match(Set dst (AndL src1 (ConvI2L src2)));
10630 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10631 ins_encode %{
10632 Register dst_reg = as_Register($dst$$reg);
10633 Register src1_reg = as_Register($src1$$reg);
10634 Register src2_reg = as_Register($src2$$reg);
10636 __ andr(dst_reg, src1_reg, src2_reg);
10637 %}
10638 ins_pipe( ialu_regL_regL );
10639 %}
// AndL with a [0, 65535] immediate: single andi (zero-extended imm16).
10641 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10642 match(Set dst (AndL src1 src2));
10643 ins_cost(60);
10645 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10646 ins_encode %{
10647 Register dst = $dst$$Register;
10648 Register src = $src1$$Register;
10649 long val = $src2$$constant;
10651 __ andi(dst, src, val);
10652 %}
10653 ins_pipe( ialu_regI_regI );
10654 %}
// (int)(src1 & imm16): the mask already clears the high bits, so the
// ConvL2I truncation is subsumed by the same single andi.
10656 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10657 match(Set dst (ConvL2I (AndL src1 src2)));
10658 ins_cost(60);
10660 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10661 ins_encode %{
10662 Register dst = $dst$$Register;
10663 Register src = $src1$$Register;
10664 long val = $src2$$constant;
10666 __ andi(dst, src, val);
10667 %}
10668 ins_pipe( ialu_regI_regI );
10669 %}
10671 /*
10672 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10673 match(Set dst (AndL src1 (XorL src2 M1)));
10674 predicate(UseLoongsonISA);
10676 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10677 ins_encode %{
10678 Register dst = $dst$$Register;
10679 Register src1 = $src1$$Register;
10680 Register src2 = $src2$$Register;
10682 __ gsandn(dst, src1, src2);
10683 %}
10684 ins_pipe( ialu_regI_regI );
10685 %}
10686 */
10688 /*
10689 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10690 match(Set dst (OrL src1 (XorL src2 M1)));
10691 predicate(UseLoongsonISA);
10693 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10694 ins_encode %{
10695 Register dst = $dst$$Register;
10696 Register src1 = $src1$$Register;
10697 Register src2 = $src2$$Register;
10699 __ gsorn(dst, src1, src2);
10700 %}
10701 ins_pipe( ialu_regI_regI );
10702 %}
10703 */
10705 /*
10706 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10707 match(Set dst (AndL (XorL src1 M1) src2));
10708 predicate(UseLoongsonISA);
10710 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10711 ins_encode %{
10712 Register dst = $dst$$Register;
10713 Register src1 = $src1$$Register;
10714 Register src2 = $src2$$Register;
10716 __ gsandn(dst, src2, src1);
10717 %}
10718 ins_pipe( ialu_regI_regI );
10719 %}
10720 */
10722 /*
10723 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10724 match(Set dst (OrL (XorL src1 M1) src2));
10725 predicate(UseLoongsonISA);
10727 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10728 ins_encode %{
10729 Register dst = $dst$$Register;
10730 Register src1 = $src1$$Register;
10731 Register src2 = $src2$$Register;
10733 __ gsorn(dst, src2, src1);
10734 %}
10735 ins_pipe( ialu_regI_regI );
10736 %}
10737 */
// The andL_Reg_immL_M* instructs below implement in-place AND with a
// specific negative mask as a dins (doubleword insert) of zeros into the
// bit positions the mask clears: AND with -2^k..-style constants whose
// binary form has a contiguous run of zero bits.
// x & -8 (...1111000): clear bits [0,2] — dins(dst, R0, pos=0, size=3).
10739 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10740 match(Set dst (AndL dst M8));
10741 ins_cost(60);
10743 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10744 ins_encode %{
10745 Register dst = $dst$$Register;
10747 __ dins(dst, R0, 0, 3);
10748 %}
10749 ins_pipe( ialu_regI_regI );
10750 %}
// x & -5 (...1111011): clear bit 2 only — dins(pos=2, size=1).
10752 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10753 match(Set dst (AndL dst M5));
10754 ins_cost(60);
10756 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10757 ins_encode %{
10758 Register dst = $dst$$Register;
10760 __ dins(dst, R0, 2, 1);
10761 %}
10762 ins_pipe( ialu_regI_regI );
10763 %}
// x & -7 (...1111001): clear bits [1,2] — dins(pos=1, size=2).
10765 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10766 match(Set dst (AndL dst M7));
10767 ins_cost(60);
10769 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10770 ins_encode %{
10771 Register dst = $dst$$Register;
10773 __ dins(dst, R0, 1, 2);
10774 %}
10775 ins_pipe( ialu_regI_regI );
10776 %}
// x & -4 (...1111100): clear bits [0,1] — dins(pos=0, size=2).
10778 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10779 match(Set dst (AndL dst M4));
10780 ins_cost(60);
10782 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10783 ins_encode %{
10784 Register dst = $dst$$Register;
10786 __ dins(dst, R0, 0, 2);
10787 %}
10788 ins_pipe( ialu_regI_regI );
10789 %}
// x & -121 (...10000111): clear bits [3,6] — dins(pos=3, size=4).
10791 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10792 match(Set dst (AndL dst M121));
10793 ins_cost(60);
10795 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10796 ins_encode %{
10797 Register dst = $dst$$Register;
10799 __ dins(dst, R0, 3, 4);
10800 %}
10801 ins_pipe( ialu_regI_regI );
10802 %}
10804 // Or Long Register with Register
10805 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10806 match(Set dst (OrL src1 src2));
10807 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10808 ins_encode %{
10809 Register dst_reg = $dst$$Register;
10810 Register src1_reg = $src1$$Register;
10811 Register src2_reg = $src2$$Register;
10813 __ orr(dst_reg, src1_reg, src2_reg);
10814 %}
10815 ins_pipe( ialu_regL_regL );
10816 %}
// OrL where one operand is a pointer cast to a long (CastP2X is a
// register-level no-op, so a plain orr suffices).
10818 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10819 match(Set dst (OrL (CastP2X src1) src2));
10820 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10821 ins_encode %{
10822 Register dst_reg = $dst$$Register;
10823 Register src1_reg = $src1$$Register;
10824 Register src2_reg = $src2$$Register;
10826 __ orr(dst_reg, src1_reg, src2_reg);
10827 %}
10828 ins_pipe( ialu_regL_regL );
10829 %}
10831 // Xor Long Register with Register
10832 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10833 match(Set dst (XorL src1 src2));
10834 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10835 ins_encode %{
10836 Register dst_reg = as_Register($dst$$reg);
10837 Register src1_reg = as_Register($src1$$reg);
10838 Register src2_reg = as_Register($src2$$reg);
10840 __ xorr(dst_reg, src1_reg, src2_reg);
10841 %}
10842 ins_pipe( ialu_regL_regL );
10843 %}
10845 // Shift Left by 8-bit immediate
// 32-bit shift-left by constant; sll uses only the low 5 bits of shamt,
// matching Java's shift-count masking for int.
10846 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10847 match(Set dst (LShiftI src shift));
10849 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10850 ins_encode %{
10851 Register src = $src$$Register;
10852 Register dst = $dst$$Register;
10853 int shamt = $shift$$constant;
10855 __ sll(dst, src, shamt);
10856 %}
10857 ins_pipe( ialu_regI_regI );
10858 %}
// Shift-left of a long truncated to int: ConvL2I needs no code because
// sll operates on (and sign-extends) the low 32 bits anyway.
10860 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10861 match(Set dst (LShiftI (ConvL2I src) shift));
10863 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10864 ins_encode %{
10865 Register src = $src$$Register;
10866 Register dst = $dst$$Register;
10867 int shamt = $shift$$constant;
10869 __ sll(dst, src, shamt);
10870 %}
10871 ins_pipe( ialu_regI_regI );
10872 %}
// (src << 16) & 0xFFFF0000: the shift already zeroes the low 16 bits,
// so the AND with -65536 is redundant and a single sll-by-16 suffices.
10874 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10875 match(Set dst (AndI (LShiftI src shift) mask));
10877 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10878 ins_encode %{
10879 Register src = $src$$Register;
10880 Register dst = $dst$$Register;
10882 __ sll(dst, src, 16);
10883 %}
10884 ins_pipe( ialu_regI_regI );
10885 %}
// ((int)(src & 7) << 16) >> 16: the i2s round-trip cannot change a value
// already in [0,7], so the whole tree reduces to one andi with 7.
10887 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10888 %{
10889 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10891 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10892 ins_encode %{
10893 Register src = $src$$Register;
10894 Register dst = $dst$$Register;
10896 __ andi(dst, src, 7);
10897 %}
10898 ins_pipe(ialu_regI_regI);
10899 %}
// i2s of (src | imm) where imm is in [0, 32767]: ORing a non-negative
// value < 2^15 cannot disturb bits 15+ of a short-ranged result, so the
// sign-extension shifts are dropped and a single ori remains.
10901 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10902 %{
10903 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10905 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10906 ins_encode %{
10907 Register src = $src1$$Register;
10908 int val = $src2$$constant;
10909 Register dst = $dst$$Register;
10911 __ ori(dst, src, val);
10912 %}
10913 ins_pipe(ialu_regI_regI);
10914 %}
10916 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
10917 // This idiom is used by the compiler the i2s bytecode.
// seh = sign-extend halfword: one instruction instead of two shifts.
10918 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10919 %{
10920 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10922 format %{ "i2s $dst, $src\t# @i2s" %}
10923 ins_encode %{
10924 Register src = $src$$Register;
10925 Register dst = $dst$$Register;
10927 __ seh(dst, src);
10928 %}
10929 ins_pipe(ialu_regI_regI);
10930 %}
10932 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
10933 // This idiom is used by the compiler for the i2b bytecode.
// seb = sign-extend byte: one instruction instead of two shifts.
10934 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10935 %{
10936 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10938 format %{ "i2b $dst, $src\t# @i2b" %}
10939 ins_encode %{
10940 Register src = $src$$Register;
10941 Register dst = $dst$$Register;
10943 __ seb(dst, src);
10944 %}
10945 ins_pipe(ialu_regI_regI);
10946 %}
// Shift-left of a long-to-int conversion by a constant.
// NOTE(review): identical match rule to salL2I_Reg_imm earlier in this
// file — likely an accidental duplicate; confirm and consider removing one.
10949 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10950 match(Set dst (LShiftI (ConvL2I src) shift));
10952 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10953 ins_encode %{
10954 Register src = $src$$Register;
10955 Register dst = $dst$$Register;
10956 int shamt = $shift$$constant;
10958 __ sll(dst, src, shamt);
10959 %}
10960 ins_pipe( ialu_regI_regI );
10961 %}
10963 // Shift Left by a register-supplied count
// Int shift-left by a variable amount; 'sllv' uses the low 5 bits of
// the shift register, matching Java's (shift & 0x1f) semantics.
10964 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10965 match(Set dst (LShiftI src shift));
10967 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10968 ins_encode %{
10969 Register src = $src$$Register;
10970 Register dst = $dst$$Register;
10971 Register shamt = $shift$$Register;
10972 __ sllv(dst, src, shamt);
10973 %}
10974 ins_pipe( ialu_regI_regI );
10975 %}
10978 // Shift Left Long by constant.  MIPS64 encodes a 5-bit shift field, so
10978 // counts >= 32 need 'dsll32' (which shifts by sa+32); the count is first
10978 // reduced mod 64 via Assembler::low(shamt, 6) per Java semantics.
10979 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10980 //predicate(UseNewLongLShift);
10981 match(Set dst (LShiftL src shift));
10982 ins_cost(100);
10983 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10984 ins_encode %{
10985 Register src_reg = as_Register($src$$reg);
10986 Register dst_reg = as_Register($dst$$reg);
10987 int shamt = $shift$$constant;
10989 if (__ is_simm(shamt, 5))
10990 __ dsll(dst_reg, src_reg, shamt);
10991 else
10992 {
10993 int sa = Assembler::low(shamt, 6);
10994 if (sa < 32) {
10995 __ dsll(dst_reg, src_reg, sa);
10996 } else {
10997 __ dsll32(dst_reg, src_reg, sa - 32);
10998 }
10999 }
11000 %}
11001 ins_pipe( ialu_regL_regL );
11002 %}
// (long)(int)src << shift by constant.  The int input is already
// sign-extended in its 64-bit register, so the shift alone suffices.
// NOTE(review): salL_convI2L_Reg_imm below has the identical match rule —
// apparent duplicate; confirm.
11004 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
11005 //predicate(UseNewLongLShift);
11006 match(Set dst (LShiftL (ConvI2L src) shift));
11007 ins_cost(100);
11008 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
11009 ins_encode %{
11010 Register src_reg = as_Register($src$$reg);
11011 Register dst_reg = as_Register($dst$$reg);
11012 int shamt = $shift$$constant;
11014 if (__ is_simm(shamt, 5))
11015 __ dsll(dst_reg, src_reg, shamt);
11016 else
11017 {
11018 int sa = Assembler::low(shamt, 6);
11019 if (sa < 32) {
11020 __ dsll(dst_reg, src_reg, sa);
11021 } else {
11022 __ dsll32(dst_reg, src_reg, sa - 32);
11023 }
11024 }
11025 %}
11026 ins_pipe( ialu_regL_regL );
11027 %}
11029 // Shift Left Long by a variable amount; 'dsllv' uses the low 6 bits of
11029 // the shift register, matching Java's (shift & 0x3f) semantics.
11030 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11031 //predicate(UseNewLongLShift);
11032 match(Set dst (LShiftL src shift));
11033 ins_cost(100);
11034 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
11035 ins_encode %{
11036 Register src_reg = as_Register($src$$reg);
11037 Register dst_reg = as_Register($dst$$reg);
11039 __ dsllv(dst_reg, src_reg, $shift$$Register);
11040 %}
11041 ins_pipe( ialu_regL_regL );
11042 %}
// (long)(int)src << shift by constant.
// NOTE(review): identical match rule to salL_RegI2L_imm above — apparent
// accidental duplicate; confirm which rule the matcher selects.
11044 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
11045 match(Set dst (LShiftL (ConvI2L src) shift));
11046 ins_cost(100);
11047 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
11048 ins_encode %{
11049 Register src_reg = as_Register($src$$reg);
11050 Register dst_reg = as_Register($dst$$reg);
11051 int shamt = $shift$$constant;
11053 if (__ is_simm(shamt, 5)) {
11054 __ dsll(dst_reg, src_reg, shamt);
11055 } else {
11056 int sa = Assembler::low(shamt, 6);
11057 if (sa < 32) {
11058 __ dsll(dst_reg, src_reg, sa);
11059 } else {
11060 __ dsll32(dst_reg, src_reg, sa - 32);
11061 }
11062 }
11063 %}
11064 ins_pipe( ialu_regL_regL );
11065 %}
11067 // Arithmetic Shift Right Long by constant; the count is masked to 6 bits
11067 // up front, and counts >= 32 use 'dsra32' (shifts by sa+32).
11068 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
11069 match(Set dst (RShiftL src shift));
11070 ins_cost(100);
11071 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
11072 ins_encode %{
11073 Register src_reg = as_Register($src$$reg);
11074 Register dst_reg = as_Register($dst$$reg);
11075 int shamt = ($shift$$constant & 0x3f);
11076 if (__ is_simm(shamt, 5))
11077 __ dsra(dst_reg, src_reg, shamt);
11078 else {
11079 int sa = Assembler::low(shamt, 6);
11080 if (sa < 32) {
11081 __ dsra(dst_reg, src_reg, sa);
11082 } else {
11083 __ dsra32(dst_reg, src_reg, sa - 32);
11084 }
11085 }
11086 %}
11087 ins_pipe( ialu_regL_regL );
11088 %}
// (int)(src >> shift) for 32 <= shift <= 63: dsra32 both performs the
// shift and leaves a properly sign-extended 32-bit result.
11090 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11091 match(Set dst (ConvL2I (RShiftL src shift)));
11092 ins_cost(100);
11093 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11094 ins_encode %{
11095 Register src_reg = as_Register($src$$reg);
11096 Register dst_reg = as_Register($dst$$reg);
11097 int shamt = $shift$$constant;
11099 __ dsra32(dst_reg, src_reg, shamt - 32);
11100 %}
11101 ins_pipe( ialu_regL_regL );
11102 %}
11104 // Arithmetic Shift Right Long by a variable amount; 'dsrav' uses the low
11104 // 6 bits of the shift register (Java's shift & 0x3f).
11105 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11106 //predicate(UseNewLongLShift);
11107 match(Set dst (RShiftL src shift));
11108 ins_cost(100);
11109 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11110 ins_encode %{
11111 Register src_reg = as_Register($src$$reg);
11112 Register dst_reg = as_Register($dst$$reg);
11114 __ dsrav(dst_reg, src_reg, $shift$$Register);
11115 %}
11116 ins_pipe( ialu_regL_regL );
11117 %}
11119 // Logical Shift Right Long by a variable amount; 'dsrlv' uses the low
11119 // 6 bits of the shift register.
11120 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11121 match(Set dst (URShiftL src shift));
11122 ins_cost(100);
11123 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11124 ins_encode %{
11125 Register src_reg = as_Register($src$$reg);
11126 Register dst_reg = as_Register($dst$$reg);
11128 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11129 %}
11130 ins_pipe( ialu_regL_regL );
11131 %}
// Logical Shift Right Long by constant 0..31: fits dsrl's 5-bit field.
11133 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11134 match(Set dst (URShiftL src shift));
11135 ins_cost(80);
11136 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11137 ins_encode %{
11138 Register src_reg = as_Register($src$$reg);
11139 Register dst_reg = as_Register($dst$$reg);
11140 int shamt = $shift$$constant;
11142 __ dsrl(dst_reg, src_reg, shamt);
11143 %}
11144 ins_pipe( ialu_regL_regL );
11145 %}
// (int)(src >>> shift) & 0x7fffffff: equivalent to extracting 31 bits
// starting at 'shift', so a single 'dext' (doubleword extract) suffices.
11147 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11148 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11149 ins_cost(80);
11150 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11151 ins_encode %{
11152 Register src_reg = as_Register($src$$reg);
11153 Register dst_reg = as_Register($dst$$reg);
11154 int shamt = $shift$$constant;
11156 __ dext(dst_reg, src_reg, shamt, 31);
11157 %}
11158 ins_pipe( ialu_regL_regL );
11159 %}
// Logical Shift Right of a pointer reinterpreted as a long (CastP2X),
// by constant 0..31.  Same codegen as the plain long version.
11161 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11162 match(Set dst (URShiftL (CastP2X src) shift));
11163 ins_cost(80);
11164 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11165 ins_encode %{
11166 Register src_reg = as_Register($src$$reg);
11167 Register dst_reg = as_Register($dst$$reg);
11168 int shamt = $shift$$constant;
11170 __ dsrl(dst_reg, src_reg, shamt);
11171 %}
11172 ins_pipe( ialu_regL_regL );
11173 %}
// Logical Shift Right Long by constant 32..63: 'dsrl32' shifts by sa+32.
11175 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11176 match(Set dst (URShiftL src shift));
11177 ins_cost(80);
11178 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11179 ins_encode %{
11180 Register src_reg = as_Register($src$$reg);
11181 Register dst_reg = as_Register($dst$$reg);
11182 int shamt = $shift$$constant;
11184 __ dsrl32(dst_reg, src_reg, shamt - 32);
11185 %}
11186 ins_pipe( ialu_regL_regL );
11187 %}
// (int)(src >>> shift) for shift > 32: after shifting by more than 32 the
// result fits in 31 bits, so dsrl32 alone yields a correctly
// (zero/sign-)extended int.  The predicate excludes shift == 32;
// presumably that case is left to another rule — confirm.
11189 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11190 match(Set dst (ConvL2I (URShiftL src shift)));
11191 predicate(n->in(1)->in(2)->get_int() > 32);
11192 ins_cost(80);
11193 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11194 ins_encode %{
11195 Register src_reg = as_Register($src$$reg);
11196 Register dst_reg = as_Register($dst$$reg);
11197 int shamt = $shift$$constant;
11199 __ dsrl32(dst_reg, src_reg, shamt - 32);
11200 %}
11201 ins_pipe( ialu_regL_regL );
11202 %}
// Logical Shift Right of a pointer-as-long (CastP2X) by constant 32..63.
11204 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11205 match(Set dst (URShiftL (CastP2X src) shift));
11206 ins_cost(80);
11207 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11208 ins_encode %{
11209 Register src_reg = as_Register($src$$reg);
11210 Register dst_reg = as_Register($dst$$reg);
11211 int shamt = $shift$$constant;
11213 __ dsrl32(dst_reg, src_reg, shamt - 32);
11214 %}
11215 ins_pipe( ialu_regL_regL );
11216 %}
11218 // Xor Instructions
11219 // Xor Register with Register.  The trailing 'sll dst, dst, 0'
11219 // re-sign-extends the low 32 bits, keeping the canonical int-in-64-bit
11219 // register representation.
11220 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11221 match(Set dst (XorI src1 src2));
11223 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11225 ins_encode %{
11226 Register dst = $dst$$Register;
11227 Register src1 = $src1$$Register;
11228 Register src2 = $src2$$Register;
11229 __ xorr(dst, src1, src2);
11230 __ sll(dst, dst, 0); /* long -> int */
11231 %}
11233 ins_pipe( ialu_regI_regI );
11234 %}
11236 // Or Instructions
11237 // Or Register with Register.  No re-extension needed: OR of two
11237 // sign-extended ints is itself sign-extended (unlike XOR above —
11237 // NOTE(review): confirm why xorI needs the extra sll but orI does not).
11238 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11239 match(Set dst (OrI src1 src2));
11241 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11242 ins_encode %{
11243 Register dst = $dst$$Register;
11244 Register src1 = $src1$$Register;
11245 Register src2 = $src2$$Register;
11246 __ orr(dst, src1, src2);
11247 %}
11249 ins_pipe( ialu_regI_regI );
11250 %}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate the low bit to the top with 'rotr src, 1', then shift the rest
// down by rshift-1 (skipped when rshift == 1).
11252 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11253 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11254 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11256 format %{ "rotr $dst, $src, 1 ...\n\t"
11257 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11258 ins_encode %{
11259 Register dst = $dst$$Register;
11260 Register src = $src$$Register;
11261 int rshift = $rshift$$constant;
11263 __ rotr(dst, src, 1);
11264 if (rshift - 1) {
11265 __ srl(dst, dst, rshift - 1);
11266 }
11267 %}
11269 ins_pipe( ialu_regI_regI );
11270 %}
// OR of a long with a pointer reinterpreted as an integer value.
// NOTE(review): operands are mRegL/mRegP but the match rule is OrI —
// confirm this rule is intended (OrL would seem more natural here).
11272 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11273 match(Set dst (OrI src1 (CastP2X src2)));
11275 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11276 ins_encode %{
11277 Register dst = $dst$$Register;
11278 Register src1 = $src1$$Register;
11279 Register src2 = $src2$$Register;
11280 __ orr(dst, src1, src2);
11281 %}
11283 ins_pipe( ialu_regI_regI );
11284 %}
11286 // Logical Shift Right by 8-bit immediate
11287 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11288 match(Set dst (URShiftI src shift));
11289 // effect(KILL cr);
11291 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11292 ins_encode %{
11293 Register src = $src$$Register;
11294 Register dst = $dst$$Register;
11295 int shift = $shift$$constant;
11297 __ srl(dst, src, shift);
11298 %}
11299 ins_pipe( ialu_regI_regI );
11300 %}
// (src >>> shift) & mask where mask is a contiguous low-bit mask:
// fuse into a single 'ext' (extract bit-field).  is_int_mask() returns
// the field width in bits for the given mask constant.
11302 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11303 match(Set dst (AndI (URShiftI src shift) mask));
11305 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11306 ins_encode %{
11307 Register src = $src$$Register;
11308 Register dst = $dst$$Register;
11309 int pos = $shift$$constant;
11310 int size = Assembler::is_int_mask($mask$$constant);
11312 __ ext(dst, src, pos, size);
11313 %}
11314 ins_pipe( ialu_regI_regI );
11315 %}
// Rotate instructions.  A rotate is matched as (x << l) | (x >>> r) with
// l + r == 32 (int) or 64 (long); MIPS only has rotate-right, so every
// variant emits rotr/drotr/drotr32 with the right-rotate count.
// NOTE(review): the long rotates use ins_pipe(ialu_regI_regI) rather than
// a long pipe class — confirm this is intentional.
11317 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11318 %{
11319 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11320 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11322 ins_cost(100);
11323 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11324 ins_encode %{
11325 Register dst = $dst$$Register;
11326 int sa = $rshift$$constant;
11328 __ rotr(dst, dst, sa);
11329 %}
11330 ins_pipe( ialu_regI_regI );
11331 %}
// Rotate-left long, right-rotate count 0..31.
11333 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11334 %{
11335 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11336 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11338 ins_cost(100);
11339 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11340 ins_encode %{
11341 Register dst = $dst$$Register;
11342 int sa = $rshift$$constant;
11344 __ drotr(dst, dst, sa);
11345 %}
11346 ins_pipe( ialu_regI_regI );
11347 %}
// Rotate-left long, right-rotate count 32..63: drotr32 rotates by sa+32.
11349 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11350 %{
11351 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11352 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11354 ins_cost(100);
11355 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11356 ins_encode %{
11357 Register dst = $dst$$Register;
11358 int sa = $rshift$$constant;
11360 __ drotr32(dst, dst, sa - 32);
11361 %}
11362 ins_pipe( ialu_regI_regI );
11363 %}
// Rotate-right int (operand order swapped relative to rolI above).
11365 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11366 %{
11367 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11368 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11370 ins_cost(100);
11371 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11372 ins_encode %{
11373 Register dst = $dst$$Register;
11374 int sa = $rshift$$constant;
11376 __ rotr(dst, dst, sa);
11377 %}
11378 ins_pipe( ialu_regI_regI );
11379 %}
// Rotate-right long, count 0..31.
11381 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11382 %{
11383 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11384 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11386 ins_cost(100);
11387 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11388 ins_encode %{
11389 Register dst = $dst$$Register;
11390 int sa = $rshift$$constant;
11392 __ drotr(dst, dst, sa);
11393 %}
11394 ins_pipe( ialu_regI_regI );
11395 %}
// Rotate-right long, count 32..63.
11397 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11398 %{
11399 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11400 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11402 ins_cost(100);
11403 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11404 ins_encode %{
11405 Register dst = $dst$$Register;
11406 int sa = $rshift$$constant;
11408 __ drotr32(dst, dst, sa - 32);
11409 %}
11410 ins_pipe( ialu_regI_regI );
11411 %}
11413 // Logical Shift Right by a variable amount ('srlv' uses low 5 bits).
11414 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11415 match(Set dst (URShiftI src shift));
11417 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11418 ins_encode %{
11419 Register src = $src$$Register;
11420 Register dst = $dst$$Register;
11421 Register shift = $shift$$Register;
11422 __ srlv(dst, src, shift);
11423 %}
11424 ins_pipe( ialu_regI_regI );
11425 %}
// Arithmetic Shift Right by constant.
11428 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11429 match(Set dst (RShiftI src shift));
11430 // effect(KILL cr);
11432 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11433 ins_encode %{
11434 Register src = $src$$Register;
11435 Register dst = $dst$$Register;
11436 int shift = $shift$$constant;
11437 __ sra(dst, src, shift);
11438 %}
11439 ins_pipe( ialu_regI_regI );
11440 %}
// Arithmetic Shift Right by a variable amount ('srav' uses low 5 bits).
11442 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11443 match(Set dst (RShiftI src shift));
11444 // effect(KILL cr);
11446 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11447 ins_encode %{
11448 Register src = $src$$Register;
11449 Register dst = $dst$$Register;
11450 Register shift = $shift$$Register;
11451 __ srav(dst, src, shift);
11452 %}
11453 ins_pipe( ialu_regI_regI );
11454 %}
11456 //----------Convert Int to Boolean---------------------------------------------
11456 // dst = (src != 0) ? 1 : 0.  Load 1, then conditionally zero it with
11456 // 'movz' (move-if-zero).  When dst aliases src the value is first saved
11456 // in AT because loading 1 into dst would clobber the source.
11458 instruct convI2B(mRegI dst, mRegI src) %{
11459 match(Set dst (Conv2B src));
11461 ins_cost(100);
11462 format %{ "convI2B $dst, $src @ convI2B" %}
11463 ins_encode %{
11464 Register dst = as_Register($dst$$reg);
11465 Register src = as_Register($src$$reg);
11467 if (dst != src) {
11468 __ daddiu(dst, R0, 1);
11469 __ movz(dst, R0, src);
11470 } else {
11471 __ move(AT, src);
11472 __ daddiu(dst, R0, 1);
11473 __ movz(dst, R0, AT);
11474 }
11475 %}
11477 ins_pipe( ialu_regL_regL );
11478 %}
// Int-to-long: 'sll src, 0' sign-extends the low 32 bits into 64.
// When dst == src the register already holds a sign-extended int, so no
// instruction is emitted at all.
11480 instruct convI2L_reg( mRegL dst, mRegI src) %{
11481 match(Set dst (ConvI2L src));
11483 ins_cost(100);
11484 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11485 ins_encode %{
11486 Register dst = as_Register($dst$$reg);
11487 Register src = as_Register($src$$reg);
11489 if(dst != src) __ sll(dst, src, 0);
11490 %}
11491 ins_pipe( ialu_regL_regL );
11492 %}
// Long-to-int: 'sll src, 0' truncates to 32 bits and sign-extends,
// producing the canonical int-in-64-bit-register form.
11495 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11496 match(Set dst (ConvL2I src));
11498 format %{ "MOV $dst, $src @ convL2I_reg" %}
11499 ins_encode %{
11500 Register dst = as_Register($dst$$reg);
11501 Register src = as_Register($src$$reg);
11503 __ sll(dst, src, 0);
11504 %}
11506 ins_pipe( ialu_regI_regI );
11507 %}
// (long)(int)src — same truncate-and-sign-extend as above.
11509 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11510 match(Set dst (ConvI2L (ConvL2I src)));
11512 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11513 ins_encode %{
11514 Register dst = as_Register($dst$$reg);
11515 Register src = as_Register($src$$reg);
11517 __ sll(dst, src, 0);
11518 %}
11520 ins_pipe( ialu_regI_regI );
11521 %}
// Long-to-double: move the 64-bit GPR into an FPR, then cvt.d.l.
11523 instruct convL2D_reg( regD dst, mRegL src ) %{
11524 match(Set dst (ConvL2D src));
11525 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11526 ins_encode %{
11527 Register src = as_Register($src$$reg);
11528 FloatRegister dst = as_FloatRegister($dst$$reg);
11530 __ dmtc1(src, dst);
11531 __ cvt_d_l(dst, dst);
11532 %}
11534 ins_pipe( pipe_slow );
11535 %}
// Fast ConvD2L.  trunc.l.d produces max_long (0x7fffffffffffffff) for
// out-of-range or NaN inputs; when that sentinel is seen, fall through to
// fix up: large positive stays max_long, src < 0 yields min_long
// (T9 - AT == -1 - max_long), and NaN yields 0 via movt.
// Instructions issued via delayed() sit in the branch delay slot and
// execute regardless of whether the branch is taken.
11537 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11538 match(Set dst (ConvD2L src));
11539 ins_cost(150);
11540 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11541 ins_encode %{
11542 Register dst = as_Register($dst$$reg);
11543 FloatRegister src = as_FloatRegister($src$$reg);
11545 Label Done;
11547 __ trunc_l_d(F30, src);
11548 // max_long: 0x7fffffffffffffff
11549 // __ set64(AT, 0x7fffffffffffffff);
11550 __ daddiu(AT, R0, -1);
11551 __ dsrl(AT, AT, 1);
11552 __ dmfc1(dst, F30);
11554 __ bne(dst, AT, Done);
11555 __ delayed()->mtc1(R0, F30);
11557 __ cvt_d_w(F30, F30);
11558 __ c_ult_d(src, F30);
11559 __ bc1f(Done);
11560 __ delayed()->daddiu(T9, R0, -1);
11562 __ c_un_d(src, src); //NaN?
11563 __ subu(dst, T9, AT);
11564 __ movt(dst, R0);
11566 __ bind(Done);
11567 %}
11569 ins_pipe( pipe_slow );
11570 %}
// Slow ConvD2L.  NaN returns 0 immediately.  Otherwise truncate and check
// the FCSR invalid-operation flag (bit 16 read via cfc1 $31): if clear,
// the truncated value in F30 is the answer (taken from the delay slot);
// if set, fall back to the SharedRuntime::d2l runtime call.
11572 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11573 match(Set dst (ConvD2L src));
11574 ins_cost(250);
11575 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11576 ins_encode %{
11577 Register dst = as_Register($dst$$reg);
11578 FloatRegister src = as_FloatRegister($src$$reg);
11580 Label L;
11582 __ c_un_d(src, src); //NaN?
11583 __ bc1t(L);
11584 __ delayed();
11585 __ move(dst, R0);
11587 __ trunc_l_d(F30, src);
11588 __ cfc1(AT, 31);
11589 __ li(T9, 0x10000);
11590 __ andr(AT, AT, T9);
11591 __ beq(AT, R0, L);
11592 __ delayed()->dmfc1(dst, F30);
11594 __ mov_d(F12, src);
11595 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11596 __ move(dst, V0);
11597 __ bind(L);
11598 %}
11600 ins_pipe( pipe_slow );
11601 %}
// Fast ConvF2I: truncate, move result to the GPR, then zero it via
// 'movt' if the NaN compare set the FP condition flag.
11603 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11604 match(Set dst (ConvF2I src));
11605 ins_cost(150);
11606 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11607 ins_encode %{
11608 Register dreg = $dst$$Register;
11609 FloatRegister fval = $src$$FloatRegister;
11611 __ trunc_w_s(F30, fval);
11612 __ mfc1(dreg, F30);
11613 __ c_un_s(fval, fval); //NaN?
11614 __ movt(dreg, R0);
11615 %}
11617 ins_pipe( pipe_slow );
11618 %}
// Slow ConvF2I.  NaN returns 0.  Otherwise truncate and test the FCSR
// invalid-operation flag; on overflow fall back to SharedRuntime::f2i.
// V0 is preserved around the runtime call when it is not the destination
// (see the 2014/01/08 note below for the bug this fixed).
11620 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11621 match(Set dst (ConvF2I src));
11622 ins_cost(250);
11623 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11624 ins_encode %{
11625 Register dreg = $dst$$Register;
11626 FloatRegister fval = $src$$FloatRegister;
11627 Label L;
11629 __ c_un_s(fval, fval); //NaN?
11630 __ bc1t(L);
11631 __ delayed();
11632 __ move(dreg, R0);
11634 __ trunc_w_s(F30, fval);
11636 /* Call SharedRuntime:f2i() to do valid convention */
11637 __ cfc1(AT, 31);
11638 __ li(T9, 0x10000);
11639 __ andr(AT, AT, T9);
11640 __ beq(AT, R0, L);
11641 __ delayed()->mfc1(dreg, F30);
11643 __ mov_s(F12, fval);
11645 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11646 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11647 *
11648 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11649 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11650 */
11651 if(dreg != V0) {
11652 __ push(V0);
11653 }
11654 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11655 if(dreg != V0) {
11656 __ move(dreg, V0);
11657 __ pop(V0);
11658 }
11659 __ bind(L);
11660 %}
11662 ins_pipe( pipe_slow );
11663 %}
// Fast ConvF2L: truncate to a 64-bit integer, move to the GPR, then zero
// the result via 'movt' if the NaN compare set the FP condition flag.
11665 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11666 match(Set dst (ConvF2L src));
11667 ins_cost(150);
11668 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11669 ins_encode %{
11670 Register dreg = $dst$$Register;
11671 FloatRegister fval = $src$$FloatRegister;
11673 __ trunc_l_s(F30, fval);
11674 __ dmfc1(dreg, F30);
11675 __ c_un_s(fval, fval); //NaN?
11676 __ movt(dreg, R0);
11677 %}
11679 ins_pipe( pipe_slow );
11680 %}
// Slow ConvF2L.  NaN returns 0.  Otherwise truncate and test the FCSR
// invalid-operation flag (bit 16); on overflow call SharedRuntime::f2l.
11682 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11683 match(Set dst (ConvF2L src));
11684 ins_cost(250);
11685 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11686 ins_encode %{
11687 Register dst = as_Register($dst$$reg);
11688 FloatRegister fval = $src$$FloatRegister;
11689 Label L;
11691 __ c_un_s(fval, fval); //NaN?
11692 __ bc1t(L);
11693 __ delayed();
11694 __ move(dst, R0);
11696 __ trunc_l_s(F30, fval);
11697 __ cfc1(AT, 31);
11698 __ li(T9, 0x10000);
11699 __ andr(AT, AT, T9);
11700 __ beq(AT, R0, L);
11701 __ delayed()->dmfc1(dst, F30);
11703 __ mov_s(F12, fval);
11704 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11705 __ move(dst, V0);
11706 __ bind(L);
11707 %}
11709 ins_pipe( pipe_slow );
11710 %}
// Long-to-float: move the 64-bit GPR into the FPR, then cvt.s.l.
// (An unused 'Label L;' local was removed — it was never bound or branched to.)
11712 instruct convL2F_reg( regF dst, mRegL src ) %{
11713 match(Set dst (ConvL2F src));
11714 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11715 ins_encode %{
11716 FloatRegister dst = $dst$$FloatRegister;
11717 Register src = as_Register($src$$reg);
11720 __ dmtc1(src, dst);
11721 __ cvt_s_l(dst, dst);
11722 %}
11724 ins_pipe( pipe_slow );
11725 %}
// Int-to-float: move the 32-bit GPR into the FPR, then cvt.s.w.
11727 instruct convI2F_reg( regF dst, mRegI src ) %{
11728 match(Set dst (ConvI2F src));
11729 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11730 ins_encode %{
11731 Register src = $src$$Register;
11732 FloatRegister dst = $dst$$FloatRegister;
11734 __ mtc1(src, dst);
11735 __ cvt_s_w(dst, dst);
11736 %}
11738 ins_pipe( fpu_regF_regF );
11739 %}
// CmpLTMask p, 0: dst = (p < 0) ? -1 : 0, i.e. an arithmetic shift of the
// sign bit across the whole word.
11741 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11742 match(Set dst (CmpLTMask p zero));
11743 ins_cost(100);
11745 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11746 ins_encode %{
11747 Register src = $p$$Register;
11748 Register dst = $dst$$Register;
11750 __ sra(dst, src, 31);
11751 %}
11752 ins_pipe( pipe_slow );
11753 %}
// CmpLTMask p, q: dst = (p < q) ? -1 : 0.  'slt' yields 0/1; negating
// turns 1 into the all-ones mask.
11756 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11757 match(Set dst (CmpLTMask p q));
11758 ins_cost(400);
11760 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11761 ins_encode %{
11762 Register p = $p$$Register;
11763 Register q = $q$$Register;
11764 Register dst = $dst$$Register;
11766 __ slt(dst, p, q);
11767 __ subu(dst, R0, dst);
11768 %}
11769 ins_pipe( pipe_slow );
11770 %}
// dst = (src != NULL) ? 1 : 0 — same movz pattern as convI2B, with the
// dst == src aliasing case routed through AT.
11772 instruct convP2B(mRegI dst, mRegP src) %{
11773 match(Set dst (Conv2B src));
11775 ins_cost(100);
11776 format %{ "convP2B $dst, $src @ convP2B" %}
11777 ins_encode %{
11778 Register dst = as_Register($dst$$reg);
11779 Register src = as_Register($src$$reg);
11781 if (dst != src) {
11782 __ daddiu(dst, R0, 1);
11783 __ movz(dst, R0, src);
11784 } else {
11785 __ move(AT, src);
11786 __ daddiu(dst, R0, 1);
11787 __ movz(dst, R0, AT);
11788 }
11789 %}
11791 ins_pipe( ialu_regL_regL );
11792 %}
// Int-to-double: move into the FPR, then cvt.d.w.
11795 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11796 match(Set dst (ConvI2D src));
11797 format %{ "conI2D $dst, $src @convI2D_reg" %}
11798 ins_encode %{
11799 Register src = $src$$Register;
11800 FloatRegister dst = $dst$$FloatRegister;
11801 __ mtc1(src, dst);
11802 __ cvt_d_w(dst, dst);
11803 %}
11804 ins_pipe( fpu_regF_regF );
11805 %}
// Float-to-double widening conversion (exact, single instruction).
11807 instruct convF2D_reg_reg(regD dst, regF src) %{
11808 match(Set dst (ConvF2D src));
11809 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11810 ins_encode %{
11811 FloatRegister dst = $dst$$FloatRegister;
11812 FloatRegister src = $src$$FloatRegister;
11814 __ cvt_d_s(dst, src);
11815 %}
11816 ins_pipe( fpu_regF_regF );
11817 %}
// Double-to-float narrowing conversion (rounds, single instruction).
11819 instruct convD2F_reg_reg(regF dst, regD src) %{
11820 match(Set dst (ConvD2F src));
11821 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11822 ins_encode %{
11823 FloatRegister dst = $dst$$FloatRegister;
11824 FloatRegister src = $src$$FloatRegister;
11826 __ cvt_s_d(dst, src);
11827 %}
11828 ins_pipe( fpu_regF_regF );
11829 %}
11831 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
11831 // Fast path: trunc.w.d yields max_int (0x7fffffff) for out-of-range/NaN;
11831 // on that sentinel, fix up: large positive keeps max_int, src < 0 gives
11831 // min_int (T9 - AT == -1 - max_int via subu32), NaN gives 0 via movt.
11831 // delayed() instructions execute in the branch delay slot unconditionally.
11832 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11833 match(Set dst (ConvD2I src));
11835 ins_cost(150);
11836 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11838 ins_encode %{
11839 FloatRegister src = $src$$FloatRegister;
11840 Register dst = $dst$$Register;
11842 Label Done;
11844 __ trunc_w_d(F30, src);
11845 // max_int: 2147483647
11846 __ move(AT, 0x7fffffff);
11847 __ mfc1(dst, F30);
11849 __ bne(dst, AT, Done);
11850 __ delayed()->mtc1(R0, F30);
11852 __ cvt_d_w(F30, F30);
11853 __ c_ult_d(src, F30);
11854 __ bc1f(Done);
11855 __ delayed()->addiu(T9, R0, -1);
11857 __ c_un_d(src, src); //NaN?
11858 __ subu32(dst, T9, AT);
11859 __ movt(dst, R0);
11861 __ bind(Done);
11862 %}
11863 ins_pipe( pipe_slow );
11864 %}
// Slow ConvD2I: truncate and test the FCSR invalid-operation flag
// (bit 16); if clear the truncated value (from the delay slot) is used,
// otherwise fall back to SharedRuntime::d2i.
// NOTE(review): unlike convF2I_reg_slow, V0 is not preserved around the
// runtime call here — confirm whether the same V0-corruption fix applies.
11866 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11867 match(Set dst (ConvD2I src));
11869 ins_cost(250);
11870 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11872 ins_encode %{
11873 FloatRegister src = $src$$FloatRegister;
11874 Register dst = $dst$$Register;
11875 Label L;
11877 __ trunc_w_d(F30, src);
11878 __ cfc1(AT, 31);
11879 __ li(T9, 0x10000);
11880 __ andr(AT, AT, T9);
11881 __ beq(AT, R0, L);
11882 __ delayed()->mfc1(dst, F30);
11884 __ mov_d(F12, src);
11885 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11886 __ move(dst, V0);
11887 __ bind(L);
11889 %}
11890 ins_pipe( pipe_slow );
11891 %}
11893 // Convert oop pointer into compressed form
11893 // (may-be-null case: predicate excludes the NotNull type).
11894 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11895 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11896 match(Set dst (EncodeP src));
11897 format %{ "encode_heap_oop $dst,$src" %}
11898 ins_encode %{
11899 Register src = $src$$Register;
11900 Register dst = $dst$$Register;
11901 if (src != dst) {
11902 __ move(dst, src);
11903 }
11904 __ encode_heap_oop(dst);
11905 %}
11906 ins_pipe( ialu_regL_regL );
11907 %}
// Compress an oop statically known to be non-null; skips the null check
// inside the encoder.
11909 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11910 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11911 match(Set dst (EncodeP src));
11912 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11913 ins_encode %{
11914 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11915 %}
11916 ins_pipe( ialu_regL_regL );
11917 %}
// Decompress a narrow oop that may be null (predicate excludes the
// NotNull and Constant cases, which the rule below handles).
11919 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11920 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11921 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11922 match(Set dst (DecodeN src));
11923 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11924 ins_encode %{
11925 Register s = $src$$Register;
11926 Register d = $dst$$Register;
11927 if (s != d) {
11928 __ move(d, s);
11929 }
11930 __ decode_heap_oop(d);
11931 %}
11932 ins_pipe( ialu_regL_regL );
11933 %}
// Decompress a narrow oop known non-null (or constant); the two-register
// decode form is used when src and dst differ.
11935 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11936 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11937 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11938 match(Set dst (DecodeN src));
11939 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11940 ins_encode %{
11941 Register s = $src$$Register;
11942 Register d = $dst$$Register;
11943 if (s != d) {
11944 __ decode_heap_oop_not_null(d, s);
11945 } else {
11946 __ decode_heap_oop_not_null(d);
11947 }
11948 %}
11949 ins_pipe( ialu_regL_regL );
11950 %}
// Compress a klass pointer (always non-null, so no predicate is needed).
// The format text previously read "encode_heap_oop_not_null", which
// mislabeled this instruct in debug/PrintOptoAssembly output; it now
// matches the emitted operation.
11952 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11953 match(Set dst (EncodePKlass src));
11954 format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
11955 ins_encode %{
11956 __ encode_klass_not_null($dst$$Register, $src$$Register);
11957 %}
11958 ins_pipe( ialu_regL_regL );
11959 %}
// Decompress a narrow klass pointer (always non-null).  The format text
// previously read "decode_heap_klass_not_null"; it now names the actual
// emitted operation for consistent debug output.
11961 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11962 match(Set dst (DecodeNKlass src));
11963 format %{ "decode_klass_not_null $dst,$src" %}
11964 ins_encode %{
11965 Register s = $src$$Register;
11966 Register d = $dst$$Register;
11967 if (s != d) {
11968 __ decode_klass_not_null(d, s);
11969 } else {
11970 __ decode_klass_not_null(d);
11971 }
11972 %}
11973 ins_pipe( ialu_regL_regL );
11974 %}
11976 //FIXME
11977 // Load the current JavaThread pointer.  With OPT_THREAD the thread
11977 // lives in a dedicated register (TREG); otherwise call get_thread().
11977 instruct tlsLoadP(mRegP dst) %{
11978 match(Set dst (ThreadLocal));
11980 ins_cost(0);
11981 format %{ " get_thread in $dst #@tlsLoadP" %}
11982 ins_encode %{
11983 Register dst = $dst$$Register;
11984 #ifdef OPT_THREAD
11985 __ move(dst, TREG);
11986 #else
11987 __ get_thread(dst);
11988 #endif
11989 %}
11991 ins_pipe( ialu_loadI );
11992 %}
// CheckCastPP is a type-system-only node: no code is emitted.
11995 instruct checkCastPP( mRegP dst ) %{
11996 match(Set dst (CheckCastPP dst));
11998 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11999 ins_encode( /*empty encoding*/ );
12000 ins_pipe( empty );
12001 %}
// CastPP is likewise a no-op at the machine level.
12003 instruct castPP(mRegP dst)
12004 %{
12005 match(Set dst (CastPP dst));
12007 size(0);
12008 format %{ "# castPP of $dst" %}
12009 ins_encode(/* empty encoding */);
12010 ins_pipe(empty);
12011 %}
// CastII is a no-op as well.
12013 instruct castII( mRegI dst ) %{
12014 match(Set dst (CastII dst));
12015 format %{ "#castII of $dst empty encoding" %}
12016 ins_encode( /*empty encoding*/ );
12017 ins_cost(0);
12018 ins_pipe( empty );
12019 %}
12021 // Return Instruction
12022 // Remove the return address & jump to it.  The 'nop' fills the jr
12022 // branch delay slot.
12023 instruct Ret() %{
12024 match(Return);
12025 format %{ "RET #@Ret" %}
12027 ins_encode %{
12028 __ jr(RA);
12029 __ nop();
12030 %}
12032 ins_pipe( pipe_jump );
12033 %}
12035 /*
12036 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12037 instruct jumpXtnd(mRegL switch_val) %{
12038 match(Jump switch_val);
12040 ins_cost(350);
12042 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12043 "jr T9\n\t"
12044 "nop" %}
12045 ins_encode %{
12046 Register table_base = $constanttablebase;
12047 int con_offset = $constantoffset;
12048 Register switch_reg = $switch_val$$Register;
12050 if (UseLoongsonISA) {
12051 if (Assembler::is_simm(con_offset, 8)) {
12052 __ gsldx(T9, table_base, switch_reg, con_offset);
12053 } else if (Assembler::is_simm16(con_offset)) {
12054 __ daddu(T9, table_base, switch_reg);
12055 __ ld(T9, T9, con_offset);
12056 } else {
12057 __ move(T9, con_offset);
12058 __ daddu(AT, table_base, switch_reg);
12059 __ gsldx(T9, AT, T9, 0);
12060 }
12061 } else {
12062 if (Assembler::is_simm16(con_offset)) {
12063 __ daddu(T9, table_base, switch_reg);
12064 __ ld(T9, T9, con_offset);
12065 } else {
12066 __ move(T9, con_offset);
12067 __ daddu(AT, table_base, switch_reg);
12068 __ daddu(AT, T9, AT);
12069 __ ld(T9, AT, 0);
12070 }
12071 }
12073 __ jr(T9);
12074 __ nop();
12076 %}
12077 ins_pipe(pipe_jump);
12078 %}
12079 */
12081 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label.
// The `&L` null check follows this port's convention: a not-yet-bound
// label pointer may be null, in which case a branch with offset 0 is
// emitted and patched later.  The nop fills the delay slot.
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12104 // Tail Jump; remove the return address; jump to target.
12105 // TailCall above leaves the return address around.
12106 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12107 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12108 // "restore" before this instruction (in Epilogue), we need to materialize it
12109 // in %i0.
12110 //FIXME
// Tail jump used by the rethrow stub: moves the exception oop into V0 and
// the return address (exception pc) into V1 before jumping, matching the
// register convention expected by generate_forward_exception() and
// OptoRuntime::generate_exception_blob().
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *  [stubGenerator_mips.cpp] generate_forward_exception()
     *  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();
  %}
  ins_pipe( pipe_jump );
%}
12135 // ============================================================================
12136 // Procedure Call/Return Instructions
12137 // Call Java Static Instruction
12138 // Note: If this code changes, the corresponding ret_addr_offset() and
12139 // compute_padding() functions will have to be adjusted.
// Direct static Java call.  Alignment (16) and pc-relative marking must
// stay in sync with ret_addr_offset() / compute_padding().
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12152 // Call Java Dynamic Instruction
12153 // Note: If this code changes, the corresponding ret_addr_offset() and
12154 // compute_padding() functions will have to be adjusted.
// Dynamic (inline-cache) Java call.  Alignment (16) and pc-relative
// marking must stay in sync with ret_addr_offset() / compute_padding().
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Leaf runtime call that does not use/save FP state; no safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12180 // Prefetch instructions.
12182 instruct prefetchrNTA( memory mem ) %{
12183 match(PrefetchRead mem);
12184 ins_cost(125);
12186 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12187 ins_encode %{
12188 int base = $mem$$base;
12189 int index = $mem$$index;
12190 int scale = $mem$$scale;
12191 int disp = $mem$$disp;
12193 if( index != 0 ) {
12194 if (scale == 0) {
12195 __ daddu(AT, as_Register(base), as_Register(index));
12196 } else {
12197 __ dsll(AT, as_Register(index), scale);
12198 __ daddu(AT, as_Register(base), AT);
12199 }
12200 } else {
12201 __ move(AT, as_Register(base));
12202 }
12203 if( Assembler::is_simm16(disp) ) {
12204 __ daddiu(AT, as_Register(base), disp);
12205 __ daddiu(AT, AT, disp);
12206 } else {
12207 __ move(T9, disp);
12208 __ daddu(AT, as_Register(base), T9);
12209 }
12210 __ pref(0, AT, 0); //hint: 0:load
12211 %}
12212 ins_pipe(pipe_slow);
12213 %}
12215 instruct prefetchwNTA( memory mem ) %{
12216 match(PrefetchWrite mem);
12217 ins_cost(125);
12218 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12219 ins_encode %{
12220 int base = $mem$$base;
12221 int index = $mem$$index;
12222 int scale = $mem$$scale;
12223 int disp = $mem$$disp;
12225 if( index != 0 ) {
12226 if (scale == 0) {
12227 __ daddu(AT, as_Register(base), as_Register(index));
12228 } else {
12229 __ dsll(AT, as_Register(index), scale);
12230 __ daddu(AT, as_Register(base), AT);
12231 }
12232 } else {
12233 __ move(AT, as_Register(base));
12234 }
12235 if( Assembler::is_simm16(disp) ) {
12236 __ daddiu(AT, as_Register(base), disp);
12237 __ daddiu(AT, AT, disp);
12238 } else {
12239 __ move(T9, disp);
12240 __ daddu(AT, as_Register(base), T9);
12241 }
12242 __ pref(1, AT, 0); //hint: 1:store
12243 %}
12244 ins_pipe(pipe_slow);
12245 %}
12247 // Prefetch instructions for allocation.
// Prefetch for allocation: implemented as a byte load into R0 (the
// hard-wired zero register), which touches the cache line without
// changing architectural state.  Uses gslbx (indexed load) on Loongson.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // Destination is R0: the load result is discarded, only the cache
    // side effect matters.
    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
12312 // Call runtime without safepoint
// Leaf runtime call (no safepoint).
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12325 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned) from memory.
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load 16-bit unsigned char combined with int-to-long conversion; the
// zero-extending load already produces the correct long value.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12344 // Store Char (16bit unsigned)
// Store Char (16-bit) register to memory.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store Char zero to memory (uses R0, no register needed).
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0: move R0 into the FPU register directly,
// avoiding a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load float constant from the constant table.  Uses lwc1 when the
// offset fits in 16 bits, otherwise an indexed load (gslwxc1 on
// Loongson, or explicit add + lwc1).
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0: 64-bit move of R0 into the FPU register,
// avoiding a constant-table load.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant from the constant table.  Uses ldc1 when the
// offset fits in 16 bits, otherwise an indexed load (gsldxc1 on
// Loongson, or explicit add + ldc1).
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12438 // Store register Float value (it is faster than store from FPU register)
// Store float register to memory.
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0 to memory: since +0.0f has an all-zero bit pattern,
// an integer sw of R0 is used instead of an FPU store.  The address is
// formed from base [+ index<<scale] + disp, with Loongson indexed
// stores (gsswx) used where the displacement permits.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx takes only an 8-bit immediate, so dispatch on disp size.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12525 // Load Double
// Load Double from memory.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12535 // Load Double - UNaligned
// Load Double - unaligned.  Currently reuses the aligned encoding;
// see the FIXME about using ldl/ldr for a real unaligned sequence.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store double register to memory.
// NOTE(review): format text says "store float"; this is the double variant.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0 to memory.  F30 is loaded with integer 0 (mtc1) and
// converted to double 0.0 (cvt_d_w), then stored with sdc1/gssdxc1.
// The address is formed from base [+ index<<scale] + disp.
// NOTE(review): format text says "store float"; this is the double variant.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // Materialize 0.0 in scratch FPU register F30.
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 takes only an 8-bit immediate, so dispatch on disp size.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load int from a stack slot (SP-relative; disp must fit in simm16).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store int to a stack slot (SP-relative; disp must fit in simm16).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load long from a stack slot (SP-relative; disp must fit in simm16).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store long to a stack slot (SP-relative; disp must fit in simm16).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load pointer from a stack slot (SP-relative; disp must fit in simm16).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store pointer to a stack slot (SP-relative; disp must fit in simm16).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load float from a stack slot (SP-relative; disp must fit in simm16).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store float to a stack slot (SP-relative; disp must fit in simm16).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
12739 // Use the same format since predicate() can not be used here.
// Load double from a stack slot (SP-relative; disp must fit in simm16).
// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store double to a stack slot (SP-relative; disp must fit in simm16).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; delegates to MacroAssembler::fast_lock.
// The box register is killed (USE_KILL) and tmp/scr are temporaries.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit; delegates to MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12792 // Store CMS card-mark Immediate
// Store CMS card-mark immediate byte (with memory synchronization, per
// the _sync encoding).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12803 // Die now
12804 instruct ShouldNotReachHere( )
12805 %{
12806 match(Halt);
12807 ins_cost(300);
12809 // Use the following format syntax
12810 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12811 ins_encode %{
12812 // Here we should emit illtrap !
12814 __ stop("in ShoudNotReachHere");
12816 %}
12817 ins_pipe( pipe_jump );
12818 %}
// Load effective address base+disp8 for a narrow-oop addressed operand;
// only valid when the narrow oop shift is 0 (see predicate).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address dst = base + index<<scale + disp for a
// base+positive-index+scale+offset8 operand.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address dst = base + index<<scale (no displacement).
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
12886 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch: compare two int registers and branch on the
// condition code in $cop.  Note that "above"/"below" here are encoded
// with slt, i.e. signed comparisons (CmpI operands are signed ints).
// An unbound label (&L == NULL) emits a zero-offset branch to be patched
// later; the trailing nop fills the delay slot.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a small immediate: computes
// AT = src1 - src2 (immI16_sub guarantees -src2 fits addiu32), then
// branches on AT's sign/zero.  Cheaper than jmpLoopEnd (one compare
// fused into the subtract).
instruct  jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT <- op1 - op2; subsequent branches test AT against zero.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13013 /*
13014 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13015 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13016 match(CountedLoopEnd cop cmp);
13017 effect(USE labl);
13019 ins_cost(300);
13020 format %{ "J$cop,u $labl\t# Loop end" %}
13021 size(6);
13022 opcode(0x0F, 0x80);
13023 ins_encode( Jcc( cop, labl) );
13024 ins_pipe( pipe_jump );
13025 ins_pc_relative(1);
13026 %}
13028 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13029 match(CountedLoopEnd cop cmp);
13030 effect(USE labl);
13032 ins_cost(200);
13033 format %{ "J$cop,u $labl\t# Loop end" %}
13034 opcode(0x0F, 0x80);
13035 ins_encode( Jcc( cop, labl) );
13036 ins_pipe( pipe_jump );
13037 ins_pc_relative(1);
13038 %}
13039 */
13041 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the flag value emulated in AT (set by storeIConditional:
// non-zero on success).  Note the apparent inversion: "equal" (0x01)
// branches on AT != 0 — presumably because the If tests the cr produced
// by the conditional store, where success is AT != 0.  TODO confirm
// against the cmpOp encoding used by the matcher.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl  #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label    &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13076 // ============================================================================
13077 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13078 // array for an instance of the superklass. Set a hidden internal cache on a
13079 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13080 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-half subtype check: scan the subklass's secondary-supers array for
// the superklass (see gen_subtype_check()).  Result is NZ on miss, zero
// on hit; tmp (T8) is clobbered.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13092 // Conditional-store of an int value.
13093 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional store of an int using an ll/sc retry loop.
// On exit AT holds the "flag": 0xFF (set in the sc-retry delay slot) on
// success, 0 (set in the compare-failure delay slot) on mismatch.
// Indexed addressing is not supported (guarded by __ stop()).
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

//   int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      // Mismatch -> failure, with AT cleared (flag = 0) in the delay slot.
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // sc failed (AT == 0) -> retry; on the taken/non-taken paths the
      // delay slot sets AT = 0xFF (flag = success).
      __ beq(AT, R0, again);
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13133 // Conditional-store of a long value.
13134 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13135 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13136 %{
13137 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13138 effect(KILL oldval);
13140 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13141 ins_encode%{
13142 Register oldval = $oldval$$Register;
13143 Register newval = $newval$$Register;
13144 Address addr((Register)$mem$$base, $mem$$disp);
13146 int index = $mem$$index;
13147 int scale = $mem$$scale;
13148 int disp = $mem$$disp;
13150 guarantee(Assembler::is_simm16(disp), "");
13152 if( index != 0 ) {
13153 __ stop("in storeIConditional: index != 0");
13154 } else {
13155 __ cmpxchg(newval, addr, oldval);
13156 }
13157 %}
13158 ins_pipe( long_memory_op );
13159 %}
// CompareAndSwapI: 32-bit CAS via MacroAssembler::cmpxchg32; AT holds the
// success flag, copied into $res.  oldval (S2) is killed by the cmpxchg.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;  // NOTE(review): unused

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13184 //FIXME:
// CompareAndSwapP: 64-bit pointer CAS via MacroAssembler::cmpxchg; AT
// holds the success flag, copied into $res.  oldval (S2) is killed.
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;  // NOTE(review): unused

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwapN: CAS of a narrow (compressed) oop.  oldval is first
// sign-extended (sll ,0) because the ll inside cmpxchg32 sign-extends
// the loaded 32-bit value, and the comparison must match.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;  // NOTE(review): unused

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13228 //----------Max and Min--------------------------------------------------------
13229 // Min Instructions
13230 ////
13231 // *** Min and Max using the conditional move are slower than the
13232 // *** branch version on a Pentium III.
13233 // // Conditional move for min
13234 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13235 // effect( USE_DEF op2, USE op1, USE cr );
13236 // format %{ "CMOVlt $op2,$op1\t! min" %}
13237 // opcode(0x4C,0x0F);
13238 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13239 // ins_pipe( pipe_cmov_reg );
13240 //%}
13241 //
13242 //// Min Register with Register (P6 version)
13243 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13244 // predicate(VM_Version::supports_cmov() );
13245 // match(Set op2 (MinI op1 op2));
13246 // ins_cost(200);
13247 // expand %{
13248 // eFlagsReg cr;
13249 // compI_eReg(cr,op1,op2);
13250 // cmovI_reg_lt(op2,op1,cr);
13251 // %}
13252 //%}
13254 // Min Register with Register (generic version)
// Integer minimum, in place: dst = min(dst, src).
// Branch-free via slt + conditional move (movn).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (src < dst); if AT != 0, dst = src  =>  dst = min(dst, src).
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13273 // Max Register with Register
13274 // *** Min and Max using the conditional move are slower than the
13275 // *** branch version on a Pentium III.
13276 // // Conditional move for max
13277 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13278 // effect( USE_DEF op2, USE op1, USE cr );
13279 // format %{ "CMOVgt $op2,$op1\t! max" %}
13280 // opcode(0x4F,0x0F);
13281 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13282 // ins_pipe( pipe_cmov_reg );
13283 //%}
13284 //
13285 // // Max Register with Register (P6 version)
13286 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13287 // predicate(VM_Version::supports_cmov() );
13288 // match(Set op2 (MaxI op1 op2));
13289 // ins_cost(200);
13290 // expand %{
13291 // eFlagsReg cr;
13292 // compI_eReg(cr,op1,op2);
13293 // cmovI_reg_gt(op2,op1,cr);
13294 // %}
13295 //%}
13297 // Max Register with Register (generic version)
// Integer maximum, in place: dst = max(dst, src).
// Branch-free via slt + conditional move (movn).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src); if AT != 0, dst = src  =>  dst = max(dst, src).
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Integer maximum against zero, in place: dst = max(dst, 0).
// Cheaper special case: clamp negative values to 0 with slt + movn.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0); if so, dst = 0.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Long AND with 0xFFFFFFFF, i.e. zero-extend the low 32 bits of src into dst.
// dext copies bit field [0,31] and clears the upper 32 bits in one instruction.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two 32-bit ints into one long:
//   dst = (zero-extend src1) | ((long)src2 << 32)
// i.e. src1 supplies the low half and src2 the high half.  Three cases are
// needed because dst may alias either input; statement order in each branch
// is chosen so the not-yet-consumed input is never overwritten first.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; just insert src2 into bits [32,63].
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move the high half up first, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // No aliasing: build low half, then insert high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13370 // Zero-extend convert int to long
// ConvI2L followed by AND 0xFFFFFFFF, i.e. unsigned int-to-long conversion.
// dext zero-extends the low 32 bits of src into dst.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// L2I then I2L then AND 0xFFFFFFFF collapses to a single zero-extension of
// the low 32 bits of the long src.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
13399 // Match loading integer and casting it to unsigned int in long register.
13400 // LoadI + ConvI2L + AndL 0xffffffff.
// LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) folded into a single
// zero-extending 32-bit load.
// NOTE(review): this reuses load_N_enc — presumably it emits lwu, matching
// the "lwu" in the format; verify against the encoding's definition.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask on the left of the AndL, since
// the matcher does not canonicalize operand order here.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13418 // ============================================================================
13419 // Safepoint Instruction
// Safepoint poll using a register that already holds the polling-page
// address.  Currently disabled: predicate(false) means the matcher never
// selects this form (safePoint_poll below is used instead).
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Record the poll relocation so the VM can associate this pc with a
    // safepoint, then touch the polling page (faults when armed).
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address and load from it.
// The load faults when the page is armed, diverting the thread into the
// safepoint handler.  Clobbers T9 (address) and AT (loaded value).
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    // Relocation must precede the load so this pc is registered as a poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13455 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on MIPS: float registers already hold values at
// float precision, so no strip-rounding instruction is needed.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op: registers hold doubles at exactly
// double precision (no x87-style extended precision on MIPS).
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13475 //---------- Zeros Count Instructions ------------------------------------------
13476 // CountLeadingZerosINode CountTrailingZerosINode
// Integer.numberOfLeadingZeros: single clz instruction, guarded by the
// UseCountLeadingZerosInstruction flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long.numberOfLeadingZeros: 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Integer.numberOfTrailingZeros via the ctz instruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson (gs) extension instructions, hence the
    // UseCountTrailingZerosInstruction predicate.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13511 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13512 predicate(UseCountTrailingZerosInstruction);
13513 match(Set dst (CountTrailingZerosL src));
13515 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13516 ins_encode %{
13517 __ dctz($dst$$Register, $src$$Register);
13518 %}
13519 ins_pipe( ialu_regL_regL );
13520 %}
13522 // ====================VECTOR INSTRUCTIONS=====================================
13524 // Load vectors (8 bytes long)
// Load an 8-byte vector into a double FP register (vecD is backed by regD).
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
13534 // Store vectors (8 bytes long)
// Store an 8-byte vector from a double FP register to memory.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes using the Loongson DSP replv_ob
// instruction (available on 3A2000 and later, hence the Use3A2000 guard;
// lower cost than the generic Repl8B below so it wins when enabled).
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    // Move the replicated 64-bit pattern from AT into the FP vector reg.
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte replication (no DSP): doubling trick — duplicate the byte
// to 16 bits, then 32, then 64, then move the pattern into the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);     // bb       -> bits [0,15]
    __ dins(AT, AT, 16, 16);   // bbbb     -> bits [0,31]
    __ dinsu(AT, AT, 32, 32);  // bbbbbbbb -> bits [0,63]
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte constant into all 8 lanes with the DSP repl_ob
// immediate form (3A2000+).
// NOTE(review): unlike Repl4S_imm_DSP there is no is_simm(val, 10) guard
// here — presumably safe because a ReplicateB constant is a byte value and
// always fits repl_ob's immediate field; confirm against the matcher.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte-constant replication: load the constant, then widen it by
// the same doubling trick as Repl8B (8 -> 16 -> 32 -> 64 bits).
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero bytes: all-zero pattern comes straight from R0.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate -1 bytes: nor(AT, R0, R0) yields all-ones in AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes using the DSP replv_qh instruction
// (3A2000+; preferred over the generic Repl4S when available).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short replication: doubling trick, 16 -> 32 -> 64 bits.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);   // ss   -> bits [0,31]
    __ dinsu(AT, AT, 32, 32);  // ssss -> bits [0,63]
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short constant with the DSP qh instructions (3A2000+).
// Small constants use the immediate form; larger ones are materialized
// first and replicated with the register form.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short-constant replication: materialize, then widen 16 -> 32 -> 64.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero shorts: all-zero pattern comes straight from R0.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate -1 shorts: all-ones via nor(AT, R0, R0).
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13720 // Replicate integer (4 byte) scalar to be vector
// Replicate integer (4 byte) scalar to be vector: src's low 32 bits are
// inserted into both halves of AT, then moved to the FP register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);    // low half
    __ dinsu(AT, $src$$Register, 32, 32);  // high half
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13735 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
13736 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13737 predicate(n->as_Vector()->length() == 2);
13738 match(Set dst (ReplicateI con));
13739 effect(KILL tmp);
13740 format %{ "li32 AT, [$con], 32\n\t"
13741 "dinsu AT, AT\n\t"
13742 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13743 ins_encode %{
13744 int val = $con$$constant;
13745 __ li32(AT, val);
13746 __ dinsu(AT, AT, 32, 32);
13747 __ dmtc1(AT, $dst$$FloatRegister);
13748 %}
13749 ins_pipe( pipe_mtc1 );
13750 %}
13752 // Replicate integer (4 byte) scalar zero to be vector
// Replicate integer (4 byte) scalar zero to be vector: pattern from R0.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13763 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate integer (4 byte) scalar -1 to be vector: all-ones via nor.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13775 // Replicate float (4 byte) scalar to be vector
// Replicate float (4 byte) scalar to be vector: cvt.ps.s packs the same
// single into both halves of a paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13786 // Replicate float (4 byte) scalar zero to be vector
// Replicate float (4 byte) scalar zero to be vector: +0.0f is all-zero
// bits, so the pattern comes straight from R0.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13798 // ====================VECTOR ARITHMETIC=======================================
13800 // --------------------------------- ADD --------------------------------------
13802 // Floats vector add
// Packed-single float add, two-operand form: dst += src (paired single).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed-single float add, three-operand form: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13823 // --------------------------------- SUB --------------------------------------
13825 // Floats vector sub
// Packed-single float subtract: dst -= src (paired single).
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13836 // --------------------------------- MUL --------------------------------------
13838 // Floats vector mul
// Packed-single float multiply, two-operand form: dst *= src.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed-single float multiply, three-operand form: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13859 // --------------------------------- DIV --------------------------------------
13860 // MIPS do not have div.ps
13863 //----------PEEPHOLE RULES-----------------------------------------------------
13864 // These must follow all instruction definitions as they use the names
13865 // defined in the instructions definitions.
13866 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13868 //
13869 // peepconstraint %{
13870 // (instruction_number.operand_name relational_op instruction_number.operand_name
13871 // [, ...] );
13872 // // instruction numbers are zero-based using left to right order in peepmatch
13873 //
13874 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13875 // // provide an instruction_number.operand_name for each operand that appears
13876 // // in the replacement instruction's match rule
13877 //
13878 // ---------VM FLAGS---------------------------------------------------------
13879 //
13880 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13881 //
13882 // Each peephole rule is given an identifying number starting with zero and
13883 // increasing by one in the order seen by the parser. An individual peephole
13884 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13885 // on the command-line.
13886 //
13887 // ---------CURRENT LIMITATIONS----------------------------------------------
13888 //
13889 // Only match adjacent instructions in same basic block
13890 // Only equality constraints
13891 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13892 // Only one replacement instruction
13893 //
13894 // ---------EXAMPLE----------------------------------------------------------
13895 //
13896 // // pertinent parts of existing instructions in architecture description
13897 // instruct movI(eRegI dst, eRegI src) %{
13898 // match(Set dst (CopyI src));
13899 // %}
13900 //
13901 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13902 // match(Set dst (AddI dst src));
13903 // effect(KILL cr);
13904 // %}
13905 //
13906 // // Change (inc mov) to lea
13907 // peephole %{
// // increment preceded by register-register move
13909 // peepmatch ( incI_eReg movI );
13910 // // require that the destination register of the increment
13911 // // match the destination register of the move
13912 // peepconstraint ( 0.dst == 1.dst );
13913 // // construct a replacement instruction that sets
13914 // // the destination to ( move's source register + one )
13915 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13916 // %}
13917 //
13918 // Implementation no longer uses movX instructions since
13919 // machine-independent system no longer uses CopyX nodes.
13920 //
13921 // peephole %{
13922 // peepmatch ( incI_eReg movI );
13923 // peepconstraint ( 0.dst == 1.dst );
13924 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13925 // %}
13926 //
13927 // peephole %{
13928 // peepmatch ( decI_eReg movI );
13929 // peepconstraint ( 0.dst == 1.dst );
13930 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13931 // %}
13932 //
13933 // peephole %{
13934 // peepmatch ( addI_eReg_imm movI );
13935 // peepconstraint ( 0.dst == 1.dst );
13936 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13937 // %}
13938 //
13939 // peephole %{
13940 // peepmatch ( addP_eReg_imm movP );
13941 // peepconstraint ( 0.dst == 1.dst );
13942 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13943 // %}
13945 // // Change load of spilled value to only a spill
13946 // instruct storeI(memory mem, eRegI src) %{
13947 // match(Set mem (StoreI mem src));
13948 // %}
13949 //
13950 // instruct loadI(eRegI dst, memory mem) %{
13951 // match(Set dst (LoadI mem));
13952 // %}
13953 //
13954 //peephole %{
13955 // peepmatch ( loadI storeI );
13956 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13957 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13958 //%}
13960 //----------SMARTSPILL RULES---------------------------------------------------
13961 // These must follow all instruction definitions as they use the names
13962 // defined in the instructions definitions.